aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorLinux Build Service Account <lnxbuild@localhost>2019-06-04 04:20:41 -0700
committerLinux Build Service Account <lnxbuild@localhost>2019-06-04 04:20:41 -0700
commita426d589b2309a61c7a6f19448bcde273fe34549 (patch)
tree0445fef6809ba02f5255ceb0405ac5b9c1d04549
parent8950f61a30d68754a957dee84920c8a431df0dda (diff)
parentead62404cc33682eed48e3a5982f4f1d393c4b4d (diff)
Merge ead62404cc33682eed48e3a5982f4f1d393c4b4d on remote branchLA.UM.6.8.2.r1-01500-SDM710.0
Change-Id: Ie6eff4ef1cd2f17836642ee972dcaa8c0b4d99ed
-rw-r--r--Documentation/devicetree/bindings/arm/msm/hyp_core_ctl.txt15
-rw-r--r--Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt14
-rw-r--r--Documentation/devicetree/bindings/i2c/qcom,i2c-qcom-geni.txt3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,msm8937-pinctrl.txt9
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,msm8953-pinctrl.txt9
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,sdm670-pinctrl14
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,sdm845-pinctrl15
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,sdxpoorwill-pinctrl13
-rw-r--r--Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt1
-rw-r--r--arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi3
-rw-r--r--arch/arm/kernel/topology.c128
-rw-r--r--arch/arm64/boot/dts/qcom/msm8937-pinctrl.dtsi5
-rw-r--r--arch/arm64/boot/dts/qcom/msm8953-pinctrl.dtsi3
-rw-r--r--arch/arm64/boot/dts/qcom/qcs605-lc-sde-display.dtsi7
-rw-r--r--arch/arm64/boot/dts/qcom/qcs605.dtsi6
-rw-r--r--arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi51
-rw-r--r--arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi68
-rw-r--r--arch/arm64/boot/dts/qcom/sdm670-sde-pll.dtsi15
-rw-r--r--arch/arm64/boot/dts/qcom/sdm670-sde.dtsi14
-rw-r--r--arch/arm64/boot/dts/qcom/sdm670.dtsi11
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi5
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi36
-rw-r--r--arch/arm64/configs/okl4_virtual_platform_sdm670_vm_defconfig3
-rw-r--r--arch/arm64/kernel/perf_event.c4
-rw-r--r--block/bio.c1
-rw-r--r--drivers/android/binder.c208
-rw-r--r--drivers/android/binder_alloc.c32
-rw-r--r--drivers/android/binder_alloc.h3
-rw-r--r--drivers/char/adsprpc.c529
-rw-r--r--drivers/char/adsprpc_compat.c18
-rw-r--r--drivers/char/adsprpc_shared.h19
-rw-r--r--drivers/char/diag/diag_dci.c4
-rw-r--r--drivers/char/diag/diag_masks.c1
-rw-r--r--drivers/char/diag/diagchar_core.c19
-rw-r--r--drivers/char/diag/diagfwd_cntl.c7
-rw-r--r--drivers/clk/qcom/clk-rcg2.c35
-rw-r--r--drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c620
-rw-r--r--drivers/clk/qcom/mdss/mdss-pll.h24
-rw-r--r--drivers/crypto/msm/ice.c4
-rw-r--r--drivers/crypto/msm/qcedev.c5
-rw-r--r--drivers/gpio/Makefile1
-rw-r--r--drivers/gpio/gpio-msm-smp2p-test.c763
-rw-r--r--drivers/gpu/drm/msm/dp/dp_debug.c29
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c12
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h21
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_clk.h73
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c494
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c116
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h26
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h13
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c57
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_reg.h39
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_defs.h45
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_display.c851
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_display.h22
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_drm.c72
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_drm.h2
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_hw.h12
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_panel.c164
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_panel.h11
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_phy.c150
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_phy.h50
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h55
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v3_0.c229
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_phy_timing_calc.c18
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c101
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c30
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h9
-rw-r--r--drivers/gpu/drm/msm/sde/sde_ad4.h1
-rw-r--r--drivers/gpu/drm/msm/sde/sde_color_processing.c71
-rw-r--r--drivers/gpu/drm/msm/sde/sde_color_processing.h9
-rw-r--r--drivers/gpu/drm/msm/sde/sde_connector.c41
-rw-r--r--drivers/gpu/drm/msm/sde/sde_connector.h10
-rw-r--r--drivers/gpu/drm/msm/sde/sde_crtc.c6
-rw-r--r--drivers/gpu/drm/msm/sde/sde_crtc.h2
-rw-r--r--drivers/gpu/drm/msm/sde/sde_encoder.c38
-rw-r--r--drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c34
-rw-r--r--drivers/gpu/drm/msm/sde/sde_formats.c34
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_ad4.c34
-rw-r--r--drivers/gpu/drm/msm/sde/sde_kms.c10
-rw-r--r--drivers/gpu/drm/msm/sde/sde_kms.h5
-rw-r--r--drivers/gpu/drm/msm/sde_dbg.c34
-rw-r--r--drivers/gpu/msm/kgsl.c4
-rw-r--r--drivers/i2c/busses/i2c-msm-v2.c8
-rw-r--r--drivers/i2c/busses/i2c-qcom-geni.c36
-rw-r--r--drivers/iommu/dma-mapping-fast.c14
-rw-r--r--drivers/iommu/iommu-debug.c24
-rw-r--r--drivers/irqchip/irq-gic.c49
-rw-r--r--drivers/media/platform/msm/camera/cam_cdm/cam_cdm.h6
-rw-r--r--drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c11
-rw-r--r--drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf.c5
-rw-r--r--drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf_api.h6
-rw-r--r--drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual_core.c7
-rw-r--r--drivers/media/platform/msm/camera/cam_core/cam_context.c27
-rw-r--r--drivers/media/platform/msm/camera/cam_core/cam_context.h50
-rw-r--r--drivers/media/platform/msm/camera/cam_core/cam_context_utils.c44
-rw-r--r--drivers/media/platform/msm/camera/cam_core/cam_context_utils.h5
-rw-r--r--drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h65
-rw-r--r--drivers/media/platform/msm/camera/cam_core/cam_node.c75
-rw-r--r--drivers/media/platform/msm/camera/cam_core/cam_node.h5
-rw-r--r--drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c8
-rw-r--r--drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h2
-rw-r--r--drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c4
-rw-r--r--drivers/media/platform/msm/camera/cam_fd/cam_fd_context.c4
-rw-r--r--drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c17
-rw-r--r--drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c10
-rw-r--r--drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_intf.h4
-rw-r--r--drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c44
-rw-r--r--drivers/media/platform/msm/camera/cam_icp/cam_icp_subdev.c29
-rw-r--r--drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h8
-rw-r--r--drivers/media/platform/msm/camera/cam_icp/hfi.c19
-rw-r--r--drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c2
-rw-r--r--drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.h2
-rw-r--r--drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c6
-rw-r--r--drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c170
-rw-r--r--drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h2
-rw-r--r--drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c7
-rw-r--r--drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c210
-rw-r--r--drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h1
-rw-r--r--drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.c58
-rw-r--r--drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.h7
-rw-r--r--drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c236
-rw-r--r--drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h11
-rw-r--r--drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.c6
-rw-r--r--drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c8
-rw-r--r--drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h8
-rw-r--r--drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.h5
-rw-r--r--drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c198
-rw-r--r--drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h8
-rw-r--r--drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h1
-rw-r--r--drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h3
-rw-r--r--drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c4
-rw-r--r--drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c87
-rw-r--r--drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h11
-rw-r--r--drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c45
-rw-r--r--drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.c59
-rw-r--r--drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.h4
-rw-r--r--drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c151
-rw-r--r--drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h4
-rw-r--r--drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_mgr_intf.h5
-rw-r--r--drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.c21
-rw-r--r--drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.h2
-rw-r--r--drivers/media/platform/msm/camera/cam_lrme/cam_lrme_dev.c16
-rw-r--r--drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c75
-rw-r--r--drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.h23
-rw-r--r--drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c39
-rw-r--r--drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.h1
-rw-r--r--drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_intf.h2
-rw-r--r--drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c12
-rw-r--r--drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.h4
-rw-r--r--drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h4
-rw-r--r--drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c29
-rw-r--r--drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h4
-rw-r--r--drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c52
-rw-r--r--drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c2
-rw-r--r--drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c36
-rw-r--r--drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h7
-rw-r--r--drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/Makefile1
-rw-r--r--drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c212
-rw-r--r--drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h26
-rw-r--r--drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_0_hwreg.h2
-rw-r--r--drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/Makefile2
-rw-r--r--drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c29
-rw-r--r--drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c12
-rw-r--r--drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.h7
-rw-r--r--drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/Makefile13
-rw-r--r--drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c971
-rw-r--r--drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.h14
-rw-r--r--drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c347
-rw-r--r--drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.h86
-rw-r--r--drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.c21
-rw-r--r--drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/Makefile1
-rw-r--r--drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c22
-rw-r--r--drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.h6
-rw-r--r--drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c65
-rw-r--r--drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.h7
-rw-r--r--drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c77
-rw-r--r--drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h5
-rw-r--r--drivers/media/platform/msm/camera/cam_smmu/Makefile1
-rw-r--r--drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c153
-rw-r--r--drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h35
-rw-r--r--drivers/media/platform/msm/camera/cam_sync/cam_sync.c24
-rw-r--r--drivers/media/platform/msm/camera/cam_utils/cam_common_util.h6
-rw-r--r--drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c13
-rw-r--r--drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c6
-rw-r--r--drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h18
-rw-r--r--drivers/media/platform/msm/vidc/hfi_response_handler.c65
-rw-r--r--drivers/media/platform/msm/vidc/msm_vidc_clocks.c10
-rw-r--r--drivers/media/platform/msm/vidc/msm_vidc_common.c16
-rw-r--r--drivers/media/platform/msm/vidc/venus_hfi.c85
-rw-r--r--drivers/media/platform/msm/vidc/vidc_hfi_api.h5
-rw-r--r--drivers/media/platform/msm/vidc_3x/hfi_response_handler.c65
-rw-r--r--drivers/media/platform/msm/vidc_3x/venus_hfi.c13
-rw-r--r--drivers/media/platform/msm/vidc_3x/vidc_hfi_api.h5
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c3
-rw-r--r--drivers/misc/Kconfig15
-rw-r--r--drivers/misc/Makefile2
-rw-r--r--drivers/misc/qseecom.c15
-rw-r--r--drivers/misc/scbuf-client.c819
-rw-r--r--drivers/misc/scbuf-client.h204
-rw-r--r--drivers/mmc/core/mmc.c7
-rw-r--r--drivers/perf/arm_pmu.c14
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c86
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdm670.c2
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_intf.c21
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_nat.c14
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_rt.c21
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_intf.c19
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_nat.c3
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_rt.c48
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_utils.c4
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c13
-rw-r--r--drivers/platform/msm/qcom-geni-se.c8
-rw-r--r--drivers/platform/msm/sps/sps.c30
-rw-r--r--drivers/platform/msm/sps/sps_bam.c27
-rw-r--r--drivers/platform/msm/sps/sps_dma.c11
-rw-r--r--drivers/platform/msm/sps/sps_mem.c10
-rw-r--r--drivers/platform/msm/sps/sps_rm.c15
-rw-r--r--drivers/soc/qcom/Kconfig9
-rw-r--r--drivers/soc/qcom/glink_smem_native_xprt.c8
-rw-r--r--drivers/soc/qcom/glink_spi_xprt.c33
-rw-r--r--drivers/soc/qcom/hyp_core_ctl.c628
-rw-r--r--drivers/soc/qcom/icnss.c79
-rw-r--r--drivers/soc/qcom/memshare/msm_memshare.c20
-rw-r--r--drivers/soc/qcom/memshare/msm_memshare.h2
-rw-r--r--drivers/soc/qcom/msm_smem.c167
-rw-r--r--drivers/soc/qcom/secure_buffer.c162
-rw-r--r--drivers/soc/qcom/service-notifier.c2
-rw-r--r--drivers/spi/spi-geni-qcom.c6
-rw-r--r--drivers/staging/android/ion/ion_cma_heap.c29
-rw-r--r--drivers/staging/android/ion/ion_system_heap.c18
-rw-r--r--drivers/staging/android/ion/ion_system_secure_heap.c10
-rw-r--r--drivers/thermal/cpu_cooling.c25
-rw-r--r--drivers/thermal/qpnp-adc-tm.c52
-rw-r--r--drivers/tty/tty_io.c4
-rw-r--r--drivers/usb/pd/policy_engine.c3
-rw-r--r--drivers/video/fbdev/msm/mdss_debug.c36
-rw-r--r--fs/crypto/fscrypt_ice.h2
-rw-r--r--fs/crypto/keyinfo.c2
-rw-r--r--fs/ext4/balloc.c14
-rw-r--r--fs/ext4/ext4.h8
-rw-r--r--fs/ext4/ext4_extents.h1
-rw-r--r--fs/ext4/extents.c6
-rw-r--r--fs/ext4/ialloc.c66
-rw-r--r--fs/ext4/inline.c38
-rw-r--r--fs/ext4/inode.c3
-rw-r--r--fs/ext4/mballoc.c6
-rw-r--r--fs/ext4/super.c14
-rw-r--r--fs/ext4/xattr.c49
-rw-r--r--fs/f2fs/data.c3
-rw-r--r--fs/f2fs/super.c6
-rw-r--r--fs/inode.c6
-rw-r--r--fs/jbd2/transaction.c9
-rw-r--r--fs/proc/inode.c9
-rw-r--r--fs/proc/internal.h3
-rw-r--r--fs/proc/root.c52
-rw-r--r--fs/sdcardfs/file.c24
-rw-r--r--fs/sdcardfs/inode.c213
-rw-r--r--fs/sdcardfs/lookup.c9
-rw-r--r--fs/sdcardfs/sdcardfs.h25
-rw-r--r--include/crypto/ice.h12
-rw-r--r--include/dt-bindings/clock/mdss-10nm-pll-clk.h38
-rw-r--r--include/linux/cpu_cooling.h19
-rw-r--r--include/linux/fscrypt.h11
-rw-r--r--include/linux/huge_mm.h2
-rw-r--r--include/linux/init_task.h1
-rw-r--r--include/linux/mm_types.h2
-rw-r--r--include/linux/perf_event.h14
-rw-r--r--include/linux/sched.h3
-rw-r--r--include/linux/vm_event_item.h1
-rw-r--r--include/linux/vmacache.h5
-rw-r--r--include/microvisor/kernel/microvisor.h9
-rw-r--r--include/microvisor/kernel/syscalls.h221
-rw-r--r--include/microvisor/kernel/types.h738
-rw-r--r--include/microvisor/microvisor.h10
-rw-r--r--include/soc/qcom/icnss.h4
-rw-r--r--include/trace/events/hyp_core_ctl.h8
-rw-r--r--include/uapi/linux/android/binder.h19
-rw-r--r--include/uapi/linux/scbuf.h24
-rw-r--r--include/uapi/media/cam_req_mgr.h3
-rw-r--r--include/uapi/media/cam_sync.h2
-rw-r--r--kernel/cpu.c2
-rw-r--r--kernel/cpuset.c18
-rw-r--r--kernel/events/core.c225
-rw-r--r--kernel/sched/core.c19
-rw-r--r--kernel/sched/energy.c5
-rw-r--r--kernel/sched/rt.c9
-rw-r--r--kernel/sched/sched.h10
-rw-r--r--kernel/sched/walt.c4
-rw-r--r--kernel/smpboot.c5
-rw-r--r--lib/qmi_encdec.c6
-rw-r--r--mm/debug.c4
-rw-r--r--mm/huge_memory.c10
-rw-r--r--mm/mremap.c30
-rw-r--r--mm/page_io.c2
-rw-r--r--mm/vmacache.c38
-rw-r--r--net/key/af_key.c40
-rw-r--r--net/xfrm/xfrm_user.c3
-rwxr-xr-xscripts/checkpatch.pl2
-rw-r--r--security/pfe/pfk.c26
-rw-r--r--security/pfe/pfk_ext4.c17
-rw-r--r--security/pfe/pfk_ext4.h4
-rw-r--r--security/pfe/pfk_f2fs.c16
-rw-r--r--security/pfe/pfk_f2fs.h4
-rw-r--r--security/pfe/pfk_ice.c63
-rw-r--r--security/pfe/pfk_ice.h2
-rw-r--r--security/pfe/pfk_kc.c21
-rw-r--r--security/pfe/pfk_kc.h3
-rw-r--r--sound/soc/soc-ops.c4
309 files changed, 11155 insertions, 4314 deletions
diff --git a/Documentation/devicetree/bindings/arm/msm/hyp_core_ctl.txt b/Documentation/devicetree/bindings/arm/msm/hyp_core_ctl.txt
new file mode 100644
index 000000000000..31a915fa4018
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/hyp_core_ctl.txt
@@ -0,0 +1,15 @@
+Qualcomm Technologies, Inc. Core Control for Hypervisor
+
+Required properties:
+- compatible: should be "qcom,hyp-core-ctl"
+- reg: An array of u32 values. reg[0] contains the token id to be used
+ for hyp core_ctl system calls to set/get physical CPUs corresponding
+ to the virtual CPUs. reg[1] ... reg[n] indicate the token ids
+ to be used while referring to the virtual CPUs respectively.
+
+Example:
+
+ hyp-core-ctl@346 {
+ compatible = "qcom,hyp-core-ctl";
+ reg = <0x346 0x347 0x348>;
+ };
diff --git a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
index 82e0d96c5cf4..2afc3404bd88 100644
--- a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
+++ b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
@@ -164,10 +164,7 @@ Optional properties:
"dfps_immediate_porch_mode_vfp" = FPS change request is
implemented immediately by changing panel vertical
front porch values.
-- qcom,min-refresh-rate: Minimum refresh rate supported by the panel.
-- qcom,max-refresh-rate: Maximum refresh rate supported by the panel. If max refresh
- rate is not specified, then the frame rate of the panel in
- qcom,mdss-dsi-panel-framerate is used.
+- qcom,dsi-supported-dfps-list: List containing all the supported refresh rates.
- qcom,mdss-dsi-bl-pmic-control-type: A string that specifies the implementation of backlight
control for this panel.
"bl_ctrl_pwm" = Backlight controlled by PWM gpio.
@@ -522,6 +519,10 @@ Optional properties:
- qcom,mdss-dsi-panel-cmds-only-by-right: Boolean used to mention whether the panel support DSI1 or
DSI0 to send commands. If this was set, that mean the panel only support
DSI1 to send commands, otherwise DSI0 will send comands.
+- qcom,dsi-dyn-clk-enable: Boolean to indicate dsi dynamic clock switch feature
+ is supported.
+- qcom,dsi-dyn-clk-list: An u32 array which lists all the supported dsi bit clock
+ frequencies in Hz for the given panel.
Required properties for sub-nodes: None
Optional properties:
@@ -643,8 +644,7 @@ Example:
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
qcom,mdss-dsi-pan-enable-dynamic-fps;
qcom,mdss-dsi-pan-fps-update = "dfps_suspend_resume_mode";
- qcom,min-refresh-rate = <30>;
- qcom,max-refresh-rate = <60>;
+ qcom,dsi-supported-dfps-list = <48 55 60>;
qcom,mdss-dsi-bl-pmic-bank-select = <0>;
qcom,mdss-dsi-bl-pmic-pwm-frequency = <0>;
qcom,mdss-dsi-pwm-gpio = <&pm8941_mpps 5 0>;
@@ -774,5 +774,7 @@ Example:
qcom,display-topology = <1 1 1>,
<2 2 1>;
qcom,default-topology-index = <0>;
+ qcom,dsi-dyn-clk-enable;
+ qcom,dsi-dyn-clk-list = <798240576 801594528 804948480>;
};
};
diff --git a/Documentation/devicetree/bindings/i2c/qcom,i2c-qcom-geni.txt b/Documentation/devicetree/bindings/i2c/qcom,i2c-qcom-geni.txt
index 21edaa0cf621..950884c25024 100644
--- a/Documentation/devicetree/bindings/i2c/qcom,i2c-qcom-geni.txt
+++ b/Documentation/devicetree/bindings/i2c/qcom,i2c-qcom-geni.txt
@@ -17,6 +17,9 @@ Required properties:
Optional property:
- qcom,clk-freq-out : Desired I2C bus clock frequency in Hz.
When missing default to 400000Hz.
+ - qcom,disable-autosuspend : Disable autosuspend for I2C controller and
+ I2C clients should call pm_runtime_get_sync()/put_sync() for the
+ I2C controller.
Child nodes should conform to i2c bus binding.
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,msm8937-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,msm8937-pinctrl.txt
index f6977048350b..39872c512932 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,msm8937-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,msm8937-pinctrl.txt
@@ -11,7 +11,13 @@ MSM8937 platform.
- reg:
Usage: required
Value type: <prop-encoded-array>
- Definition: the base address and size of the TLMM register space.
+ Definition: the base address and size of the TLMM register space
+ provided as "pinctrl_regs".
+
+- reg-names:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Provides labels for the reg property.
- interrupts:
Usage: required
@@ -167,6 +173,7 @@ Example:
tlmm: pinctrl@1000000 {
compatible = "qcom,msm8937-pinctrl";
reg = <0x1000000 0x300000>;
+ reg-names = "pinctrl_regs";
interrupts = <0 208 0>;
gpio-controller;
#gpio-cells = <2>;
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,msm8953-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,msm8953-pinctrl.txt
index 4b483e5d05b1..8da0a755c886 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,msm8953-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,msm8953-pinctrl.txt
@@ -11,7 +11,13 @@ MSM8953 platform.
- reg:
Usage: required
Value type: <prop-encoded-array>
- Definition: the base address and size of the TLMM register space.
+ Definition: the base address and size of the TLMM register space
+ provided as "pinctrl_regs".
+
+- reg-names:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Provides labels for the reg property.
- interrupts:
Usage: required
@@ -173,6 +179,7 @@ Example:
tlmm: pinctrl@1000000 {
compatible = "qcom,msm8953-pinctrl";
reg = <0x1000000 0x300000>;
+ reg-names = "pinctrl_regs";
interrupts = <0 208 0>;
gpio-controller;
#gpio-cells = <2>;
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sdm670-pinctrl b/Documentation/devicetree/bindings/pinctrl/qcom,sdm670-pinctrl
index 0eb1043ffc0b..2b3dc0176f41 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,sdm670-pinctrl
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,sdm670-pinctrl
@@ -11,7 +11,16 @@ SDM670 platform.
- reg:
Usage: required
Value type: <prop-encoded-array>
- Definition: the base address and size of the TLMM register space.
+ Definition: the base address and size of the TLMM register space
+ provided as "pinctrl_regs", optional base address of
+ PDC mux selection registers provided as "pdc_regs"
+ and optional base address of shared SPI config
+ registers provided as "spi_cfg_regs".
+
+- reg-names:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Provides labels for the reg property.
- interrupts:
Usage: required
@@ -137,7 +146,8 @@ Example:
tlmm: pinctrl@03400000 {
compatible = "qcom,sdm670-pinctrl";
- reg = <0x03400000 0xc00000>;
+ reg = <0x03400000 0xc00000>, <0x179900f0 0x60>;
+ reg-names = "pinctrl_regs", "spi_cfg_regs";
interrupts = <0 208 0>;
gpio-controller;
#gpio-cells = <2>;
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sdm845-pinctrl b/Documentation/devicetree/bindings/pinctrl/qcom,sdm845-pinctrl
index 7e75d2cae4a6..f8fc5d0209fd 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,sdm845-pinctrl
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,sdm845-pinctrl
@@ -11,7 +11,17 @@ SDM845 platform.
- reg:
Usage: required
Value type: <prop-encoded-array>
- Definition: the base address and size of the TLMM register space.
+ Definition: the base address and size of the TLMM register space
+ provided as "pinctrl_regs", optional base address of
+ PDC mux selection registers provided as "pdc_regs"
+ and optional base address of shared SPI config
+ registers provided as "spi_cfg_regs".
+
+- reg-names:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Provides labels for the reg property.
+
- interrupts:
Usage: required
@@ -177,7 +187,8 @@ Example:
tlmm: pinctrl@03400000 {
compatible = "qcom,sdm845-pinctrl";
- reg = <0x03800000 0xc00000>;
+ reg = <0x03800000 0xc00000>, <0x179900f0 0x60>;
+ reg-names = "pinctrl_regs", "spi_cfg_regs";
interrupts = <0 208 0>;
gpio-controller;
#gpio-cells = <2>;
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sdxpoorwill-pinctrl b/Documentation/devicetree/bindings/pinctrl/qcom,sdxpoorwill-pinctrl
index 9a69084a64a8..552cec811e93 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,sdxpoorwill-pinctrl
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,sdxpoorwill-pinctrl
@@ -11,7 +11,17 @@ SDXPOORWILLS platform.
- reg:
Usage: required
Value type: <prop-encoded-array>
- Definition: the base address and size of the TLMM register space.
+ Definition: the base address and size of the TLMM register space
+ provided as "pinctrl_regs", optional base address of
+ PDC mux selection registers provided as "pdc_regs"
+ and optional base address of shared SPI config
+ registers provided as "spi_cfg_regs".
+
+- reg-names:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Provides labels for the reg property.
+
- interrupts:
Usage: required
@@ -178,6 +188,7 @@ Example:
tlmm: pinctrl@03900000 {
compatible = "qcom,sdxpoorwills-pinctrl";
reg = <0x03900000 0x300000>;
+ reg-names = "pinctrl_regs", "pdc_regs";
interrupts = <0 212 0>;
gpio-controller;
#gpio-cells = <2>;
diff --git a/Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt b/Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt
index a034acc98aa6..e546dc22a368 100644
--- a/Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt
+++ b/Documentation/devicetree/bindings/qdsp/msm-fastrpc.txt
@@ -16,6 +16,7 @@ Optional properties:
- qcom,adsp-remoteheap-vmid: FastRPC remote heap VMID list
- qcom,fastrpc-adsp-audio-pdr: Flag to enable ADSP Audio PDR
- qcom,fastrpc-adsp-sensors-pdr: Flag to enable Sensors PDR
+- qcom,secure-domains: FastRPC secure domain configuration
Optional subnodes:
- qcom,msm_fastrpc_compute_cb : Child nodes representing the compute context
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi
index 6c172c10e808..bc95b0d61153 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-pinctrl.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -15,6 +15,7 @@
compatible = "qcom,sdxpoorwills-pinctrl";
reg = <0x3900000 0x300000>,
<0xB204900 0x280>;
+ reg-names = "pinctrl_regs", "pdc_regs";
interrupts = <0 212 0>;
gpio-controller;
#gpio-cells = <2>;
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 28dcd443a012..dad9fcbc3894 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -21,6 +21,7 @@
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/sched_energy.h>
#include <asm/cputype.h>
#include <asm/topology.h>
@@ -505,125 +506,31 @@ topology_populated:
update_cpu_capacity(cpuid);
}
-/*
- * ARM TC2 specific energy cost model data. There are no unit requirements for
- * the data. Data can be normalized to any reference point, but the
- * normalization must be consistent. That is, one bogo-joule/watt must be the
- * same quantity for all data, but we don't care what it is.
- */
-static struct idle_state idle_states_cluster_a7[] = {
- { .power = 25 }, /* arch_cpu_idle() (active idle) = WFI */
- { .power = 25 }, /* WFI */
- { .power = 10 }, /* cluster-sleep-l */
- };
-
-static struct idle_state idle_states_cluster_a15[] = {
- { .power = 70 }, /* arch_cpu_idle() (active idle) = WFI */
- { .power = 70 }, /* WFI */
- { .power = 25 }, /* cluster-sleep-b */
- };
-
-static struct capacity_state cap_states_cluster_a7[] = {
- /* Cluster only power */
- { .cap = 150, .power = 2967, }, /* 350 MHz */
- { .cap = 172, .power = 2792, }, /* 400 MHz */
- { .cap = 215, .power = 2810, }, /* 500 MHz */
- { .cap = 258, .power = 2815, }, /* 600 MHz */
- { .cap = 301, .power = 2919, }, /* 700 MHz */
- { .cap = 344, .power = 2847, }, /* 800 MHz */
- { .cap = 387, .power = 3917, }, /* 900 MHz */
- { .cap = 430, .power = 4905, }, /* 1000 MHz */
- };
-
-static struct capacity_state cap_states_cluster_a15[] = {
- /* Cluster only power */
- { .cap = 426, .power = 7920, }, /* 500 MHz */
- { .cap = 512, .power = 8165, }, /* 600 MHz */
- { .cap = 597, .power = 8172, }, /* 700 MHz */
- { .cap = 682, .power = 8195, }, /* 800 MHz */
- { .cap = 768, .power = 8265, }, /* 900 MHz */
- { .cap = 853, .power = 8446, }, /* 1000 MHz */
- { .cap = 938, .power = 11426, }, /* 1100 MHz */
- { .cap = 1024, .power = 15200, }, /* 1200 MHz */
- };
-
-static struct sched_group_energy energy_cluster_a7 = {
- .nr_idle_states = ARRAY_SIZE(idle_states_cluster_a7),
- .idle_states = idle_states_cluster_a7,
- .nr_cap_states = ARRAY_SIZE(cap_states_cluster_a7),
- .cap_states = cap_states_cluster_a7,
-};
-
-static struct sched_group_energy energy_cluster_a15 = {
- .nr_idle_states = ARRAY_SIZE(idle_states_cluster_a15),
- .idle_states = idle_states_cluster_a15,
- .nr_cap_states = ARRAY_SIZE(cap_states_cluster_a15),
- .cap_states = cap_states_cluster_a15,
-};
-
-static struct idle_state idle_states_core_a7[] = {
- { .power = 0 }, /* arch_cpu_idle (active idle) = WFI */
- { .power = 0 }, /* WFI */
- { .power = 0 }, /* cluster-sleep-l */
- };
-
-static struct idle_state idle_states_core_a15[] = {
- { .power = 0 }, /* arch_cpu_idle (active idle) = WFI */
- { .power = 0 }, /* WFI */
- { .power = 0 }, /* cluster-sleep-b */
- };
-
-static struct capacity_state cap_states_core_a7[] = {
- /* Power per cpu */
- { .cap = 150, .power = 187, }, /* 350 MHz */
- { .cap = 172, .power = 275, }, /* 400 MHz */
- { .cap = 215, .power = 334, }, /* 500 MHz */
- { .cap = 258, .power = 407, }, /* 600 MHz */
- { .cap = 301, .power = 447, }, /* 700 MHz */
- { .cap = 344, .power = 549, }, /* 800 MHz */
- { .cap = 387, .power = 761, }, /* 900 MHz */
- { .cap = 430, .power = 1024, }, /* 1000 MHz */
- };
-
-static struct capacity_state cap_states_core_a15[] = {
- /* Power per cpu */
- { .cap = 426, .power = 2021, }, /* 500 MHz */
- { .cap = 512, .power = 2312, }, /* 600 MHz */
- { .cap = 597, .power = 2756, }, /* 700 MHz */
- { .cap = 682, .power = 3125, }, /* 800 MHz */
- { .cap = 768, .power = 3524, }, /* 900 MHz */
- { .cap = 853, .power = 3846, }, /* 1000 MHz */
- { .cap = 938, .power = 5177, }, /* 1100 MHz */
- { .cap = 1024, .power = 6997, }, /* 1200 MHz */
- };
-
-static struct sched_group_energy energy_core_a7 = {
- .nr_idle_states = ARRAY_SIZE(idle_states_core_a7),
- .idle_states = idle_states_core_a7,
- .nr_cap_states = ARRAY_SIZE(cap_states_core_a7),
- .cap_states = cap_states_core_a7,
-};
-
-static struct sched_group_energy energy_core_a15 = {
- .nr_idle_states = ARRAY_SIZE(idle_states_core_a15),
- .idle_states = idle_states_core_a15,
- .nr_cap_states = ARRAY_SIZE(cap_states_core_a15),
- .cap_states = cap_states_core_a15,
-};
-
/* sd energy functions */
static inline
const struct sched_group_energy * const cpu_cluster_energy(int cpu)
{
- return cpu_topology[cpu].socket_id ? &energy_cluster_a7 :
- &energy_cluster_a15;
+ struct sched_group_energy *sge = sge_array[cpu][SD_LEVEL1];
+
+ if (sched_is_energy_aware() && !sge) {
+ pr_warn("Invalid sched_group_energy for Cluster%d\n", cpu);
+ return NULL;
+ }
+
+ return sge;
}
static inline
const struct sched_group_energy * const cpu_core_energy(int cpu)
{
- return cpu_topology[cpu].socket_id ? &energy_core_a7 :
- &energy_core_a15;
+ struct sched_group_energy *sge = sge_array[cpu][SD_LEVEL0];
+
+ if (sched_is_energy_aware() && !sge) {
+ pr_warn("Invalid sched_group_energy for CPU%d\n", cpu);
+ return NULL;
+ }
+
+ return sge;
}
static inline int cpu_corepower_flags(void)
@@ -688,4 +595,5 @@ void __init init_cpu_topology(void)
/* Set scheduler topology descriptor */
set_sched_topology(arm_topology);
+ init_sched_energy_costs();
}
diff --git a/arch/arm64/boot/dts/qcom/msm8937-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/msm8937-pinctrl.dtsi
index 17ee4652bcda..0247db3f2a62 100644
--- a/arch/arm64/boot/dts/qcom/msm8937-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8937-pinctrl.dtsi
@@ -1,5 +1,6 @@
/*
- * Copyright (c) 2015-2016, 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2016, 2018-2019, The Linux Foundation.
+ * All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -15,7 +16,9 @@
tlmm: pinctrl@1000000 {
compatible = "qcom,msm8937-pinctrl";
reg = <0x1000000 0x300000>;
+ reg-names = "pinctrl_regs";
interrupts = <0 208 0>;
+ interrupts-extended = <&wakegic GIC_SPI 208 IRQ_TYPE_NONE>;
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
diff --git a/arch/arm64/boot/dts/qcom/msm8953-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/msm8953-pinctrl.dtsi
index eec350d5f822..0fd9f295e8f5 100644
--- a/arch/arm64/boot/dts/qcom/msm8953-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953-pinctrl.dtsi
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -15,6 +15,7 @@
tlmm: pinctrl@1000000 {
compatible = "qcom,msm8953-pinctrl";
reg = <0x1000000 0x300000>;
+ reg-names = "pinctrl_regs";
interrupts = <0 208 0>;
gpio-controller;
#gpio-cells = <2>;
diff --git a/arch/arm64/boot/dts/qcom/qcs605-lc-sde-display.dtsi b/arch/arm64/boot/dts/qcom/qcs605-lc-sde-display.dtsi
index 99bf1e5f4a29..382fc1d5344f 100644
--- a/arch/arm64/boot/dts/qcom/qcs605-lc-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs605-lc-sde-display.dtsi
@@ -58,7 +58,7 @@
qcom,dsi-phy = <&mdss_dsi_phy0>;
clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
- clock-names = "src_byte_clk", "src_pixel_clk";
+ clock-names = "mux_byte_clk", "mux_pixel_clk";
ports {
#address-cells = <1>;
@@ -81,7 +81,7 @@
qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
- clock-names = "src_byte_clk", "src_pixel_clk";
+ clock-names = "mux_byte_clk", "mux_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -112,11 +112,10 @@
&dsi_dual_nt35597_truly_video {
qcom,mdss-dsi-t-clk-post = <0x0D>;
qcom,mdss-dsi-t-clk-pre = <0x2D>;
- qcom,mdss-dsi-min-refresh-rate = <53>;
- qcom,mdss-dsi-max-refresh-rate = <60>;
qcom,mdss-dsi-pan-enable-dynamic-fps;
qcom,mdss-dsi-pan-fps-update =
"dfps_immediate_porch_mode_vfp";
+ qcom,dsi-supported-dfps-list = <53 55 60>;
qcom,esd-check-enabled;
qcom,mdss-dsi-panel-status-check-mode = "reg_read";
qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
diff --git a/arch/arm64/boot/dts/qcom/qcs605.dtsi b/arch/arm64/boot/dts/qcom/qcs605.dtsi
index 16ae8de73bf8..c1040a11fbd7 100644
--- a/arch/arm64/boot/dts/qcom/qcs605.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs605.dtsi
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -80,6 +80,10 @@
status = "disabled";
};
+&sde_dp {
+ qcom,max-pclk-frequency-khz = <675000>;
+};
+
&mem_dump {
rpmh {
qcom,dump-size = <0x400000>;
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
index ee1a59379841..efd86afc40c5 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -13,7 +13,8 @@
&soc {
tlmm: pinctrl@03400000 {
compatible = "qcom,sdm670-pinctrl";
- reg = <0x03400000 0xc00000>;
+ reg = <0x03400000 0xc00000>, <0x179900F0 0x60>;
+ reg-names = "pinctrl_regs", "spi_cfg_regs";
interrupts = <0 208 0>;
gpio-controller;
#gpio-cells = <2>;
@@ -21,6 +22,52 @@
#interrupt-cells = <2>;
interrupt-parent = <&pdc>;
+ ufs_dev_reset_assert: ufs_dev_reset_assert {
+ config {
+ pins = "ufs_reset";
+ bias-pull-down; /* default: pull down */
+ /*
+ * UFS_RESET driver strengths are having
+ * different values/steps compared to typical
+ * GPIO drive strengths.
+ *
+ * Following table clarifies:
+ *
+ * HDRV value | UFS_RESET | Typical GPIO
+ * (dec) | (mA) | (mA)
+ * 0 | 0.8 | 2
+ * 1 | 1.55 | 4
+ * 2 | 2.35 | 6
+ * 3 | 3.1 | 8
+ * 4 | 3.9 | 10
+ * 5 | 4.65 | 12
+ * 6 | 5.4 | 14
+ * 7 | 6.15 | 16
+ *
+ * POR value for UFS_RESET HDRV is 3 which means
+ * 3.1mA and we want to use that. Hence just
+ * specify 8mA to "drive-strength" binding and
+ * that should result into writing 3 to HDRV
+ * field.
+ */
+ drive-strength = <8>; /* default: 3.1 mA */
+ output-low; /* active low reset */
+ };
+ };
+
+ ufs_dev_reset_deassert: ufs_dev_reset_deassert {
+ config {
+ pins = "ufs_reset";
+ bias-pull-down; /* default: pull down */
+ /*
+ * default: 3.1 mA
+ * check comments under ufs_dev_reset_assert
+ */
+ drive-strength = <8>;
+ output-high; /* active low reset */
+ };
+ };
+
/* QUPv3 South SE mappings */
/* SE 0 pin mappings */
qupv3_se0_i2c_pins: qupv3_se0_i2c_pins {
diff --git a/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
index 395e88c97348..a98b702ca715 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
@@ -134,9 +134,16 @@
qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
+
clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
- <&mdss_dsi0_pll PCLK_MUX_0_CLK>;
- clock-names = "src_byte_clk", "src_pixel_clk";
+ <&mdss_dsi0_pll PCLK_MUX_0_CLK>,
+ <&mdss_dsi0_pll BYTECLK_SRC_0_CLK>,
+ <&mdss_dsi0_pll PCLK_SRC_0_CLK>,
+ <&mdss_dsi0_pll SHADOW_BYTECLK_SRC_0_CLK>,
+ <&mdss_dsi0_pll SHADOW_PCLK_SRC_0_CLK>;
+ clock-names = "mux_byte_clk", "mux_pixel_clk",
+ "src_byte_clk", "src_pixel_clk",
+ "shadow_byte_clk", "shadow_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -159,7 +166,7 @@
qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
- clock-names = "src_byte_clk", "src_pixel_clk";
+ clock-names = "mux_byte_clk", "mux_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -183,7 +190,7 @@
qcom,dsi-phy = <&mdss_dsi_phy1>;
clocks = <&mdss_dsi1_pll BYTECLK_MUX_1_CLK>,
<&mdss_dsi1_pll PCLK_MUX_1_CLK>;
- clock-names = "src_byte_clk", "src_pixel_clk";
+ clock-names = "mux_byte_clk", "mux_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -206,8 +213,14 @@
qcom,dsi-ctrl = <&mdss_dsi1>;
qcom,dsi-phy = <&mdss_dsi_phy1>;
clocks = <&mdss_dsi1_pll BYTECLK_MUX_1_CLK>,
- <&mdss_dsi1_pll PCLK_MUX_1_CLK>;
- clock-names = "src_byte_clk", "src_pixel_clk";
+ <&mdss_dsi1_pll PCLK_MUX_1_CLK>,
+ <&mdss_dsi1_pll BYTECLK_SRC_1_CLK>,
+ <&mdss_dsi1_pll PCLK_SRC_1_CLK>,
+ <&mdss_dsi1_pll SHADOW_BYTECLK_SRC_1_CLK>,
+ <&mdss_dsi1_pll SHADOW_PCLK_SRC_1_CLK>;
+ clock-names = "mux_byte_clk", "mux_pixel_clk",
+ "src_byte_clk", "src_pixel_clk",
+ "shadow_byte_clk", "shadow_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -231,7 +244,7 @@
qcom,dsi-phy = <&mdss_dsi_phy0>;
clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
- clock-names = "src_byte_clk", "src_pixel_clk";
+ clock-names = "mux_byte_clk", "mux_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -249,7 +262,7 @@
qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
- clock-names = "src_byte_clk", "src_pixel_clk";
+ clock-names = "mux_byte_clk", "mux_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -267,7 +280,7 @@
qcom,dsi-phy = <&mdss_dsi_phy0>;
clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
- clock-names = "src_byte_clk", "src_pixel_clk";
+ clock-names = "mux_byte_clk", "mux_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -285,7 +298,7 @@
qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
- clock-names = "src_byte_clk", "src_pixel_clk";
+ clock-names = "mux_byte_clk", "mux_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -303,7 +316,7 @@
qcom,dsi-phy = <&mdss_dsi_phy0>;
clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
- clock-names = "src_byte_clk", "src_pixel_clk";
+ clock-names = "mux_byte_clk", "mux_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -321,7 +334,7 @@
qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
- clock-names = "src_byte_clk", "src_pixel_clk";
+ clock-names = "mux_byte_clk", "mux_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -339,7 +352,7 @@
qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
- clock-names = "src_byte_clk", "src_pixel_clk";
+ clock-names = "mux_byte_clk", "mux_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -362,7 +375,7 @@
qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
- clock-names = "src_byte_clk", "src_pixel_clk";
+ clock-names = "mux_byte_clk", "mux_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -385,7 +398,7 @@
qcom,dsi-phy = <&mdss_dsi_phy0>;
clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
- clock-names = "src_byte_clk", "src_pixel_clk";
+ clock-names = "mux_byte_clk", "mux_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -407,7 +420,7 @@
qcom,dsi-phy = <&mdss_dsi_phy0>;
clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
- clock-names = "src_byte_clk", "src_pixel_clk";
+ clock-names = "mux_byte_clk", "mux_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -429,7 +442,7 @@
qcom,dsi-phy = <&mdss_dsi_phy0>;
clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
- clock-names = "src_byte_clk", "src_pixel_clk";
+ clock-names = "mux_byte_clk", "mux_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -452,7 +465,7 @@
qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
- clock-names = "src_byte_clk", "src_pixel_clk";
+ clock-names = "mux_byte_clk", "mux_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -475,7 +488,7 @@
qcom,dsi-phy = <&mdss_dsi_phy0>;
clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
- clock-names = "src_byte_clk", "src_pixel_clk";
+ clock-names = "mux_byte_clk", "mux_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -498,7 +511,7 @@
qcom,dsi-phy = <&mdss_dsi_phy0>;
clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
- clock-names = "src_byte_clk", "src_pixel_clk";
+ clock-names = "mux_byte_clk", "mux_pixel_clk";
ports {
#address-cells = <1>;
@@ -546,11 +559,10 @@
&dsi_dual_nt35597_truly_video {
qcom,mdss-dsi-t-clk-post = <0x0D>;
qcom,mdss-dsi-t-clk-pre = <0x2D>;
- qcom,mdss-dsi-min-refresh-rate = <53>;
- qcom,mdss-dsi-max-refresh-rate = <60>;
qcom,mdss-dsi-pan-enable-dynamic-fps;
qcom,mdss-dsi-pan-fps-update =
"dfps_immediate_porch_mode_vfp";
+ qcom,dsi-supported-dfps-list = <53 55 60>;
qcom,esd-check-enabled;
qcom,mdss-dsi-panel-status-check-mode = "reg_read";
qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
@@ -558,6 +570,9 @@
qcom,mdss-dsi-panel-status-value = <0x9c>;
qcom,mdss-dsi-panel-on-check-value = <0x9c>;
qcom,mdss-dsi-panel-status-read-length = <1>;
+ qcom,dsi-dyn-clk-enable;
+ qcom,dsi-dyn-clk-list =
+ <804948480 798240576 801594528 808302432 811656384>;
qcom,mdss-dsi-display-timings {
timing@0{
qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07
@@ -619,11 +634,10 @@
&dsi_nt35597_truly_dsc_video {
qcom,mdss-dsi-t-clk-post = <0x0b>;
qcom,mdss-dsi-t-clk-pre = <0x23>;
- qcom,mdss-dsi-min-refresh-rate = <53>;
- qcom,mdss-dsi-max-refresh-rate = <60>;
qcom,mdss-dsi-pan-enable-dynamic-fps;
qcom,mdss-dsi-pan-fps-update =
"dfps_immediate_porch_mode_vfp";
+ qcom,dsi-supported-dfps-list = <53 55 60>;
qcom,esd-check-enabled;
qcom,mdss-dsi-panel-status-check-mode = "reg_read";
qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
@@ -818,11 +832,10 @@
&dsi_nt35695b_truly_fhd_video {
qcom,mdss-dsi-t-clk-post = <0x07>;
qcom,mdss-dsi-t-clk-pre = <0x1c>;
- qcom,mdss-dsi-min-refresh-rate = <48>;
- qcom,mdss-dsi-max-refresh-rate = <60>;
qcom,mdss-dsi-pan-enable-dynamic-fps;
qcom,mdss-dsi-pan-fps-update =
"dfps_immediate_porch_mode_vfp";
+ qcom,dsi-supported-dfps-list = <48 53 55 60>;
qcom,mdss-dsi-display-timings {
timing@0 {
qcom,mdss-dsi-panel-phy-timings = [00 1c 05 06 0b 0c
@@ -873,11 +886,10 @@
&dsi_hx8399_truly_cmd {
qcom,mdss-dsi-t-clk-post = <0x0E>;
qcom,mdss-dsi-t-clk-pre = <0x30>;
- qcom,mdss-dsi-min-refresh-rate = <55>;
- qcom,mdss-dsi-max-refresh-rate = <60>;
qcom,mdss-dsi-pan-enable-dynamic-fps;
qcom,mdss-dsi-pan-fps-update =
"dfps_immediate_porch_mode_vfp";
+ qcom,dsi-supported-dfps-list = <55 60>;
qcom,esd-check-enabled;
qcom,mdss-dsi-panel-status-check-mode = "reg_read";
qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
diff --git a/arch/arm64/boot/dts/qcom/sdm670-sde-pll.dtsi b/arch/arm64/boot/dts/qcom/sdm670-sde-pll.dtsi
index 72e3f5f55372..326f4c004762 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-sde-pll.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-sde-pll.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -18,11 +18,14 @@
#clock-cells = <1>;
reg = <0xae94a00 0x1e0>,
<0xae94400 0x800>,
- <0xaf03000 0x8>;
- reg-names = "pll_base", "phy_base", "gdsc_base";
+ <0xaf03000 0x8>,
+ <0xae94200 0x100>;
+ reg-names = "pll_base", "phy_base", "gdsc_base",
+ "dynamic_pll_base";
clocks = <&clock_dispcc DISP_CC_MDSS_AHB_CLK>;
clock-names = "iface_clk";
clock-rate = <0>;
+ memory-region = <&dfps_data_memory>;
gdsc-supply = <&mdss_core_gdsc>;
qcom,platform-supply-entries {
#address-cells = <1>;
@@ -45,8 +48,10 @@
#clock-cells = <1>;
reg = <0xae96a00 0x1e0>,
<0xae96400 0x800>,
- <0xaf03000 0x8>;
- reg-names = "pll_base", "phy_base", "gdsc_base";
+ <0xaf03000 0x8>,
+ <0xae96200 0x100>;
+ reg-names = "pll_base", "phy_base", "gdsc_base",
+ "dynamic_pll_base";
clocks = <&clock_dispcc DISP_CC_MDSS_AHB_CLK>;
clock-names = "iface_clk";
clock-rate = <0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm670-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm670-sde.dtsi
index 30229982e004..0d08a03d0b4b 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-sde.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -505,8 +505,9 @@
compatible = "qcom,dsi-phy-v3.0";
label = "dsi-phy-0";
cell-index = <0>;
- reg = <0xae94400 0x7c0>;
- reg-names = "dsi_phy";
+ reg = <0xae94400 0x7c0>,
+ <0xae94200 0x100>;
+ reg-names = "dsi_phy", "dyn_refresh_base";
gdsc-supply = <&mdss_core_gdsc>;
vdda-0p9-supply = <&pm660l_l1>;
qcom,platform-strength-ctrl = [55 03
@@ -538,8 +539,9 @@
compatible = "qcom,dsi-phy-v3.0";
label = "dsi-phy-1";
cell-index = <1>;
- reg = <0xae96400 0x7c0>;
- reg-names = "dsi_phy";
+ reg = <0xae96400 0x7c0>,
+ <0xae96200 0x100>;
+ reg-names = "dsi_phy", "dyn_refresh_base";
gdsc-supply = <&mdss_core_gdsc>;
vdda-0p9-supply = <&pm660l_l1>;
qcom,platform-strength-ctrl = [55 03
@@ -623,7 +625,7 @@
qcom,aux-cfg8-settings = [40 bb];
qcom,aux-cfg9-settings = [44 03];
- qcom,max-pclk-frequency-khz = <675000>;
+ qcom,max-pclk-frequency-khz = <300000>;
qcom,ctrl-supply-entries {
#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
index 2dd0b038bebb..0fa7c5930c2c 100644
--- a/arch/arm64/boot/dts/qcom/sdm670.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
@@ -588,10 +588,15 @@
};
cont_splash_memory: cont_splash_region@9c000000 {
- reg = <0x0 0x9c000000 0x0 0x02400000>;
+ reg = <0x0 0x9c000000 0x0 0x2300000>;
label = "cont_splash_region";
};
+ dfps_data_memory: dfps_data_region@9e300000 {
+ reg = <0x0 0x9e300000 0x0 0x0100000>;
+ label = "dfps_data_region";
+ };
+
dump_mem: mem_dump_region {
compatible = "shared-dma-pool";
reusable;
@@ -1933,6 +1938,10 @@
qcom,pm-qos-cpu-group-latency-us = <70 70>;
qcom,pm-qos-default-cpu = <0>;
+ pinctrl-names = "dev-reset-assert", "dev-reset-deassert";
+ pinctrl-0 = <&ufs_dev_reset_assert>;
+ pinctrl-1 = <&ufs_dev_reset_deassert>;
+
resets = <&clock_gcc GCC_UFS_PHY_BCR>;
reset-names = "core_reset";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
index 5aaef8d46b4f..9b24f117a8a9 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -13,7 +13,8 @@
&soc {
tlmm: pinctrl@03400000 {
compatible = "qcom,sdm845-pinctrl";
- reg = <0x03400000 0xc00000>;
+ reg = <0x03400000 0xc00000>, <0x179900F0 0x60>;
+ reg-names = "pinctrl_regs", "spi_cfg_regs";
interrupts = <0 208 0>;
gpio-controller;
#gpio-cells = <2>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
index 8b67649496f9..5a4b8b26f4b3 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
@@ -115,7 +115,7 @@
qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
- clock-names = "src_byte_clk", "src_pixel_clk";
+ clock-names = "mux_byte_clk", "mux_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -139,7 +139,7 @@
qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
- clock-names = "src_byte_clk", "src_pixel_clk";
+ clock-names = "mux_byte_clk", "mux_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -163,7 +163,7 @@
qcom,dsi-phy = <&mdss_dsi_phy0>;
clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
- clock-names = "src_byte_clk", "src_pixel_clk";
+ clock-names = "mux_byte_clk", "mux_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -187,7 +187,7 @@
qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
- clock-names = "src_byte_clk", "src_pixel_clk";
+ clock-names = "mux_byte_clk", "mux_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -211,7 +211,7 @@
qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
- clock-names = "src_byte_clk", "src_pixel_clk";
+ clock-names = "mux_byte_clk", "mux_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -234,7 +234,7 @@
qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
- clock-names = "src_byte_clk", "src_pixel_clk";
+ clock-names = "mux_byte_clk", "mux_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -258,7 +258,7 @@
qcom,dsi-phy = <&mdss_dsi_phy1>;
clocks = <&mdss_dsi1_pll BYTECLK_MUX_1_CLK>,
<&mdss_dsi1_pll PCLK_MUX_1_CLK>;
- clock-names = "src_byte_clk", "src_pixel_clk";
+ clock-names = "mux_byte_clk", "mux_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -282,7 +282,7 @@
qcom,dsi-phy = <&mdss_dsi_phy1>;
clocks = <&mdss_dsi1_pll BYTECLK_MUX_1_CLK>,
<&mdss_dsi1_pll PCLK_MUX_1_CLK>;
- clock-names = "src_byte_clk", "src_pixel_clk";
+ clock-names = "mux_byte_clk", "mux_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -306,7 +306,7 @@
qcom,dsi-phy = <&mdss_dsi_phy0>;
clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
- clock-names = "src_byte_clk", "src_pixel_clk";
+ clock-names = "mux_byte_clk", "mux_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -324,7 +324,7 @@
qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
- clock-names = "src_byte_clk", "src_pixel_clk";
+ clock-names = "mux_byte_clk", "mux_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -342,7 +342,7 @@
qcom,dsi-phy = <&mdss_dsi_phy0>;
clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
- clock-names = "src_byte_clk", "src_pixel_clk";
+ clock-names = "mux_byte_clk", "mux_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -360,7 +360,7 @@
qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
- clock-names = "src_byte_clk", "src_pixel_clk";
+ clock-names = "mux_byte_clk", "mux_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -378,7 +378,7 @@
qcom,dsi-phy = <&mdss_dsi_phy0>;
clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
- clock-names = "src_byte_clk", "src_pixel_clk";
+ clock-names = "mux_byte_clk", "mux_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -396,7 +396,7 @@
qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
- clock-names = "src_byte_clk", "src_pixel_clk";
+ clock-names = "mux_byte_clk", "mux_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -414,7 +414,7 @@
qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
- clock-names = "src_byte_clk", "src_pixel_clk";
+ clock-names = "mux_byte_clk", "mux_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -438,7 +438,7 @@
qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
- clock-names = "src_byte_clk", "src_pixel_clk";
+ clock-names = "mux_byte_clk", "mux_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -462,7 +462,7 @@
qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
- clock-names = "src_byte_clk", "src_pixel_clk";
+ clock-names = "mux_byte_clk", "mux_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -486,7 +486,7 @@
qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
- clock-names = "src_byte_clk", "src_pixel_clk";
+ clock-names = "mux_byte_clk", "mux_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
pinctrl-0 = <&sde_dsi_active &sde_te_active>;
diff --git a/arch/arm64/configs/okl4_virtual_platform_sdm670_vm_defconfig b/arch/arm64/configs/okl4_virtual_platform_sdm670_vm_defconfig
index d66a33c6140b..20a1d099c11f 100644
--- a/arch/arm64/configs/okl4_virtual_platform_sdm670_vm_defconfig
+++ b/arch/arm64/configs/okl4_virtual_platform_sdm670_vm_defconfig
@@ -14,8 +14,10 @@ CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_ARCH_SDM670_VM=y
CONFIG_NR_CPUS=8
+CONFIG_HZ_100=y
CONFIG_CMA=y
CONFIG_OKL4_GUEST=y
+CONFIG_HARDEN_BRANCH_PREDICTOR=y
CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
CONFIG_BINFMT_MISC=y
CONFIG_COMPAT=y
@@ -56,6 +58,7 @@ CONFIG_DMA_CMA=y
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_RAM=m
# CONFIG_VSERVICES_BLOCK_SERVER is not set
+CONFIG_SCBUF_CLIENT=y
# CONFIG_OKL4_LINK_SHBUF is not set
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 52710f11d473..fc7c94cd5862 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -779,8 +779,8 @@ static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
- /* Ignore if we don't have an event or if it's a zombie event */
- if (!event || event->state == PERF_EVENT_STATE_ZOMBIE)
+ /* Ignore if we don't have an event */
+ if (!event || event->state != PERF_EVENT_STATE_ACTIVE)
continue;
/*
diff --git a/block/bio.c b/block/bio.c
index b1ff4629f84b..c5d9a774d06c 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -706,6 +706,7 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
}
}
+ bio_clone_crypt_key(bio, bio_src);
bio_clone_blkcg_association(bio, bio_src);
return bio;
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 1ef2f68bfcb2..81738eb8c2fc 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -360,6 +360,8 @@ struct binder_error {
* (invariant after initialized)
* @inherit_rt: inherit RT scheduling policy from caller
* (invariant after initialized)
+ * @txn_security_ctx: require sender's security context
+ * (invariant after initialized)
* @async_todo: list of async work items
* (protected by @proc->inner_lock)
*
@@ -398,6 +400,7 @@ struct binder_node {
u8 sched_policy:2;
u8 inherit_rt:1;
u8 accept_fds:1;
+ u8 txn_security_ctx:1;
u8 min_priority;
};
bool has_async_transaction;
@@ -465,8 +468,9 @@ struct binder_ref {
};
enum binder_deferred_state {
- BINDER_DEFERRED_FLUSH = 0x01,
- BINDER_DEFERRED_RELEASE = 0x02,
+ BINDER_DEFERRED_PUT_FILES = 0x01,
+ BINDER_DEFERRED_FLUSH = 0x02,
+ BINDER_DEFERRED_RELEASE = 0x04,
};
/**
@@ -503,6 +507,9 @@ struct binder_priority {
* (invariant after initialized)
* @tsk task_struct for group_leader of process
* (invariant after initialized)
+ * @files files_struct for process
+ * (protected by @files_lock)
+ * @files_lock mutex to protect @files
* @deferred_work_node: element for binder_deferred_list
* (protected by binder_deferred_lock)
* @deferred_work: bitmap of deferred work to perform
@@ -549,6 +556,8 @@ struct binder_proc {
struct list_head waiting_threads;
int pid;
struct task_struct *tsk;
+ struct files_struct *files;
+ struct mutex files_lock;
struct hlist_node deferred_work_node;
int deferred_work;
bool is_dead;
@@ -652,6 +661,7 @@ struct binder_transaction {
struct binder_priority saved_priority;
bool set_priority_called;
kuid_t sender_euid;
+ binder_uintptr_t security_ctx;
/**
* @lock: protects @from, @to_proc, and @to_thread
*
@@ -944,33 +954,27 @@ static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
-struct files_struct *binder_get_files_struct(struct binder_proc *proc)
-{
- return get_files_struct(proc->tsk);
-}
-
static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
- struct files_struct *files;
unsigned long rlim_cur;
unsigned long irqs;
int ret;
- files = binder_get_files_struct(proc);
- if (files == NULL)
- return -ESRCH;
-
+ mutex_lock(&proc->files_lock);
+ if (proc->files == NULL) {
+ ret = -ESRCH;
+ goto err;
+ }
if (!lock_task_sighand(proc->tsk, &irqs)) {
ret = -EMFILE;
goto err;
}
-
rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
unlock_task_sighand(proc->tsk, &irqs);
- ret = __alloc_fd(files, 0, rlim_cur, flags);
+ ret = __alloc_fd(proc->files, 0, rlim_cur, flags);
err:
- put_files_struct(files);
+ mutex_unlock(&proc->files_lock);
return ret;
}
@@ -980,12 +984,10 @@ err:
static void task_fd_install(
struct binder_proc *proc, unsigned int fd, struct file *file)
{
- struct files_struct *files = binder_get_files_struct(proc);
-
- if (files) {
- __fd_install(files, fd, file);
- put_files_struct(files);
- }
+ mutex_lock(&proc->files_lock);
+ if (proc->files)
+ __fd_install(proc->files, fd, file);
+ mutex_unlock(&proc->files_lock);
}
/*
@@ -993,21 +995,22 @@ static void task_fd_install(
*/
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
- struct files_struct *files = binder_get_files_struct(proc);
int retval;
- if (files == NULL)
- return -ESRCH;
-
- retval = __close_fd(files, fd);
+ mutex_lock(&proc->files_lock);
+ if (proc->files == NULL) {
+ retval = -ESRCH;
+ goto err;
+ }
+ retval = __close_fd(proc->files, fd);
/* can't restart close syscall because file table entry was cleared */
if (unlikely(retval == -ERESTARTSYS ||
retval == -ERESTARTNOINTR ||
retval == -ERESTARTNOHAND ||
retval == -ERESTART_RESTARTBLOCK))
retval = -EINTR;
- put_files_struct(files);
-
+err:
+ mutex_unlock(&proc->files_lock);
return retval;
}
@@ -1368,6 +1371,7 @@ static struct binder_node *binder_init_node_ilocked(
node->min_priority = to_kernel_prio(node->sched_policy, priority);
node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT);
+ node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
spin_lock_init(&node->lock);
INIT_LIST_HEAD(&node->work.entry);
INIT_LIST_HEAD(&node->async_todo);
@@ -2900,6 +2904,8 @@ static void binder_transaction(struct binder_proc *proc,
binder_size_t last_fixup_min_off = 0;
struct binder_context *context = proc->context;
int t_debug_id = atomic_inc_return(&binder_last_id);
+ char *secctx = NULL;
+ u32 secctx_sz = 0;
e = binder_transaction_log_add(&binder_transaction_log);
e->debug_id = t_debug_id;
@@ -3115,6 +3121,20 @@ static void binder_transaction(struct binder_proc *proc,
t->priority = target_proc->default_priority;
}
+ if (target_node && target_node->txn_security_ctx) {
+ u32 secid;
+
+ security_task_getsecid(proc->tsk, &secid);
+ ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
+ if (ret) {
+ return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
+ return_error_line = __LINE__;
+ goto err_get_secctx_failed;
+ }
+ extra_buffers_size += ALIGN(secctx_sz, sizeof(u64));
+ }
+
trace_binder_transaction(reply, t, target_node);
t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
@@ -3131,7 +3151,19 @@ static void binder_transaction(struct binder_proc *proc,
t->buffer = NULL;
goto err_binder_alloc_buf_failed;
}
- t->buffer->allow_user_free = 0;
+ if (secctx) {
+ size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
+ ALIGN(tr->offsets_size, sizeof(void *)) +
+ ALIGN(extra_buffers_size, sizeof(void *)) -
+ ALIGN(secctx_sz, sizeof(u64));
+ char *kptr = t->buffer->data + buf_offset;
+
+ t->security_ctx = (binder_uintptr_t)kptr +
+ binder_alloc_get_user_buffer_offset(&target_proc->alloc);
+ memcpy(kptr, secctx, secctx_sz);
+ security_release_secctx(secctx, secctx_sz);
+ secctx = NULL;
+ }
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
t->buffer->target_node = target_node;
@@ -3402,6 +3434,9 @@ err_copy_data_failed:
t->buffer->transaction = NULL;
binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
+ if (secctx)
+ security_release_secctx(secctx, secctx_sz);
+err_get_secctx_failed:
kfree(tcomplete);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
@@ -3627,14 +3662,18 @@ static int binder_thread_write(struct binder_proc *proc,
buffer = binder_alloc_prepare_to_free(&proc->alloc,
data_ptr);
- if (buffer == NULL) {
- binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
- proc->pid, thread->pid, (u64)data_ptr);
- break;
- }
- if (!buffer->allow_user_free) {
- binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
- proc->pid, thread->pid, (u64)data_ptr);
+ if (IS_ERR_OR_NULL(buffer)) {
+ if (PTR_ERR(buffer) == -EPERM) {
+ binder_user_error(
+ "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
+ proc->pid, thread->pid,
+ (u64)data_ptr);
+ } else {
+ binder_user_error(
+ "%d:%d BC_FREE_BUFFER u%016llx no match\n",
+ proc->pid, thread->pid,
+ (u64)data_ptr);
+ }
break;
}
binder_debug(BINDER_DEBUG_FREE_BUFFER,
@@ -4044,11 +4083,13 @@ retry:
while (1) {
uint32_t cmd;
- struct binder_transaction_data tr;
+ struct binder_transaction_data_secctx tr;
+ struct binder_transaction_data *trd = &tr.transaction_data;
struct binder_work *w = NULL;
struct list_head *list = NULL;
struct binder_transaction *t = NULL;
struct binder_thread *t_from;
+ size_t trsize = sizeof(*trd);
binder_inner_proc_lock(proc);
if (!binder_worklist_empty_ilocked(&thread->todo))
@@ -4243,41 +4284,47 @@ retry:
struct binder_node *target_node = t->buffer->target_node;
struct binder_priority node_prio;
- tr.target.ptr = target_node->ptr;
- tr.cookie = target_node->cookie;
+ trd->target.ptr = target_node->ptr;
+ trd->cookie = target_node->cookie;
node_prio.sched_policy = target_node->sched_policy;
node_prio.prio = target_node->min_priority;
binder_transaction_priority(current, t, node_prio,
target_node->inherit_rt);
cmd = BR_TRANSACTION;
} else {
- tr.target.ptr = 0;
- tr.cookie = 0;
+ trd->target.ptr = 0;
+ trd->cookie = 0;
cmd = BR_REPLY;
}
- tr.code = t->code;
- tr.flags = t->flags;
- tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
+ trd->code = t->code;
+ trd->flags = t->flags;
+ trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
t_from = binder_get_txn_from(t);
if (t_from) {
struct task_struct *sender = t_from->proc->tsk;
- tr.sender_pid = task_tgid_nr_ns(sender,
- task_active_pid_ns(current));
+ trd->sender_pid =
+ task_tgid_nr_ns(sender,
+ task_active_pid_ns(current));
} else {
- tr.sender_pid = 0;
+ trd->sender_pid = 0;
}
- tr.data_size = t->buffer->data_size;
- tr.offsets_size = t->buffer->offsets_size;
- tr.data.ptr.buffer = (binder_uintptr_t)
+ trd->data_size = t->buffer->data_size;
+ trd->offsets_size = t->buffer->offsets_size;
+ trd->data.ptr.buffer = (binder_uintptr_t)
((uintptr_t)t->buffer->data +
binder_alloc_get_user_buffer_offset(&proc->alloc));
- tr.data.ptr.offsets = tr.data.ptr.buffer +
+ trd->data.ptr.offsets = trd->data.ptr.buffer +
ALIGN(t->buffer->data_size,
sizeof(void *));
+ if (t->security_ctx) {
+ cmd = BR_TRANSACTION_SEC_CTX;
+ tr.secctx = t->security_ctx;
+ trsize = sizeof(tr);
+ }
if (put_user(cmd, (uint32_t __user *)ptr)) {
if (t_from)
binder_thread_dec_tmpref(t_from);
@@ -4288,7 +4335,7 @@ retry:
return -EFAULT;
}
ptr += sizeof(uint32_t);
- if (copy_to_user(ptr, &tr, sizeof(tr))) {
+ if (copy_to_user(ptr, &tr, trsize)) {
if (t_from)
binder_thread_dec_tmpref(t_from);
@@ -4297,7 +4344,7 @@ retry:
return -EFAULT;
}
- ptr += sizeof(tr);
+ ptr += trsize;
trace_binder_transaction_received(t);
binder_stat_br(proc, thread, cmd);
@@ -4305,16 +4352,18 @@ retry:
"%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
proc->pid, thread->pid,
(cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
- "BR_REPLY",
+ (cmd == BR_TRANSACTION_SEC_CTX) ?
+ "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
t->debug_id, t_from ? t_from->proc->pid : 0,
t_from ? t_from->pid : 0, cmd,
t->buffer->data_size, t->buffer->offsets_size,
- (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
+ (u64)trd->data.ptr.buffer,
+ (u64)trd->data.ptr.offsets);
if (t_from)
binder_thread_dec_tmpref(t_from);
t->buffer->allow_user_free = 1;
- if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
+ if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
binder_inner_proc_lock(thread->proc);
t->to_parent = thread->transaction_stack;
t->to_thread = thread;
@@ -4636,7 +4685,8 @@ out:
return ret;
}
-static int binder_ioctl_set_ctx_mgr(struct file *filp)
+static int binder_ioctl_set_ctx_mgr(struct file *filp,
+ struct flat_binder_object *fbo)
{
int ret = 0;
struct binder_proc *proc = filp->private_data;
@@ -4665,7 +4715,7 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp)
} else {
context->binder_context_mgr_uid = curr_euid;
}
- new_node = binder_new_node(proc, NULL);
+ new_node = binder_new_node(proc, fbo);
if (!new_node) {
ret = -ENOMEM;
goto out;
@@ -4751,8 +4801,20 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
binder_inner_proc_unlock(proc);
break;
}
+ case BINDER_SET_CONTEXT_MGR_EXT: {
+ struct flat_binder_object fbo;
+
+ if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
+ ret = -EINVAL;
+ goto err;
+ }
+ ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
+ if (ret)
+ goto err;
+ break;
+ }
case BINDER_SET_CONTEXT_MGR:
- ret = binder_ioctl_set_ctx_mgr(filp);
+ ret = binder_ioctl_set_ctx_mgr(filp, NULL);
if (ret)
goto err;
break;
@@ -4831,6 +4893,7 @@ static void binder_vma_close(struct vm_area_struct *vma)
(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
(unsigned long)pgprot_val(vma->vm_page_prot));
binder_alloc_vma_close(&proc->alloc);
+ binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}
static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
@@ -4872,8 +4935,12 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_private_data = proc;
ret = binder_alloc_mmap_handler(&proc->alloc, vma);
-
- return ret;
+ if (ret)
+ return ret;
+ mutex_lock(&proc->files_lock);
+ proc->files = get_files_struct(current);
+ mutex_unlock(&proc->files_lock);
+ return 0;
err_bad_arg:
pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
@@ -4896,6 +4963,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
spin_lock_init(&proc->outer_lock);
get_task_struct(current->group_leader);
proc->tsk = current->group_leader;
+ mutex_init(&proc->files_lock);
INIT_LIST_HEAD(&proc->todo);
if (binder_supported_policy(current->policy)) {
proc->default_priority.sched_policy = current->policy;
@@ -5052,6 +5120,8 @@ static void binder_deferred_release(struct binder_proc *proc)
struct rb_node *n;
int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
+ BUG_ON(proc->files);
+
mutex_lock(&binder_procs_lock);
hlist_del(&proc->proc_node);
mutex_unlock(&binder_procs_lock);
@@ -5133,6 +5203,8 @@ static void binder_deferred_release(struct binder_proc *proc)
static void binder_deferred_func(struct work_struct *work)
{
struct binder_proc *proc;
+ struct files_struct *files;
+
int defer;
do {
@@ -5149,11 +5221,23 @@ static void binder_deferred_func(struct work_struct *work)
}
mutex_unlock(&binder_deferred_lock);
+ files = NULL;
+ if (defer & BINDER_DEFERRED_PUT_FILES) {
+ mutex_lock(&proc->files_lock);
+ files = proc->files;
+ if (files)
+ proc->files = NULL;
+ mutex_unlock(&proc->files_lock);
+ }
+
if (defer & BINDER_DEFERRED_FLUSH)
binder_deferred_flush(proc);
if (defer & BINDER_DEFERRED_RELEASE)
binder_deferred_release(proc); /* frees proc */
+
+ if (files)
+ put_files_struct(files);
} while (proc);
}
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 3ad1bcf39121..7924b56e06d2 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -149,14 +149,12 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
else {
/*
* Guard against user threads attempting to
- * free the buffer twice
+ * free the buffer when in use by kernel or
+ * after it's already been freed.
*/
- if (buffer->free_in_progress) {
- pr_err("%d:%d FREE_BUFFER u%016llx user freed buffer twice\n",
- alloc->pid, current->pid, (u64)user_ptr);
- return NULL;
- }
- buffer->free_in_progress = 1;
+ if (!buffer->allow_user_free)
+ return ERR_PTR(-EPERM);
+ buffer->allow_user_free = 0;
return buffer;
}
}
@@ -458,7 +456,7 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
rb_erase(best_fit, &alloc->free_buffers);
buffer->free = 0;
- buffer->free_in_progress = 0;
+ buffer->allow_user_free = 0;
binder_insert_allocated_buffer_locked(alloc, buffer);
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: binder_alloc_buf size %zd got %pK\n",
@@ -920,14 +918,13 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
index = page - alloc->pages;
page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
+
+ mm = alloc->vma_vm_mm;
+ if (!mmget_not_zero(mm))
+ goto err_mmget;
+ if (!down_write_trylock(&mm->mmap_sem))
+ goto err_down_write_mmap_sem_failed;
vma = alloc->vma;
- if (vma) {
- if (!mmget_not_zero(alloc->vma_vm_mm))
- goto err_mmget;
- mm = alloc->vma_vm_mm;
- if (!down_write_trylock(&mm->mmap_sem))
- goto err_down_write_mmap_sem_failed;
- }
list_lru_isolate(lru, item);
spin_unlock(lock);
@@ -941,10 +938,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
PAGE_SIZE, NULL);
trace_binder_unmap_user_end(alloc, index);
-
- up_write(&mm->mmap_sem);
- mmput(mm);
}
+ up_write(&mm->mmap_sem);
+ mmput(mm);
trace_binder_unmap_kernel_start(alloc, index);
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index 2dd33b6df104..a3ad7683b6f2 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -50,8 +50,7 @@ struct binder_buffer {
unsigned free:1;
unsigned allow_user_free:1;
unsigned async_transaction:1;
- unsigned free_in_progress:1;
- unsigned debug_id:28;
+ unsigned debug_id:29;
struct binder_transaction *transaction;
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index c528bdd3a6ae..2ffe35fbc4c4 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -56,6 +56,7 @@
#define TZ_PIL_AUTH_QDSP6_PROC 1
#define ADSP_MMAP_HEAP_ADDR 4
#define ADSP_MMAP_REMOTE_HEAP_ADDR 8
+#define ADSP_MMAP_ADD_PAGES 0x1000
#define FASTRPC_ENOSUCH 39
#define VMID_SSC_Q6 5
#define VMID_ADSP_Q6 6
@@ -77,6 +78,16 @@
#define FASTRPC_CTX_MAGIC (0xbeeddeed)
#define FASTRPC_CTX_MAX (256)
#define FASTRPC_CTXID_MASK (0xFF0)
+#define NUM_DEVICES 2 /* adsprpc-smd, adsprpc-smd-secure */
+#define MINOR_NUM_DEV 0
+#define MINOR_NUM_SECURE_DEV 1
+#define NON_SECURE_CHANNEL 0
+#define SECURE_CHANNEL 1
+
+#define ADSP_DOMAIN_ID (0)
+#define MDSP_DOMAIN_ID (1)
+#define SDSP_DOMAIN_ID (2)
+#define CDSP_DOMAIN_ID (3)
#define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0)
@@ -91,6 +102,7 @@
#define PERF_KEYS \
"count:flush:map:copy:glink:getargs:putargs:invalidate:invoke:tid:ptr"
+#define FASTRPC_STATIC_HANDLE_KERNEL (1)
#define FASTRPC_STATIC_HANDLE_LISTENER (3)
#define FASTRPC_STATIC_HANDLE_MAX (20)
#define FASTRPC_LATENCY_CTRL_ENB (1)
@@ -178,10 +190,15 @@ struct fastrpc_file;
struct fastrpc_buf {
struct hlist_node hn;
+ struct hlist_node hn_rem;
struct fastrpc_file *fl;
void *virt;
uint64_t phys;
size_t size;
+ unsigned long dma_attr;
+ uintptr_t raddr;
+ uint32_t flags;
+ int remote;
};
struct fastrpc_ctx_lst;
@@ -280,6 +297,8 @@ struct fastrpc_channel_ctx {
int ramdumpenabled;
void *remoteheap_ramdump_dev;
struct fastrpc_glink_info link;
+ /* Indicates, if channel is restricted to secure node only */
+ int secure;
};
struct fastrpc_apps {
@@ -356,9 +375,11 @@ struct fastrpc_file {
struct hlist_node hn;
spinlock_t hlock;
struct hlist_head maps;
- struct hlist_head bufs;
+ struct hlist_head cached_bufs;
+ struct hlist_head remote_bufs;
struct fastrpc_ctx_lst clst;
struct fastrpc_session_ctx *sctx;
+ struct fastrpc_buf *init_mem;
struct fastrpc_session_ctx *secsctx;
uint32_t mode;
uint32_t profile;
@@ -379,6 +400,8 @@ struct fastrpc_file {
struct mutex map_mutex;
struct mutex fl_map_mutex;
int refcount;
+ /* Identifies the device (MINOR_NUM_DEV / MINOR_NUM_SECURE_DEV) */
+ int dev_minor;
};
static struct fastrpc_apps gfa;
@@ -490,10 +513,17 @@ static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
return;
if (cache) {
spin_lock(&fl->hlock);
- hlist_add_head(&buf->hn, &fl->bufs);
+ hlist_add_head(&buf->hn, &fl->cached_bufs);
spin_unlock(&fl->hlock);
return;
}
+ if (buf->remote) {
+ spin_lock(&fl->hlock);
+ hlist_del_init(&buf->hn_rem);
+ spin_unlock(&fl->hlock);
+ buf->remote = 0;
+ buf->raddr = 0;
+ }
if (!IS_ERR_OR_NULL(buf->virt)) {
int destVM[1] = {VMID_HLOS};
int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
@@ -507,13 +537,13 @@ static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
hyp_assign_phys(buf->phys, buf_page_size(buf->size),
srcVM, 2, destVM, destVMperm, 1);
}
- dma_free_coherent(fl->sctx->smmu.dev, buf->size, buf->virt,
- buf->phys);
+ dma_free_attrs(fl->sctx->smmu.dev, buf->size, buf->virt,
+ buf->phys, buf->dma_attr);
}
kfree(buf);
}
-static void fastrpc_buf_list_free(struct fastrpc_file *fl)
+static void fastrpc_cached_buf_list_free(struct fastrpc_file *fl)
{
struct fastrpc_buf *buf, *free;
@@ -522,7 +552,7 @@ static void fastrpc_buf_list_free(struct fastrpc_file *fl)
free = NULL;
spin_lock(&fl->hlock);
- hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
+ hlist_for_each_entry_safe(buf, n, &fl->cached_bufs, hn) {
hlist_del_init(&buf->hn);
free = buf;
break;
@@ -533,6 +563,25 @@ static void fastrpc_buf_list_free(struct fastrpc_file *fl)
} while (free);
}
+static void fastrpc_remote_buf_list_free(struct fastrpc_file *fl)
+{
+ struct fastrpc_buf *buf, *free;
+
+ do {
+ struct hlist_node *n;
+
+ free = NULL;
+ spin_lock(&fl->hlock);
+ hlist_for_each_entry_safe(buf, n, &fl->remote_bufs, hn_rem) {
+ free = buf;
+ break;
+ }
+ spin_unlock(&fl->hlock);
+ if (free)
+ fastrpc_buf_free(free, 0);
+ } while (free);
+}
+
static void fastrpc_mmap_add(struct fastrpc_mmap *map)
{
if (map->flags == ADSP_MMAP_HEAP_ADDR ||
@@ -592,7 +641,8 @@ static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd,
return -ENOTTY;
}
-static int dma_alloc_memory(dma_addr_t *region_phys, void **vaddr, size_t size)
+static int dma_alloc_memory(dma_addr_t *region_phys, void **vaddr, size_t size,
+ unsigned long dma_attrs)
{
struct fastrpc_apps *me = &gfa;
@@ -600,10 +650,11 @@ static int dma_alloc_memory(dma_addr_t *region_phys, void **vaddr, size_t size)
pr_err("device adsprpc-mem is not initialized\n");
return -ENODEV;
}
- *vaddr = dma_alloc_coherent(me->dev, size, region_phys, GFP_KERNEL);
- if (!*vaddr) {
- pr_err("ADSPRPC: Failed to allocate %x remote heap memory\n",
- (unsigned int)size);
+ *vaddr = dma_alloc_attrs(me->dev, size, region_phys, GFP_KERNEL,
+ dma_attrs);
+ if (IS_ERR_OR_NULL(*vaddr)) {
+ pr_err("adsprpc: %s: %s: dma_alloc_attrs failed for size 0x%zx, returned %pK\n",
+ current->comm, __func__, size, (*vaddr));
return -ENOMEM;
}
return 0;
@@ -675,14 +726,17 @@ static void fastrpc_mmap_free(struct fastrpc_mmap *map, uint32_t flags)
}
if (map->flags == ADSP_MMAP_HEAP_ADDR ||
map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
+ unsigned long dma_attrs = 0;
if (me->dev == NULL) {
pr_err("failed to free remote heap allocation\n");
return;
}
if (map->phys) {
- dma_free_coherent(me->dev, map->size,
- (void *)map->va, (dma_addr_t)map->phys);
+ dma_attrs |=
+ DMA_ATTR_SKIP_ZEROING | DMA_ATTR_NO_KERNEL_MAPPING;
+ dma_free_attrs(me->dev, map->size, (void *)map->va,
+ (dma_addr_t)map->phys, dma_attrs);
}
} else {
int destVM[1] = {VMID_HLOS};
@@ -754,10 +808,13 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
map->attr = attr;
if (mflags == ADSP_MMAP_HEAP_ADDR ||
mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
+ unsigned long dma_attrs = DMA_ATTR_SKIP_ZEROING |
+ DMA_ATTR_NO_KERNEL_MAPPING;
+
map->apps = me;
map->fl = NULL;
VERIFY(err, !dma_alloc_memory(&region_phys, &region_vaddr,
- len));
+ len, dma_attrs));
if (err)
goto bail;
map->phys = (uintptr_t)region_phys;
@@ -876,7 +933,8 @@ bail:
}
static int fastrpc_buf_alloc(struct fastrpc_file *fl, size_t size,
- struct fastrpc_buf **obuf)
+ unsigned long dma_attr, uint32_t rflags,
+ int remote, struct fastrpc_buf **obuf)
{
int err = 0, vmid;
struct fastrpc_buf *buf = NULL, *fr = NULL;
@@ -886,18 +944,20 @@ static int fastrpc_buf_alloc(struct fastrpc_file *fl, size_t size,
if (err)
goto bail;
- /* find the smallest buffer that fits in the cache */
- spin_lock(&fl->hlock);
- hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
- if (buf->size >= size && (!fr || fr->size > buf->size))
- fr = buf;
- }
- if (fr)
- hlist_del_init(&fr->hn);
- spin_unlock(&fl->hlock);
- if (fr) {
- *obuf = fr;
- return 0;
+ if (!remote) {
+ /* find the smallest buffer that fits in the cache */
+ spin_lock(&fl->hlock);
+ hlist_for_each_entry_safe(buf, n, &fl->cached_bufs, hn) {
+ if (buf->size >= size && (!fr || fr->size > buf->size))
+ fr = buf;
+ }
+ if (fr)
+ hlist_del_init(&fr->hn);
+ spin_unlock(&fl->hlock);
+ if (fr) {
+ *obuf = fr;
+ return 0;
+ }
}
buf = NULL;
VERIFY(err, NULL != (buf = kzalloc(sizeof(*buf), GFP_KERNEL)));
@@ -908,17 +968,27 @@ static int fastrpc_buf_alloc(struct fastrpc_file *fl, size_t size,
buf->virt = NULL;
buf->phys = 0;
buf->size = size;
- buf->virt = dma_alloc_coherent(fl->sctx->smmu.dev, buf->size,
- (void *)&buf->phys, GFP_KERNEL);
+ buf->dma_attr = dma_attr;
+ buf->flags = rflags;
+ buf->raddr = 0;
+ buf->remote = 0;
+ buf->virt = dma_alloc_attrs(fl->sctx->smmu.dev, buf->size,
+ (dma_addr_t *)&buf->phys,
+ GFP_KERNEL, buf->dma_attr);
if (IS_ERR_OR_NULL(buf->virt)) {
/* free cache and retry */
- fastrpc_buf_list_free(fl);
- buf->virt = dma_alloc_coherent(fl->sctx->smmu.dev, buf->size,
- (void *)&buf->phys, GFP_KERNEL);
+ fastrpc_cached_buf_list_free(fl);
+ buf->virt = dma_alloc_attrs(fl->sctx->smmu.dev, buf->size,
+ (dma_addr_t *)&buf->phys,
+ GFP_KERNEL, buf->dma_attr);
VERIFY(err, !IS_ERR_OR_NULL(buf->virt));
}
- if (err)
+ if (err) {
+ err = -ENOMEM;
+ pr_err("adsprpc: %s: %s: dma_alloc_attrs failed for size 0x%zx\n",
+ current->comm, __func__, size);
goto bail;
+ }
if (fl->sctx->smmu.cb)
buf->phys += ((uint64_t)fl->sctx->smmu.cb << 32);
vmid = fl->apps->channel[fl->cid].vmid;
@@ -934,6 +1004,13 @@ static int fastrpc_buf_alloc(struct fastrpc_file *fl, size_t size,
goto bail;
}
+ if (remote) {
+ INIT_HLIST_NODE(&buf->hn_rem);
+ spin_lock(&fl->hlock);
+ hlist_add_head(&buf->hn_rem, &fl->remote_bufs);
+ spin_unlock(&fl->hlock);
+ buf->remote = remote;
+ }
*obuf = buf;
bail:
if (err && buf)
@@ -1402,7 +1479,7 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
/* allocate new buffer */
if (copylen) {
- VERIFY(err, !fastrpc_buf_alloc(ctx->fl, copylen, &ctx->buf));
+ err = fastrpc_buf_alloc(ctx->fl, copylen, 0, 0, 0, &ctx->buf);
if (err)
goto bail;
}
@@ -1821,7 +1898,11 @@ static void fastrpc_init(struct fastrpc_apps *me)
init_completion(&me->channel[i].work);
init_completion(&me->channel[i].workport);
me->channel[i].sesscount = 0;
+ /* All channels are secure by default except CDSP */
+ me->channel[i].secure = SECURE_CHANNEL;
}
+ /* Set CDSP channel to non secure */
+ me->channel[CDSP_DOMAIN_ID].secure = NON_SECURE_CHANNEL;
}
static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl);
@@ -1841,6 +1922,14 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
if (fl->profile)
getnstimeofday(&invoket);
+ if (!kernel) {
+ VERIFY(err, invoke->handle != FASTRPC_STATIC_HANDLE_KERNEL);
+ if (err) {
+ pr_err("adsprpc: ERROR: %s: user application %s trying to send a kernel RPC message to channel %d",
+ __func__, current->comm, cid);
+ goto bail;
+ }
+ }
VERIFY(err, fl->sctx != NULL);
if (err)
@@ -1966,6 +2055,8 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
struct fastrpc_ioctl_init *init = &uproc->init;
struct smq_phy_page pages[1];
struct fastrpc_mmap *file = NULL, *mem = NULL;
+ struct fastrpc_buf *imem = NULL;
+ unsigned long imem_dma_attr = 0;
char *proc_name = NULL;
VERIFY(err, 0 == (err = fastrpc_channel_open(fl)));
@@ -1978,7 +2069,7 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
ra[0].buf.pv = (void *)&tgid;
ra[0].buf.len = sizeof(tgid);
- ioctl.inv.handle = 1;
+ ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
ioctl.inv.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
ioctl.inv.pra = ra;
ioctl.fds = NULL;
@@ -1998,6 +2089,7 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
remote_arg_t ra[6];
int fds[6];
int mflags = 0;
+ int memlen;
struct {
int pgid;
unsigned int namelen;
@@ -2025,16 +2117,24 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
goto bail;
}
inbuf.pageslen = 1;
- VERIFY(err, access_ok(1, (void __user *)init->mem,
- init->memlen));
- if (err)
+
+ VERIFY(err, !init->mem);
+ if (err) {
+ err = -EINVAL;
+ pr_err("adsprpc: %s: %s: ERROR: donated memory allocated in userspace\n",
+ current->comm, __func__);
goto bail;
- mutex_lock(&fl->fl_map_mutex);
- VERIFY(err, !fastrpc_mmap_create(fl, init->memfd, 0,
- init->mem, init->memlen, mflags, &mem));
- mutex_unlock(&fl->fl_map_mutex);
+ }
+ memlen = ALIGN(max(1024*1024*3, (int)init->filelen * 4),
+ 1024*1024);
+ imem_dma_attr = DMA_ATTR_EXEC_MAPPING |
+ DMA_ATTR_NO_KERNEL_MAPPING |
+ DMA_ATTR_FORCE_NON_COHERENT;
+ err = fastrpc_buf_alloc(fl, memlen, imem_dma_attr, 0, 0, &imem);
if (err)
goto bail;
+ fl->init_mem = imem;
+
inbuf.pageslen = 1;
ra[0].buf.pv = (void *)&inbuf;
ra[0].buf.len = sizeof(inbuf);
@@ -2048,8 +2148,8 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
ra[2].buf.len = inbuf.filelen;
fds[2] = init->filefd;
- pages[0].addr = mem->phys;
- pages[0].size = mem->size;
+ pages[0].addr = imem->phys;
+ pages[0].size = imem->size;
ra[3].buf.pv = (void *)pages;
ra[3].buf.len = 1 * sizeof(*pages);
fds[3] = 0;
@@ -2064,7 +2164,7 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
ra[5].buf.len = sizeof(inbuf.siglen);
fds[5] = 0;
- ioctl.inv.handle = 1;
+ ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
ioctl.inv.sc = REMOTE_SCALARS_MAKE(6, 4, 0);
if (uproc->attrs)
ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 6, 0);
@@ -2150,7 +2250,7 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
ra[2].buf.pv = (void *)pages;
ra[2].buf.len = sizeof(*pages);
fds[2] = 0;
- ioctl.inv.handle = 1;
+ ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
ioctl.inv.sc = REMOTE_SCALARS_MAKE(8, 3, 0);
ioctl.inv.pra = ra;
@@ -2202,7 +2302,7 @@ static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
tgid = fl->tgid;
ra[0].buf.pv = (void *)&tgid;
ra[0].buf.len = sizeof(tgid);
- ioctl.inv.handle = 1;
+ ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
ioctl.inv.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
ioctl.inv.pra = ra;
ioctl.fds = NULL;
@@ -2215,7 +2315,8 @@ bail:
}
static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
- struct fastrpc_mmap *map)
+ uintptr_t va, uint64_t phys,
+ size_t size, uintptr_t *raddr)
{
struct fastrpc_ioctl_invoke_crc ioctl;
struct fastrpc_apps *me = &gfa;
@@ -2234,20 +2335,20 @@ static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
} routargs;
inargs.pid = fl->tgid;
- inargs.vaddrin = (uintptr_t)map->va;
+ inargs.vaddrin = (uintptr_t)va;
inargs.flags = flags;
inargs.num = fl->apps->compat ? num * sizeof(page) : num;
ra[0].buf.pv = (void *)&inargs;
ra[0].buf.len = sizeof(inargs);
- page.addr = map->phys;
- page.size = map->size;
+ page.addr = phys;
+ page.size = size;
ra[1].buf.pv = (void *)&page;
ra[1].buf.len = num * sizeof(page);
ra[2].buf.pv = (void *)&routargs;
ra[2].buf.len = sizeof(routargs);
- ioctl.inv.handle = 1;
+ ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
if (fl->apps->compat)
ioctl.inv.sc = REMOTE_SCALARS_MAKE(4, 2, 1);
else
@@ -2258,20 +2359,20 @@ static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
ioctl.crc = NULL;
VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
FASTRPC_MODE_PARALLEL, 1, &ioctl)));
- map->raddr = (uintptr_t)routargs.vaddrout;
+ *raddr = (uintptr_t)routargs.vaddrout;
if (err)
goto bail;
if (flags == ADSP_MMAP_HEAP_ADDR) {
struct scm_desc desc = {0};
desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
- desc.args[1] = map->phys;
- desc.args[2] = map->size;
+ desc.args[1] = phys;
+ desc.args[2] = size;
desc.arginfo = SCM_ARGS(3);
err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
TZ_PIL_PROTECT_MEM_SUBSYS_ID), &desc);
} else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
- VERIFY(err, !hyp_assign_phys(map->phys, (uint64_t)map->size,
+ VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
hlosvm, 1, me->channel[fl->cid].rhvm.vmid,
me->channel[fl->cid].rhvm.vmperm,
me->channel[fl->cid].rhvm.vmcount));
@@ -2282,15 +2383,15 @@ bail:
return err;
}
-static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl,
- struct fastrpc_mmap *map)
+static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl, uint64_t phys,
+ size_t size, uint32_t flags)
{
int err = 0;
struct fastrpc_apps *me = &gfa;
int destVM[1] = {VMID_HLOS};
int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
- if (map->flags == ADSP_MMAP_HEAP_ADDR) {
+ if (flags == ADSP_MMAP_HEAP_ADDR) {
struct fastrpc_ioctl_invoke_crc ioctl;
struct scm_desc desc = {0};
remote_arg_t ra[1];
@@ -2302,7 +2403,7 @@ static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl,
ra[0].buf.pv = (void *)&routargs;
ra[0].buf.len = sizeof(routargs);
- ioctl.inv.handle = 1;
+ ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 0, 1);
ioctl.inv.pra = ra;
ioctl.fds = NULL;
@@ -2316,14 +2417,14 @@ static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl,
if (err)
goto bail;
desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
- desc.args[1] = map->phys;
- desc.args[2] = map->size;
+ desc.args[1] = phys;
+ desc.args[2] = size;
desc.args[3] = routargs.skey;
desc.arginfo = SCM_ARGS(4);
err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID), &desc);
- } else if (map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
- VERIFY(err, !hyp_assign_phys(map->phys, (uint64_t)map->size,
+ } else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
+ VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
me->channel[fl->cid].rhvm.vmid,
me->channel[fl->cid].rhvm.vmcount,
destVM, destVMperm, 1));
@@ -2335,8 +2436,8 @@ bail:
return err;
}
-static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
- struct fastrpc_mmap *map)
+static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl, uintptr_t raddr,
+ uint64_t phys, size_t size, uint32_t flags)
{
struct fastrpc_ioctl_invoke_crc ioctl;
remote_arg_t ra[1];
@@ -2348,12 +2449,12 @@ static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
} inargs;
inargs.pid = fl->tgid;
- inargs.size = map->size;
- inargs.vaddrout = map->raddr;
+ inargs.size = size;
+ inargs.vaddrout = raddr;
ra[0].buf.pv = (void *)&inargs;
ra[0].buf.len = sizeof(inargs);
- ioctl.inv.handle = 1;
+ ioctl.inv.handle = FASTRPC_STATIC_HANDLE_KERNEL;
if (fl->apps->compat)
ioctl.inv.sc = REMOTE_SCALARS_MAKE(5, 1, 0);
else
@@ -2366,9 +2467,9 @@ static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
FASTRPC_MODE_PARALLEL, 1, &ioctl)));
if (err)
goto bail;
- if (map->flags == ADSP_MMAP_HEAP_ADDR ||
- map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
- VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, map));
+ if (flags == ADSP_MMAP_HEAP_ADDR ||
+ flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
+ VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, phys, size, flags));
if (err)
goto bail;
}
@@ -2395,7 +2496,8 @@ static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl)
spin_unlock(&me->hlock);
if (match) {
- VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, match));
+ VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, match->phys,
+ match->size, match->flags));
if (err)
goto bail;
if (me->channel[0].ramdumpenabled) {
@@ -2454,19 +2556,70 @@ static int fastrpc_mmap_remove(struct fastrpc_file *fl, uintptr_t va,
static void fastrpc_mmap_add(struct fastrpc_mmap *map);
+static inline void get_fastrpc_ioctl_mmap_64(
+ struct fastrpc_ioctl_mmap_64 *mmap64,
+ struct fastrpc_ioctl_mmap *immap)
+{
+ immap->fd = mmap64->fd;
+ immap->flags = mmap64->flags;
+ immap->vaddrin = (uintptr_t)mmap64->vaddrin;
+ immap->size = mmap64->size;
+}
+
+static inline void put_fastrpc_ioctl_mmap_64(
+ struct fastrpc_ioctl_mmap_64 *mmap64,
+ struct fastrpc_ioctl_mmap *immap)
+{
+ mmap64->vaddrout = (uint64_t)immap->vaddrout;
+}
+
+static inline void get_fastrpc_ioctl_munmap_64(
+ struct fastrpc_ioctl_munmap_64 *munmap64,
+ struct fastrpc_ioctl_munmap *imunmap)
+{
+ imunmap->vaddrout = (uintptr_t)munmap64->vaddrout;
+ imunmap->size = munmap64->size;
+}
+
static int fastrpc_internal_munmap(struct fastrpc_file *fl,
struct fastrpc_ioctl_munmap *ud)
{
int err = 0;
struct fastrpc_mmap *map = NULL;
+ struct fastrpc_buf *rbuf = NULL, *free = NULL;
+ struct hlist_node *n;
mutex_lock(&fl->map_mutex);
+
+ spin_lock(&fl->hlock);
+ hlist_for_each_entry_safe(rbuf, n, &fl->remote_bufs, hn_rem) {
+ if (rbuf->raddr && (rbuf->flags == ADSP_MMAP_ADD_PAGES)) {
+ if ((rbuf->raddr == ud->vaddrout) &&
+ (rbuf->size == ud->size)) {
+ free = rbuf;
+ break;
+ }
+ }
+ }
+ spin_unlock(&fl->hlock);
+
+ if (free) {
+ VERIFY(err, !fastrpc_munmap_on_dsp(fl, free->raddr,
+ free->phys, free->size, free->flags));
+ if (err)
+ goto bail;
+ fastrpc_buf_free(rbuf, 0);
+ mutex_unlock(&fl->map_mutex);
+ return err;
+ }
+
mutex_lock(&fl->fl_map_mutex);
VERIFY(err, !fastrpc_mmap_remove(fl, ud->vaddrout, ud->size, &map));
mutex_unlock(&fl->fl_map_mutex);
if (err)
goto bail;
- VERIFY(err, !fastrpc_munmap_on_dsp(fl, map));
+ VERIFY(err, !fastrpc_munmap_on_dsp(fl, map->raddr,
+ map->phys, map->size, map->flags));
if (err)
goto bail;
mutex_lock(&fl->fl_map_mutex);
@@ -2512,26 +2665,62 @@ static int fastrpc_internal_mmap(struct fastrpc_file *fl,
{
struct fastrpc_mmap *map = NULL;
+ struct fastrpc_buf *rbuf = NULL;
+ unsigned long dma_attr = 0;
+ uintptr_t raddr = 0;
int err = 0;
mutex_lock(&fl->map_mutex);
- mutex_lock(&fl->fl_map_mutex);
- if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin,
- ud->size, ud->flags, 1, &map)) {
+ if (ud->flags == ADSP_MMAP_ADD_PAGES) {
+ if (ud->vaddrin) {
+ err = -EINVAL;
+ pr_err("adsprpc: %s: %s: ERROR: adding user allocated pages is not supported\n",
+ current->comm, __func__);
+ goto bail;
+ }
+ dma_attr = DMA_ATTR_EXEC_MAPPING |
+ DMA_ATTR_NO_KERNEL_MAPPING |
+ DMA_ATTR_FORCE_NON_COHERENT;
+ err = fastrpc_buf_alloc(fl, ud->size, dma_attr, ud->flags,
+ 1, &rbuf);
+ if (err)
+ goto bail;
+ err = fastrpc_mmap_on_dsp(fl, ud->flags, 0,
+ rbuf->phys, rbuf->size, &raddr);
+ if (err)
+ goto bail;
+ rbuf->raddr = raddr;
+ } else {
+
+ uintptr_t va_to_dsp;
+
+ mutex_lock(&fl->fl_map_mutex);
+ if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin,
+ ud->size, ud->flags, 1, &map)) {
+ mutex_unlock(&fl->fl_map_mutex);
+ mutex_unlock(&fl->map_mutex);
+ return 0;
+ }
+
+ VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
+ (uintptr_t)ud->vaddrin, ud->size,
+ ud->flags, &map));
mutex_unlock(&fl->fl_map_mutex);
- mutex_unlock(&fl->map_mutex);
- return 0;
+ if (err)
+ goto bail;
+
+ if (ud->flags == ADSP_MMAP_HEAP_ADDR ||
+ ud->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
+ va_to_dsp = 0;
+ else
+ va_to_dsp = (uintptr_t)map->va;
+ VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, va_to_dsp,
+ map->phys, map->size, &raddr));
+ if (err)
+ goto bail;
+ map->raddr = raddr;
}
- VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
- (uintptr_t)ud->vaddrin, ud->size,
- ud->flags, &map));
- mutex_unlock(&fl->fl_map_mutex);
- if (err)
- goto bail;
- VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, map));
- if (err)
- goto bail;
- ud->vaddrout = map->raddr;
+ ud->vaddrout = raddr;
bail:
if (err && map) {
mutex_lock(&fl->fl_map_mutex);
@@ -2713,8 +2902,10 @@ static int fastrpc_file_free(struct fastrpc_file *fl)
spin_lock(&fl->hlock);
fl->file_close = 1;
spin_unlock(&fl->hlock);
+ if (!IS_ERR_OR_NULL(fl->init_mem))
+ fastrpc_buf_free(fl->init_mem, 0);
fastrpc_context_list_dtor(fl);
- fastrpc_buf_list_free(fl);
+ fastrpc_cached_buf_list_free(fl);
mutex_lock(&fl->fl_map_mutex);
do {
lmap = NULL;
@@ -2746,6 +2937,7 @@ static int fastrpc_file_free(struct fastrpc_file *fl)
}
kfree(fperf);
} while (fperf);
+ fastrpc_remote_buf_list_free(fl);
mutex_unlock(&fl->perf_mutex);
mutex_destroy(&fl->perf_mutex);
mutex_destroy(&fl->fl_map_mutex);
@@ -2912,6 +3104,9 @@ static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
chan->name);
len += scnprintf(fileinfo + len,
DEBUGFS_SIZE - len, "%s %d\n",
+ "secure:", chan->secure);
+ len += scnprintf(fileinfo + len,
+ DEBUGFS_SIZE - len, "%s %d\n",
"sesscount:", chan->sesscount);
for (j = 0; j < chan->sesscount; j++) {
sess = &chan->session[j];
@@ -2939,14 +3134,21 @@ static ssize_t fastrpc_debugfs_read(struct file *filp, char __user *buffer,
"%s %d\n\n",
"SSRCOUNT:", fl->ssrcount);
len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
+ "%s %d\n\n",
+ "DEV_MINOR:", fl->dev_minor);
+ len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
+ "%s %d\n\n",
+ "KERNEL MEMORY ALLOCATION:", 1);
+ len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
"%s\n",
- "LIST OF BUFS:");
+ "LIST OF CACHED BUFS:");
spin_lock(&fl->hlock);
- hlist_for_each_entry_safe(buf, n, &fl->bufs, hn) {
+ hlist_for_each_entry_safe(buf, n, &fl->cached_bufs, hn) {
len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
- "%s %pK %s %pK %s %llx\n", "buf:",
- buf, "buf->virt:", buf->virt,
- "buf->phys:", buf->phys);
+ "%s %pK %s %pK %s %llx %s %lx\n",
+ "buf:", buf, "buf->virt:", buf->virt,
+ "buf->phys:", buf->phys,
+ "buf->dma_attr:", buf->dma_attr);
}
len += scnprintf(fileinfo + len, DEBUGFS_SIZE - len,
"\n%s\n",
@@ -3074,6 +3276,19 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
struct fastrpc_file *fl = NULL;
struct fastrpc_apps *me = &gfa;
+ /*
+ * Indicates the device node opened
+ * MINOR_NUM_DEV or MINOR_NUM_SECURE_DEV
+ */
+ int dev_minor = MINOR(inode->i_rdev);
+
+ VERIFY(err, ((dev_minor == MINOR_NUM_DEV) ||
+ (dev_minor == MINOR_NUM_SECURE_DEV)));
+ if (err) {
+ pr_err("adsprpc: Invalid dev minor num %d\n", dev_minor);
+ return err;
+ }
+
VERIFY(err, NULL != (fl = kzalloc(sizeof(*fl), GFP_KERNEL)));
if (err)
return err;
@@ -3083,13 +3298,17 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
spin_lock_init(&fl->hlock);
INIT_HLIST_HEAD(&fl->maps);
INIT_HLIST_HEAD(&fl->perf);
- INIT_HLIST_HEAD(&fl->bufs);
+ INIT_HLIST_HEAD(&fl->cached_bufs);
+ INIT_HLIST_HEAD(&fl->remote_bufs);
INIT_HLIST_NODE(&fl->hn);
fl->sessionid = 0;
fl->tgid = current->tgid;
fl->apps = me;
fl->mode = FASTRPC_MODE_SERIAL;
fl->cid = -1;
+ fl->dev_minor = dev_minor;
+ fl->init_mem = NULL;
+
if (debugfs_file != NULL)
fl->debugfs_file = debugfs_file;
fl->qos_request = 0;
@@ -3117,6 +3336,23 @@ static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
VERIFY(err, cid < NUM_CHANNELS);
if (err)
goto bail;
+ /* Check to see if the device node is non-secure */
+ if (fl->dev_minor == MINOR_NUM_DEV) {
+ /*
+ * For non secure device node check and make sure that
+ * the channel allows non-secure access
+ * If not, bail. Session will not start.
+ * cid will remain -1 and client will not be able to
+ * invoke any other methods without failure
+ */
+ if (fl->apps->channel[cid].secure == SECURE_CHANNEL) {
+ err = -EPERM;
+ pr_err("adsprpc: GetInfo failed dev %d, cid %d, secure %d\n",
+ fl->dev_minor, cid,
+ fl->apps->channel[cid].secure);
+ goto bail;
+ }
+ }
fl->cid = cid;
fl->ssrcount = fl->apps->channel[cid].ssrcount;
VERIFY(err, !fastrpc_session_alloc_locked(
@@ -3162,6 +3398,9 @@ static int fastrpc_internal_control(struct fastrpc_file *fl,
case FASTRPC_CONTROL_SMMU:
fl->sharedcb = cp->smmu.sharedcb;
break;
+ case FASTRPC_CONTROL_KALLOC:
+ cp->kalloc.kalloc_support = 1;
+ break;
default:
err = -ENOTTY;
break;
@@ -3176,12 +3415,18 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
union {
struct fastrpc_ioctl_invoke_crc inv;
struct fastrpc_ioctl_mmap mmap;
+ struct fastrpc_ioctl_mmap_64 mmap64;
struct fastrpc_ioctl_munmap munmap;
+ struct fastrpc_ioctl_munmap_64 munmap64;
struct fastrpc_ioctl_munmap_fd munmap_fd;
struct fastrpc_ioctl_init_attrs init;
struct fastrpc_ioctl_perf perf;
struct fastrpc_ioctl_control cp;
} p;
+ union {
+ struct fastrpc_ioctl_mmap mmap;
+ struct fastrpc_ioctl_munmap munmap;
+ } i;
void *param = (char *)ioctl_param;
struct fastrpc_file *fl = (struct fastrpc_file *)file->private_data;
int size = 0, err = 0;
@@ -3245,24 +3490,27 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
goto bail;
break;
case FASTRPC_IOCTL_MMAP_64:
- K_COPY_FROM_USER(err, 0, &p.mmap, param,
- sizeof(p.mmap));
+ K_COPY_FROM_USER(err, 0, &p.mmap64, param,
+ sizeof(p.mmap64));
if (err)
goto bail;
- VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &p.mmap)));
+ get_fastrpc_ioctl_mmap_64(&p.mmap64, &i.mmap);
+ VERIFY(err, 0 == (err = fastrpc_internal_mmap(fl, &i.mmap)));
if (err)
goto bail;
- K_COPY_TO_USER(err, 0, param, &p.mmap, sizeof(p.mmap));
+ put_fastrpc_ioctl_mmap_64(&p.mmap64, &i.mmap);
+ K_COPY_TO_USER(err, 0, param, &p.mmap64, sizeof(p.mmap64));
if (err)
goto bail;
break;
case FASTRPC_IOCTL_MUNMAP_64:
- K_COPY_FROM_USER(err, 0, &p.munmap, param,
- sizeof(p.munmap));
+ K_COPY_FROM_USER(err, 0, &p.munmap64, param,
+ sizeof(p.munmap64));
if (err)
goto bail;
+ get_fastrpc_ioctl_munmap_64(&p.munmap64, &i.munmap);
VERIFY(err, 0 == (err = fastrpc_internal_munmap(fl,
- &p.munmap)));
+ &i.munmap)));
if (err)
goto bail;
break;
@@ -3339,6 +3587,11 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
VERIFY(err, 0 == (err = fastrpc_internal_control(fl, &p.cp)));
if (err)
goto bail;
+ if (p.cp.req == FASTRPC_CONTROL_KALLOC) {
+ K_COPY_TO_USER(err, 0, param, &p.cp, sizeof(p.cp));
+ if (err)
+ goto bail;
+ }
break;
case FASTRPC_IOCTL_GETINFO:
K_COPY_FROM_USER(err, 0, &info, param, sizeof(info));
@@ -3707,6 +3960,24 @@ bail:
}
}
+static void configure_secure_channels(uint32_t secure_domains)
+{
+ struct fastrpc_apps *me = &gfa;
+ int ii = 0;
+ /*
+ * secure_domains contains the bitmask of the secure channels
+ * Bit 0 - ADSP
+ * Bit 1 - MDSP
+ * Bit 2 - SLPI
+ * Bit 3 - CDSP
+ */
+ for (ii = ADSP_DOMAIN_ID; ii <= CDSP_DOMAIN_ID; ++ii) {
+ int secure = (secure_domains >> ii) & 0x01;
+
+ me->channel[ii].secure = secure;
+ }
+}
+
static int fastrpc_probe(struct platform_device *pdev)
{
int err = 0;
@@ -3718,7 +3989,7 @@ static int fastrpc_probe(struct platform_device *pdev)
struct cma *cma;
uint32_t val;
int ret = 0;
-
+ uint32_t secure_domains;
if (of_device_is_compatible(dev->of_node,
"qcom,msm-fastrpc-compute")) {
@@ -3728,6 +3999,16 @@ static int fastrpc_probe(struct platform_device *pdev)
of_property_read_u32(dev->of_node, "qcom,rpc-latency-us",
&me->latency);
+ if (of_get_property(dev->of_node,
+ "qcom,secure-domains", NULL) != NULL) {
+ VERIFY(err, !of_property_read_u32(dev->of_node,
+ "qcom,secure-domains",
+ &secure_domains));
+ if (!err)
+ configure_secure_channels(secure_domains);
+ else
+ pr_info("adsprpc: unable to read the domain configuration from dts\n");
+ }
}
if (of_device_is_compatible(dev->of_node,
"qcom,msm-fastrpc-compute-cb"))
@@ -3915,6 +4196,7 @@ static int __init fastrpc_device_init(void)
{
struct fastrpc_apps *me = &gfa;
struct device *dev = NULL;
+ struct device *secure_dev = NULL;
int err = 0, i;
memset(me, 0, sizeof(*me));
@@ -3932,7 +4214,7 @@ static int __init fastrpc_device_init(void)
cdev_init(&me->cdev, &fops);
me->cdev.owner = THIS_MODULE;
VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0),
- 1));
+ NUM_DEVICES));
if (err)
goto cdev_init_bail;
me->class = class_create(THIS_MODULE, "fastrpc");
@@ -3940,14 +4222,30 @@ static int __init fastrpc_device_init(void)
if (err)
goto class_create_bail;
me->compat = (fops.compat_ioctl == NULL) ? 0 : 1;
+
+ /*
+ * Create devices and register with sysfs
+ * Create first device with minor number 0
+ */
dev = device_create(me->class, NULL,
- MKDEV(MAJOR(me->dev_no), 0),
- NULL, gcinfo[0].name);
+ MKDEV(MAJOR(me->dev_no), MINOR_NUM_DEV),
+ NULL, DEVICE_NAME);
VERIFY(err, !IS_ERR_OR_NULL(dev));
if (err)
goto device_create_bail;
+
+ /* Create secure device with minor number for secure device */
+ secure_dev = device_create(me->class, NULL,
+ MKDEV(MAJOR(me->dev_no), MINOR_NUM_SECURE_DEV),
+ NULL, DEVICE_NAME_SECURE);
+ VERIFY(err, !IS_ERR_OR_NULL(secure_dev));
+ if (err)
+ goto device_create_bail;
+
for (i = 0; i < NUM_CHANNELS; i++) {
- me->channel[i].dev = dev;
+ me->channel[i].dev = secure_dev;
+ if (i == CDSP_DOMAIN_ID)
+ me->channel[i].dev = dev;
me->channel[i].ssrcount = 0;
me->channel[i].prevssrcount = 0;
me->channel[i].issubsystemup = 1;
@@ -3972,7 +4270,11 @@ device_create_bail:
&me->channel[i].nb);
}
if (!IS_ERR_OR_NULL(dev))
- device_destroy(me->class, MKDEV(MAJOR(me->dev_no), 0));
+ device_destroy(me->class, MKDEV(MAJOR(me->dev_no),
+ MINOR_NUM_DEV));
+ if (!IS_ERR_OR_NULL(secure_dev))
+ device_destroy(me->class, MKDEV(MAJOR(me->dev_no),
+ MINOR_NUM_SECURE_DEV));
class_destroy(me->class);
class_create_bail:
cdev_del(&me->cdev);
@@ -3994,10 +4296,15 @@ static void __exit fastrpc_device_exit(void)
for (i = 0; i < NUM_CHANNELS; i++) {
if (!gcinfo[i].name)
continue;
- device_destroy(me->class, MKDEV(MAJOR(me->dev_no), i));
subsys_notif_unregister_notifier(me->channel[i].handle,
&me->channel[i].nb);
}
+
+ /* Destroy the secure and non secure devices */
+ device_destroy(me->class, MKDEV(MAJOR(me->dev_no), MINOR_NUM_DEV));
+ device_destroy(me->class, MKDEV(MAJOR(me->dev_no),
+ MINOR_NUM_SECURE_DEV));
+
class_destroy(me->class);
cdev_del(&me->cdev);
unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
diff --git a/drivers/char/adsprpc_compat.c b/drivers/char/adsprpc_compat.c
index 804cedade655..ea7967c55aa1 100644
--- a/drivers/char/adsprpc_compat.c
+++ b/drivers/char/adsprpc_compat.c
@@ -126,16 +126,22 @@ struct compat_fastrpc_ioctl_perf { /* kernel performance data */
compat_uptr_t keys;
};
-#define FASTRPC_CONTROL_LATENCY (1)
+#define FASTRPC_CONTROL_LATENCY (1)
struct compat_fastrpc_ctrl_latency {
compat_uint_t enable; /* latency control enable */
compat_uint_t level; /* level of control */
};
+#define FASTRPC_CONTROL_KALLOC (3)
+struct compat_fastrpc_ctrl_kalloc {
+ compat_uint_t kalloc_support; /* Remote memory allocation from kernel */
+};
+
struct compat_fastrpc_ioctl_control {
compat_uint_t req;
union {
struct compat_fastrpc_ctrl_latency lp;
+ struct compat_fastrpc_ctrl_kalloc kalloc;
};
};
@@ -528,6 +534,7 @@ long compat_fastrpc_device_ioctl(struct file *filp, unsigned int cmd,
{
struct compat_fastrpc_ioctl_control __user *ctrl32;
struct fastrpc_ioctl_control __user *ctrl;
+ compat_uptr_t p;
ctrl32 = compat_ptr(arg);
VERIFY(err, NULL != (ctrl = compat_alloc_user_space(
@@ -540,6 +547,15 @@ long compat_fastrpc_device_ioctl(struct file *filp, unsigned int cmd,
return err;
err = filp->f_op->unlocked_ioctl(filp, FASTRPC_IOCTL_CONTROL,
(unsigned long)ctrl);
+ if (err)
+ return err;
+ err = get_user(p, &ctrl32->req);
+ if (err)
+ return err;
+ if (p == FASTRPC_CONTROL_KALLOC) {
+ err = get_user(p, &ctrl->kalloc.kalloc_support);
+ err |= put_user(p, &ctrl32->kalloc.kalloc_support);
+ }
return err;
}
case COMPAT_FASTRPC_IOCTL_GETPERF:
diff --git a/drivers/char/adsprpc_shared.h b/drivers/char/adsprpc_shared.h
index 1da67279cef9..a3db63368e9b 100644
--- a/drivers/char/adsprpc_shared.h
+++ b/drivers/char/adsprpc_shared.h
@@ -36,6 +36,7 @@
#define FASTRPC_GLINK_GUID "fastrpcglink-apps-dsp"
#define FASTRPC_SMD_GUID "fastrpcsmd-apps-dsp"
#define DEVICE_NAME "adsprpc-smd"
+#define DEVICE_NAME_SECURE "adsprpc-smd-secure"
/* Set for buffers that have no virtual mapping in userspace */
#define FASTRPC_ATTR_NOVA 0x1
@@ -217,7 +218,7 @@ struct fastrpc_ioctl_mmap {
};
struct fastrpc_ioctl_mmap_64 {
- int fd; /* ion fd */
+ int fd; /* ion fd */
uint32_t flags; /* flags for dsp to map with */
uint64_t vaddrin; /* optional virtual address */
size_t size; /* size */
@@ -237,20 +238,28 @@ struct fastrpc_ioctl_perf { /* kernel performance data */
uintptr_t keys;
};
-#define FASTRPC_CONTROL_LATENCY (1)
+#define FASTRPC_CONTROL_LATENCY (1)
struct fastrpc_ctrl_latency {
- uint32_t enable; //!latency control enable
- uint32_t level; //!level of control
+ uint32_t enable; /* latency control enable */
+ uint32_t level; /* level of control */
};
-#define FASTRPC_CONTROL_SMMU (2)
+
+#define FASTRPC_CONTROL_SMMU (2)
struct fastrpc_ctrl_smmu {
uint32_t sharedcb;
};
+
+#define FASTRPC_CONTROL_KALLOC (3)
+struct fastrpc_ctrl_kalloc {
+ uint32_t kalloc_support; /* Remote memory allocation from kernel */
+};
+
struct fastrpc_ioctl_control {
uint32_t req;
union {
struct fastrpc_ctrl_latency lp;
struct fastrpc_ctrl_smmu smmu;
+ struct fastrpc_ctrl_kalloc kalloc;
};
};
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
index 286418f9436a..0a36b78e321b 100644
--- a/drivers/char/diag/diag_dci.c
+++ b/drivers/char/diag/diag_dci.c
@@ -2303,8 +2303,8 @@ struct diag_dci_client_tbl *dci_lookup_client_entry_pid(int tgid)
pid_struct = find_get_pid(entry->tgid);
if (!pid_struct) {
DIAG_LOG(DIAG_DEBUG_DCI,
- "diag: valid pid doesn't exist for pid = %d\n",
- entry->tgid);
+ "diag: Exited pid (%d) doesn't match dci client of pid (%d)\n",
+ tgid, entry->tgid);
continue;
}
task_s = get_pid_task(pid_struct, PIDTYPE_PID);
diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c
index f27425be85c5..fa0af0176894 100644
--- a/drivers/char/diag/diag_masks.c
+++ b/drivers/char/diag/diag_masks.c
@@ -1772,6 +1772,7 @@ static int __diag_mask_init(struct diag_mask_info *mask_info, int mask_len,
mask_info->update_buf = kzalloc(update_buf_len, GFP_KERNEL);
if (!mask_info->update_buf) {
kfree(mask_info->ptr);
+ mask_info->ptr = NULL;
return -ENOMEM;
}
kmemleak_not_leak(mask_info->update_buf);
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 131ecba37ae3..18f8e2b11cbf 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -552,8 +552,8 @@ static int diagchar_close(struct inode *inode, struct file *file)
{
int ret;
- DIAG_LOG(DIAG_DEBUG_USERSPACE, "diag: process exit %s\n",
- current->comm);
+ DIAG_LOG(DIAG_DEBUG_USERSPACE, "diag: %s process exit with pid = %d\n",
+ current->comm, current->tgid);
ret = diag_remove_client_entry(file);
return ret;
@@ -3259,6 +3259,8 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
int exit_stat = 0;
int write_len = 0;
struct diag_md_session_t *session_info = NULL;
+ struct pid *pid_struct = NULL;
+ struct task_struct *task_s = NULL;
mutex_lock(&driver->diagchar_mutex);
for (i = 0; i < driver->num_clients; i++)
@@ -3503,8 +3505,19 @@ exit:
list_for_each_safe(start, temp, &driver->dci_client_list) {
entry = list_entry(start, struct diag_dci_client_tbl,
track);
- if (entry->client->tgid != current->tgid)
+ pid_struct = find_get_pid(entry->tgid);
+ if (!pid_struct)
continue;
+ task_s = get_pid_task(pid_struct, PIDTYPE_PID);
+ if (!task_s) {
+ DIAG_LOG(DIAG_DEBUG_DCI,
+ "diag: valid task doesn't exist for pid = %d\n",
+ entry->tgid);
+ continue;
+ }
+ if (task_s == entry->client)
+ if (entry->client->tgid != current->tgid)
+ continue;
if (!entry->in_service)
continue;
if (copy_to_user(buf + ret, &data_type, sizeof(int))) {
diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c
index 2396557039de..e774afc2dd86 100644
--- a/drivers/char/diag/diagfwd_cntl.c
+++ b/drivers/char/diag/diagfwd_cntl.c
@@ -523,7 +523,12 @@ static int update_msg_mask_tbl_entry(struct diag_msg_mask_t *mask,
}
if (range->ssid_last >= mask->ssid_last) {
temp_range = range->ssid_last - mask->ssid_first + 1;
- mask->ssid_last = range->ssid_last;
+ if (temp_range > MAX_SSID_PER_RANGE) {
+ temp_range = MAX_SSID_PER_RANGE;
+ mask->ssid_last = mask->ssid_first + temp_range - 1;
+ } else
+ mask->ssid_last = range->ssid_last;
+ mask->ssid_last_tools = mask->ssid_last;
mask->range = temp_range;
}
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 00989a8cca5e..d89e4a1a7b4d 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -336,6 +336,39 @@ static int clk_rcg2_determine_rate(struct clk_hw *hw,
return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req);
}
+static bool clk_rcg2_current_config(struct clk_rcg2 *rcg,
+ const struct freq_tbl *f)
+{
+ struct clk_hw *hw = &rcg->clkr.hw;
+ u32 cfg, mask, new_cfg;
+ int index;
+
+ if (rcg->mnd_width) {
+ mask = BIT(rcg->mnd_width) - 1;
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + M_REG, &cfg);
+ if ((cfg & mask) != (f->m & mask))
+ return false;
+
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + N_REG, &cfg);
+ if ((cfg & mask) != (~(f->n - f->m) & mask))
+ return false;
+ }
+
+ mask = (BIT(rcg->hid_width) - 1) | CFG_SRC_SEL_MASK;
+
+ index = qcom_find_src_index(hw, rcg->parent_map, f->src);
+
+ new_cfg = ((f->pre_div << CFG_SRC_DIV_SHIFT) |
+ (rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT)) & mask;
+
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+
+ if (new_cfg != (cfg & mask))
+ return false;
+
+ return true;
+}
+
static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{
u32 cfg, mask;
@@ -900,6 +933,8 @@ static int clk_byte2_set_rate(struct clk_hw *hw, unsigned long rate,
for (i = 0; i < num_parents; i++) {
if (cfg == rcg->parent_map[i].cfg) {
f.src = rcg->parent_map[i].src;
+ if (clk_rcg2_current_config(rcg, &f))
+ return 0;
return clk_rcg2_configure(rcg, &f);
}
}
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
index 7b23db46c121..aa8dccc88dff 100644
--- a/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
+++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
@@ -95,6 +95,9 @@
#define PLL_PLL_INT_GAIN_IFILT_BAND_1 0x15c
#define PLL_PLL_FL_INT_GAIN_PFILT_BAND_1 0x164
#define PLL_FASTLOCK_EN_BAND 0x16c
+#define PLL_FREQ_TUNE_ACCUM_INIT_LOW 0x170
+#define PLL_FREQ_TUNE_ACCUM_INIT_MID 0x174
+#define PLL_FREQ_TUNE_ACCUM_INIT_HIGH 0x178
#define PLL_FREQ_TUNE_ACCUM_INIT_MUX 0x17c
#define PLL_PLL_LOCK_OVERRIDE 0x180
#define PLL_PLL_LOCK_DELAY 0x184
@@ -112,6 +115,7 @@
#define PHY_CMN_RBUF_CTRL 0x01c
#define PHY_CMN_PLL_CNTRL 0x038
#define PHY_CMN_CTRL_0 0x024
+#define PHY_CMN_CTRL_2 0x02c
/* Bit definition of SSC control registers */
#define SSC_CENTER BIT(0)
@@ -123,6 +127,43 @@
#define SSC_START BIT(6)
#define SSC_START_MUX BIT(7)
+/* Dynamic Refresh Control Registers */
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL0 (0x014)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL1 (0x018)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL2 (0x01C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL3 (0x020)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL4 (0x024)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL5 (0x028)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL6 (0x02C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL7 (0x030)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL8 (0x034)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL9 (0x038)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL10 (0x03C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL11 (0x040)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL12 (0x044)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL13 (0x048)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL14 (0x04C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL15 (0x050)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL16 (0x054)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL17 (0x058)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL18 (0x05C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL19 (0x060)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL20 (0x064)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL21 (0x068)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL22 (0x06C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL23 (0x070)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL24 (0x074)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL25 (0x078)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL26 (0x07C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL27 (0x080)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL28 (0x084)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL29 (0x088)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL30 (0x08C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL31 (0x090)
+#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR (0x094)
+#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR2 (0x098)
+
+#define DSI_PHY_TO_PLL_OFFSET (0x600)
enum {
DSI_PLL_0,
DSI_PLL_1,
@@ -644,6 +685,7 @@ static int vco_10nm_set_rate(struct clk_hw *hw, unsigned long rate,
rsc->vco_current_rate = rate;
rsc->vco_ref_clk_rate = vco->ref_clk_rate;
+ rsc->dfps_trigger = false;
rc = mdss_pll_resource_enable(rsc, true);
if (rc) {
@@ -674,6 +716,237 @@ static int vco_10nm_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
+static int dsi_pll_read_stored_trim_codes(struct mdss_pll_resources *pll_res,
+ unsigned long vco_clk_rate)
+{
+ int i;
+ bool found = false;
+
+ if (!pll_res->dfps)
+ return -EINVAL;
+
+ for (i = 0; i < pll_res->dfps->vco_rate_cnt; i++) {
+ struct dfps_codes_info *codes_info =
+ &pll_res->dfps->codes_dfps[i];
+
+ pr_debug("valid=%d vco_rate=%d, code %d %d %d\n",
+ codes_info->is_valid, codes_info->clk_rate,
+ codes_info->pll_codes.pll_codes_1,
+ codes_info->pll_codes.pll_codes_2,
+ codes_info->pll_codes.pll_codes_3);
+
+ if (vco_clk_rate != codes_info->clk_rate &&
+ codes_info->is_valid)
+ continue;
+
+ pll_res->cache_pll_trim_codes[0] =
+ codes_info->pll_codes.pll_codes_1;
+ pll_res->cache_pll_trim_codes[1] =
+ codes_info->pll_codes.pll_codes_2;
+ pll_res->cache_pll_trim_codes[2] =
+ codes_info->pll_codes.pll_codes_3;
+ found = true;
+ break;
+ }
+
+ if (!found)
+ return -EINVAL;
+
+ pr_debug("trim_code_0=0x%x trim_code_1=0x%x trim_code_2=0x%x\n",
+ pll_res->cache_pll_trim_codes[0],
+ pll_res->cache_pll_trim_codes[1],
+ pll_res->cache_pll_trim_codes[2]);
+
+ return 0;
+}
+
+static void shadow_dsi_pll_dynamic_refresh_10nm(struct dsi_pll_10nm *pll,
+ struct mdss_pll_resources *rsc)
+{
+ u32 data;
+ u32 offset = DSI_PHY_TO_PLL_OFFSET;
+ u32 upper_addr = 0;
+ struct dsi_pll_regs *reg = &pll->reg_setup;
+
+ data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG1);
+ data &= ~BIT(5);
+ MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL0,
+ PHY_CMN_CLK_CFG1, PHY_CMN_PLL_CNTRL, data, 0);
+ upper_addr |= (upper_8_bit(PHY_CMN_CLK_CFG1) << 0);
+ upper_addr |= (upper_8_bit(PHY_CMN_PLL_CNTRL) << 1);
+
+ MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL1,
+ PHY_CMN_RBUF_CTRL,
+ (PLL_DECIMAL_DIV_START_1 + offset),
+ 0, reg->decimal_div_start);
+ upper_addr |= (upper_8_bit(PHY_CMN_RBUF_CTRL) << 2);
+ upper_addr |= (upper_8_bit(PLL_DECIMAL_DIV_START_1 + offset) << 3);
+
+ MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL2,
+ (PLL_FRAC_DIV_START_LOW_1 + offset),
+ (PLL_FRAC_DIV_START_MID_1 + offset),
+ reg->frac_div_start_low, reg->frac_div_start_mid);
+ upper_addr |= (upper_8_bit(PLL_FRAC_DIV_START_LOW_1 + offset) << 4);
+ upper_addr |= (upper_8_bit(PLL_FRAC_DIV_START_MID_1 + offset) << 5);
+
+ MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL3,
+ (PLL_FRAC_DIV_START_HIGH_1 + offset),
+ (PLL_PLL_PROP_GAIN_RATE_1 + offset),
+ reg->frac_div_start_high, reg->pll_prop_gain_rate);
+ upper_addr |= (upper_8_bit(PLL_FRAC_DIV_START_HIGH_1 + offset) << 6);
+ upper_addr |= (upper_8_bit(PLL_PLL_PROP_GAIN_RATE_1 + offset) << 7);
+
+ data = MDSS_PLL_REG_R(rsc->pll_base, PLL_PLL_OUTDIV_RATE) & 0x03;
+ MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL4,
+ (PLL_PLL_OUTDIV_RATE + offset),
+ (PLL_FREQ_TUNE_ACCUM_INIT_LOW + offset),
+ data, 0);
+ upper_addr |= (upper_8_bit(PLL_PLL_OUTDIV_RATE + offset) << 8);
+ upper_addr |= (upper_8_bit(PLL_FREQ_TUNE_ACCUM_INIT_LOW + offset) << 9);
+
+ MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL5,
+ (PLL_FREQ_TUNE_ACCUM_INIT_MID + offset),
+ (PLL_FREQ_TUNE_ACCUM_INIT_HIGH + offset),
+ rsc->cache_pll_trim_codes[1],
+ rsc->cache_pll_trim_codes[0]);
+ upper_addr |=
+ (upper_8_bit(PLL_FREQ_TUNE_ACCUM_INIT_MID + offset) << 10);
+ upper_addr |=
+ (upper_8_bit(PLL_FREQ_TUNE_ACCUM_INIT_HIGH + offset) << 11);
+
+ MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL6,
+ (PLL_FREQ_TUNE_ACCUM_INIT_MUX + offset),
+ (PLL_PLL_BAND_SET_RATE_1 + offset),
+ 0x07, rsc->cache_pll_trim_codes[2]);
+ upper_addr |=
+ (upper_8_bit(PLL_FREQ_TUNE_ACCUM_INIT_MUX + offset) << 12);
+ upper_addr |= (upper_8_bit(PLL_PLL_BAND_SET_RATE_1 + offset) << 13);
+
+ MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL7,
+ (PLL_CALIBRATION_SETTINGS + offset),
+ (PLL_BAND_SEL_CAL_SETTINGS + offset), 0x44, 0x3a);
+ upper_addr |= (upper_8_bit(PLL_CALIBRATION_SETTINGS + offset) << 14);
+ upper_addr |= (upper_8_bit(PLL_BAND_SEL_CAL_SETTINGS + offset) << 15);
+
+ MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL8,
+ (PLL_PLL_LOCKDET_RATE_1 + offset),
+ (PLL_PLL_LOCK_DELAY + offset), 0x10, 0x06);
+ upper_addr |= (upper_8_bit(PLL_PLL_LOCKDET_RATE_1 + offset) << 16);
+ upper_addr |= (upper_8_bit(PLL_PLL_LOCK_DELAY + offset) << 17);
+
+ data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG0);
+ MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL17,
+ PHY_CMN_CTRL_2, PHY_CMN_CLK_CFG0, 0x40, data);
+ if (rsc->slave)
+ MDSS_DYN_PLL_REG_W(rsc->slave->dyn_pll_base,
+ DSI_DYNAMIC_REFRESH_PLL_CTRL10,
+ PHY_CMN_CLK_CFG0, PHY_CMN_CTRL_0,
+ data, 0x7f);
+
+ MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL18,
+ PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+ /* Dummy register writes */
+ MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL19,
+ PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+ MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL20,
+ PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+ MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL21,
+ PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+ MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL22,
+ PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+ MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL23,
+ PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+ MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL24,
+ PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+ MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL25,
+ PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+ MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL26,
+ PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+ MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL27,
+ PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+ MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL28,
+ PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+ MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL29,
+ PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+
+ /* Registers to configure after PLL enable delay */
+ data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG1) | BIT(5);
+ MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL30,
+ PHY_CMN_CLK_CFG1, PHY_CMN_RBUF_CTRL, data, 0x01);
+ MDSS_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL31,
+ PHY_CMN_CLK_CFG1, PHY_CMN_CLK_CFG1, data, data);
+ if (rsc->slave) {
+ data = MDSS_PLL_REG_R(rsc->slave->phy_base, PHY_CMN_CLK_CFG1) |
+ BIT(5);
+ MDSS_DYN_PLL_REG_W(rsc->slave->dyn_pll_base,
+ DSI_DYNAMIC_REFRESH_PLL_CTRL30,
+ PHY_CMN_CLK_CFG1, PHY_CMN_RBUF_CTRL,
+ data, 0x01);
+ MDSS_DYN_PLL_REG_W(rsc->slave->dyn_pll_base,
+ DSI_DYNAMIC_REFRESH_PLL_CTRL31,
+ PHY_CMN_CLK_CFG1, PHY_CMN_CLK_CFG1,
+ data, data);
+ }
+
+ MDSS_PLL_REG_W(rsc->dyn_pll_base,
+ DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR, upper_addr);
+ MDSS_PLL_REG_W(rsc->dyn_pll_base,
+ DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR2, 0);
+ wmb(); /* commit register writes */
+}
+
+static int shadow_vco_10nm_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ int rc;
+ struct dsi_pll_10nm *pll;
+ struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
+ struct mdss_pll_resources *rsc = vco->priv;
+
+ if (!rsc) {
+ pr_err("pll resource not found\n");
+ return -EINVAL;
+ }
+
+ pll = rsc->priv;
+ if (!pll) {
+ pr_err("pll configuration not found\n");
+ return -EINVAL;
+ }
+
+ rc = dsi_pll_read_stored_trim_codes(rsc, rate);
+ if (rc) {
+ pr_err("cannot find pll codes rate=%ld\n", rate);
+ return -EINVAL;
+ }
+ pr_debug("ndx=%d, rate=%lu\n", rsc->index, rate);
+
+ rsc->vco_current_rate = rate;
+ rsc->vco_ref_clk_rate = vco->ref_clk_rate;
+
+ rc = mdss_pll_resource_enable(rsc, true);
+ if (rc) {
+ pr_err("failed to enable mdss dsi pll(%d), rc=%d\n",
+ rsc->index, rc);
+ return rc;
+ }
+
+ dsi_pll_setup_config(pll, rsc);
+
+ dsi_pll_calc_dec_frac(pll, rsc);
+
+ /* program dynamic refresh control registers */
+ shadow_dsi_pll_dynamic_refresh_10nm(pll, rsc);
+
+ /* update cached vco rate */
+ rsc->vco_cached_rate = rate;
+ rsc->dfps_trigger = true;
+
+ mdss_pll_resource_enable(rsc, false);
+
+ return 0;
+}
+
static int dsi_pll_10nm_lock_status(struct mdss_pll_resources *pll)
{
int rc;
@@ -739,7 +1012,7 @@ static int dsi_pll_enable(struct dsi_pll_vco_clk *vco)
phy_reg_update_bits_sub(rsc, PHY_CMN_CLK_CFG1, 0x03, rsc->cached_cfg1);
if (rsc->slave)
phy_reg_update_bits_sub(rsc->slave, PHY_CMN_CLK_CFG1,
- 0x03, rsc->cached_cfg1);
+ 0x03, rsc->slave->cached_cfg1);
wmb(); /* ensure dsiclk_sel is always programmed before pll start */
/* Start PLL */
@@ -789,6 +1062,7 @@ static void dsi_pll_disable(struct dsi_pll_vco_clk *vco)
}
rsc->handoff_resources = false;
+ rsc->dfps_trigger = false;
pr_debug("stop PLL (%d)\n", rsc->index);
@@ -840,16 +1114,18 @@ static void vco_10nm_unprepare(struct clk_hw *hw)
/*
* During unprepare in continuous splash use case we want driver
* to pick all dividers instead of retaining bootloader configurations.
+ * Also handle use cases where dynamic refresh triggered before
+ * first suspend/resume.
*/
- if (!pll->handoff_resources) {
+ if (!pll->handoff_resources || pll->dfps_trigger) {
pll->cached_cfg0 = MDSS_PLL_REG_R(pll->phy_base,
- PHY_CMN_CLK_CFG0);
+ PHY_CMN_CLK_CFG0);
pll->cached_outdiv = MDSS_PLL_REG_R(pll->pll_base,
- PLL_PLL_OUTDIV_RATE);
+ PLL_PLL_OUTDIV_RATE);
pr_debug("cfg0=%d,cfg1=%d, outdiv=%d\n", pll->cached_cfg0,
- pll->cached_cfg1, pll->cached_outdiv);
+ pll->cached_cfg1, pll->cached_outdiv);
- pll->vco_cached_rate = clk_hw_get_rate(hw);
+ pll->vco_cached_rate = clk_get_rate(hw->clk);
}
/*
@@ -859,9 +1135,15 @@ static void vco_10nm_unprepare(struct clk_hw *hw)
* does not change.For such usecases, we need to ensure that the cached
* value is programmed prior to PLL being locked
*/
- if (pll->handoff_resources)
+ if (pll->handoff_resources) {
pll->cached_cfg1 = MDSS_PLL_REG_R(pll->phy_base,
- PHY_CMN_CLK_CFG1);
+ PHY_CMN_CLK_CFG1);
+ if (pll->slave)
+ pll->slave->cached_cfg1 =
+ MDSS_PLL_REG_R(pll->slave->phy_base,
+ PHY_CMN_CLK_CFG1);
+ }
+
dsi_pll_disable(vco);
mdss_pll_resource_enable(pll, false);
}
@@ -889,7 +1171,7 @@ static int vco_10nm_prepare(struct clk_hw *hw)
}
if ((pll->vco_cached_rate != 0) &&
- (pll->vco_cached_rate == clk_hw_get_rate(hw))) {
+ (pll->vco_cached_rate == clk_get_rate(hw->clk))) {
rc = hw->init->ops->set_rate(hw, pll->vco_cached_rate,
pll->vco_cached_rate);
if (rc) {
@@ -902,6 +1184,9 @@ static int vco_10nm_prepare(struct clk_hw *hw)
pll->cached_cfg1);
MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG0,
pll->cached_cfg0);
+ if (pll->slave)
+ MDSS_PLL_REG_W(pll->slave->phy_base, PHY_CMN_CLK_CFG0,
+ pll->cached_cfg0);
MDSS_PLL_REG_W(pll->pll_base, PLL_PLL_OUTDIV_RATE,
pll->cached_outdiv);
}
@@ -1037,6 +1322,14 @@ static void pixel_clk_set_div_sub(struct mdss_pll_resources *pll, int div)
reg_val &= ~0xF0;
reg_val |= (div << 4);
MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG0, reg_val);
+
+ /*
+ * cache the current parent index for cases where parent
+ * is not changing but rate is changing. In that case
+ * clock framework won't call parent_set and hence dsiclk_sel
+ * bit won't be programmed. e.g. dfps update use case.
+ */
+ pll->cached_cfg0 = reg_val;
}
static int pixel_clk_set_div(void *context, unsigned int reg, unsigned int div)
@@ -1174,6 +1467,12 @@ static const struct clk_ops clk_ops_vco_10nm = {
.unprepare = vco_10nm_unprepare,
};
+static const struct clk_ops clk_ops_shadow_vco_10nm = {
+ .recalc_rate = vco_10nm_recalc_rate,
+ .set_rate = shadow_vco_10nm_set_rate,
+ .round_rate = vco_10nm_round_rate,
+};
+
static struct regmap_bus mdss_mux_regmap_bus = {
.reg_write = mdss_set_mux_sel,
.reg_read = mdss_get_mux_sel,
@@ -1248,6 +1547,19 @@ static struct dsi_pll_vco_clk dsi0pll_vco_clk = {
},
};
+static struct dsi_pll_vco_clk dsi0pll_shadow_vco_clk = {
+ .ref_clk_rate = 19200000UL,
+ .min_rate = 1000000000UL,
+ .max_rate = 3500000000UL,
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi0pll_shadow_vco_clk",
+ .parent_names = (const char *[]){"bi_tcxo"},
+ .num_parents = 1,
+ .ops = &clk_ops_shadow_vco_10nm,
+ .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
+
static struct dsi_pll_vco_clk dsi1pll_vco_clk = {
.ref_clk_rate = 19200000UL,
.min_rate = 1000000000UL,
@@ -1261,6 +1573,19 @@ static struct dsi_pll_vco_clk dsi1pll_vco_clk = {
},
};
+static struct dsi_pll_vco_clk dsi1pll_shadow_vco_clk = {
+ .ref_clk_rate = 19200000UL,
+ .min_rate = 1000000000UL,
+ .max_rate = 3500000000UL,
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi1pll_shadow_vco_clk",
+ .parent_names = (const char *[]){"bi_tcxo"},
+ .num_parents = 1,
+ .ops = &clk_ops_shadow_vco_10nm,
+ .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
+
static struct clk_regmap_div dsi0pll_pll_out_div = {
.reg = PLL_PLL_OUTDIV_RATE,
.shift = 0,
@@ -1277,6 +1602,23 @@ static struct clk_regmap_div dsi0pll_pll_out_div = {
},
};
+static struct clk_regmap_div dsi0pll_shadow_pll_out_div = {
+ .reg = PLL_PLL_OUTDIV_RATE,
+ .shift = 0,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi0pll_shadow_pll_out_div",
+ .parent_names = (const char *[]){
+ "dsi0pll_shadow_vco_clk"},
+ .num_parents = 1,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .ops = &clk_regmap_div_ops,
+ },
+ },
+};
+
static struct clk_regmap_div dsi1pll_pll_out_div = {
.reg = PLL_PLL_OUTDIV_RATE,
.shift = 0,
@@ -1293,6 +1635,23 @@ static struct clk_regmap_div dsi1pll_pll_out_div = {
},
};
+static struct clk_regmap_div dsi1pll_shadow_pll_out_div = {
+ .reg = PLL_PLL_OUTDIV_RATE,
+ .shift = 0,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi1pll_shadow_pll_out_div",
+ .parent_names = (const char *[]){
+ "dsi1pll_shadow_vco_clk"},
+ .num_parents = 1,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .ops = &clk_regmap_div_ops,
+ },
+ },
+};
+
static struct clk_regmap_div dsi0pll_bitclk_src = {
.shift = 0,
.width = 4,
@@ -1307,6 +1666,21 @@ static struct clk_regmap_div dsi0pll_bitclk_src = {
},
};
+static struct clk_regmap_div dsi0pll_shadow_bitclk_src = {
+ .shift = 0,
+ .width = 4,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi0pll_shadow_bitclk_src",
+ .parent_names = (const char *[]){
+ "dsi0pll_shadow_pll_out_div"},
+ .num_parents = 1,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .ops = &clk_regmap_div_ops,
+ },
+ },
+};
+
static struct clk_regmap_div dsi1pll_bitclk_src = {
.shift = 0,
.width = 4,
@@ -1321,6 +1695,21 @@ static struct clk_regmap_div dsi1pll_bitclk_src = {
},
};
+static struct clk_regmap_div dsi1pll_shadow_bitclk_src = {
+ .shift = 0,
+ .width = 4,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi1pll_shadow_bitclk_src",
+ .parent_names = (const char *[]){
+ "dsi1pll_shadow_pll_out_div"},
+ .num_parents = 1,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .ops = &clk_regmap_div_ops,
+ },
+ },
+};
+
static struct clk_fixed_factor dsi0pll_post_vco_div = {
.div = 4,
.mult = 1,
@@ -1328,7 +1717,19 @@ static struct clk_fixed_factor dsi0pll_post_vco_div = {
.name = "dsi0pll_post_vco_div",
.parent_names = (const char *[]){"dsi0pll_pll_out_div"},
.num_parents = 1,
- .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .flags = CLK_GET_RATE_NOCACHE,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_fixed_factor dsi0pll_shadow_post_vco_div = {
+ .div = 4,
+ .mult = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi0pll_shadow_post_vco_div",
+ .parent_names = (const char *[]){"dsi0pll_shadow_pll_out_div"},
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE,
.ops = &clk_fixed_factor_ops,
},
};
@@ -1340,7 +1741,19 @@ static struct clk_fixed_factor dsi1pll_post_vco_div = {
.name = "dsi1pll_post_vco_div",
.parent_names = (const char *[]){"dsi1pll_pll_out_div"},
.num_parents = 1,
- .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .flags = CLK_GET_RATE_NOCACHE,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_fixed_factor dsi1pll_shadow_post_vco_div = {
+ .div = 4,
+ .mult = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi1pll_shadow_post_vco_div",
+ .parent_names = (const char *[]){"dsi1pll_shadow_pll_out_div"},
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE,
.ops = &clk_fixed_factor_ops,
},
};
@@ -1357,6 +1770,18 @@ static struct clk_fixed_factor dsi0pll_byteclk_src = {
},
};
+static struct clk_fixed_factor dsi0pll_shadow_byteclk_src = {
+ .div = 8,
+ .mult = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi0pll_shadow_byteclk_src",
+ .parent_names = (const char *[]){"dsi0pll_shadow_bitclk_src"},
+ .num_parents = 1,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
static struct clk_fixed_factor dsi1pll_byteclk_src = {
.div = 8,
.mult = 1,
@@ -1369,6 +1794,18 @@ static struct clk_fixed_factor dsi1pll_byteclk_src = {
},
};
+static struct clk_fixed_factor dsi1pll_shadow_byteclk_src = {
+ .div = 8,
+ .mult = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi1pll_shadow_byteclk_src",
+ .parent_names = (const char *[]){"dsi1pll_shadow_bitclk_src"},
+ .num_parents = 1,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
static struct clk_fixed_factor dsi0pll_post_bit_div = {
.div = 2,
.mult = 1,
@@ -1381,6 +1818,18 @@ static struct clk_fixed_factor dsi0pll_post_bit_div = {
},
};
+static struct clk_fixed_factor dsi0pll_shadow_post_bit_div = {
+ .div = 2,
+ .mult = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi0pll_shadow_post_bit_div",
+ .parent_names = (const char *[]){"dsi0pll_shadow_bitclk_src"},
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
static struct clk_fixed_factor dsi1pll_post_bit_div = {
.div = 2,
.mult = 1,
@@ -1393,15 +1842,29 @@ static struct clk_fixed_factor dsi1pll_post_bit_div = {
},
};
+static struct clk_fixed_factor dsi1pll_shadow_post_bit_div = {
+ .div = 2,
+ .mult = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi1pll_shadow_post_bit_div",
+ .parent_names = (const char *[]){"dsi1pll_shadow_bitclk_src"},
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
static struct clk_regmap_mux dsi0pll_byteclk_mux = {
.shift = 0,
.width = 1,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "dsi0_phy_pll_out_byteclk",
- .parent_names = (const char *[]){"dsi0pll_byteclk_src"},
- .num_parents = 1,
- .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .parent_names = (const char *[]){"dsi0pll_byteclk_src",
+ "dsi0pll_shadow_byteclk_src"},
+ .num_parents = 2,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT |
+ CLK_SET_RATE_NO_REPARENT),
.ops = &clk_regmap_mux_closest_ops,
},
},
@@ -1413,9 +1876,11 @@ static struct clk_regmap_mux dsi1pll_byteclk_mux = {
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "dsi1_phy_pll_out_byteclk",
- .parent_names = (const char *[]){"dsi1pll_byteclk_src"},
- .num_parents = 1,
- .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .parent_names = (const char *[]){"dsi1pll_byteclk_src",
+ "dsi1pll_shadow_byteclk_src"},
+ .num_parents = 2,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT |
+ CLK_SET_RATE_NO_REPARENT),
.ops = &clk_regmap_mux_closest_ops,
},
},
@@ -1439,6 +1904,25 @@ static struct clk_regmap_mux dsi0pll_pclk_src_mux = {
},
};
+static struct clk_regmap_mux dsi0pll_shadow_pclk_src_mux = {
+ .reg = PHY_CMN_CLK_CFG1,
+ .shift = 0,
+ .width = 2,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi0pll_shadow_pclk_src_mux",
+ .parent_names = (const char *[]){
+ "dsi0pll_shadow_bitclk_src",
+ "dsi0pll_shadow_post_bit_div",
+ "dsi0pll_shadow_pll_out_div",
+ "dsi0pll_shadow_post_vco_div"},
+ .num_parents = 4,
+ .flags = CLK_GET_RATE_NOCACHE,
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
static struct clk_regmap_mux dsi1pll_pclk_src_mux = {
.reg = PHY_CMN_CLK_CFG1,
.shift = 0,
@@ -1457,6 +1941,25 @@ static struct clk_regmap_mux dsi1pll_pclk_src_mux = {
},
};
+static struct clk_regmap_mux dsi1pll_shadow_pclk_src_mux = {
+ .reg = PHY_CMN_CLK_CFG1,
+ .shift = 0,
+ .width = 2,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi1pll_shadow_pclk_src_mux",
+ .parent_names = (const char *[]){
+ "dsi1pll_shadow_bitclk_src",
+ "dsi1pll_shadow_post_bit_div",
+ "dsi1pll_shadow_pll_out_div",
+ "dsi1pll_shadow_post_vco_div"},
+ .num_parents = 4,
+ .flags = CLK_GET_RATE_NOCACHE,
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
static struct clk_regmap_div dsi0pll_pclk_src = {
.shift = 0,
.width = 4,
@@ -1472,6 +1975,21 @@ static struct clk_regmap_div dsi0pll_pclk_src = {
},
};
+static struct clk_regmap_div dsi0pll_shadow_pclk_src = {
+ .shift = 0,
+ .width = 4,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi0pll_shadow_pclk_src",
+ .parent_names = (const char *[]){
+ "dsi0pll_shadow_pclk_src_mux"},
+ .num_parents = 1,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .ops = &clk_regmap_div_ops,
+ },
+ },
+};
+
static struct clk_regmap_div dsi1pll_pclk_src = {
.shift = 0,
.width = 4,
@@ -1487,15 +2005,32 @@ static struct clk_regmap_div dsi1pll_pclk_src = {
},
};
+static struct clk_regmap_div dsi1pll_shadow_pclk_src = {
+ .shift = 0,
+ .width = 4,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "dsi1pll_shadow_pclk_src",
+ .parent_names = (const char *[]){
+ "dsi1pll_shadow_pclk_src_mux"},
+ .num_parents = 1,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .ops = &clk_regmap_div_ops,
+ },
+ },
+};
+
static struct clk_regmap_mux dsi0pll_pclk_mux = {
.shift = 0,
.width = 1,
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "dsi0_phy_pll_out_dsiclk",
- .parent_names = (const char *[]){"dsi0pll_pclk_src"},
- .num_parents = 1,
- .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .parent_names = (const char *[]){"dsi0pll_pclk_src",
+ "dsi0pll_shadow_pclk_src"},
+ .num_parents = 2,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT |
+ CLK_SET_RATE_NO_REPARENT),
.ops = &clk_regmap_mux_closest_ops,
},
},
@@ -1507,9 +2042,11 @@ static struct clk_regmap_mux dsi1pll_pclk_mux = {
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "dsi1_phy_pll_out_dsiclk",
- .parent_names = (const char *[]){"dsi1pll_pclk_src"},
- .num_parents = 1,
- .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+ .parent_names = (const char *[]){"dsi1pll_pclk_src",
+ "dsi1pll_shadow_pclk_src"},
+ .num_parents = 2,
+ .flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT |
+ CLK_SET_RATE_NO_REPARENT),
.ops = &clk_regmap_mux_closest_ops,
},
},
@@ -1526,6 +2063,14 @@ static struct clk_hw *mdss_dsi_pllcc_10nm[] = {
[PCLK_SRC_MUX_0_CLK] = &dsi0pll_pclk_src_mux.clkr.hw,
[PCLK_SRC_0_CLK] = &dsi0pll_pclk_src.clkr.hw,
[PCLK_MUX_0_CLK] = &dsi0pll_pclk_mux.clkr.hw,
+ [SHADOW_VCO_CLK_0] = &dsi0pll_shadow_vco_clk.hw,
+ [SHADOW_PLL_OUT_DIV_0_CLK] = &dsi0pll_shadow_pll_out_div.clkr.hw,
+ [SHADOW_BITCLK_SRC_0_CLK] = &dsi0pll_shadow_bitclk_src.clkr.hw,
+ [SHADOW_BYTECLK_SRC_0_CLK] = &dsi0pll_shadow_byteclk_src.hw,
+ [SHADOW_POST_BIT_DIV_0_CLK] = &dsi0pll_shadow_post_bit_div.hw,
+ [SHADOW_POST_VCO_DIV_0_CLK] = &dsi0pll_shadow_post_vco_div.hw,
+ [SHADOW_PCLK_SRC_MUX_0_CLK] = &dsi0pll_shadow_pclk_src_mux.clkr.hw,
+ [SHADOW_PCLK_SRC_0_CLK] = &dsi0pll_shadow_pclk_src.clkr.hw,
[VCO_CLK_1] = &dsi1pll_vco_clk.hw,
[PLL_OUT_DIV_1_CLK] = &dsi1pll_pll_out_div.clkr.hw,
[BITCLK_SRC_1_CLK] = &dsi1pll_bitclk_src.clkr.hw,
@@ -1536,6 +2081,14 @@ static struct clk_hw *mdss_dsi_pllcc_10nm[] = {
[PCLK_SRC_MUX_1_CLK] = &dsi1pll_pclk_src_mux.clkr.hw,
[PCLK_SRC_1_CLK] = &dsi1pll_pclk_src.clkr.hw,
[PCLK_MUX_1_CLK] = &dsi1pll_pclk_mux.clkr.hw,
+ [SHADOW_VCO_CLK_1] = &dsi1pll_shadow_vco_clk.hw,
+ [SHADOW_PLL_OUT_DIV_1_CLK] = &dsi1pll_shadow_pll_out_div.clkr.hw,
+ [SHADOW_BITCLK_SRC_1_CLK] = &dsi1pll_shadow_bitclk_src.clkr.hw,
+ [SHADOW_BYTECLK_SRC_1_CLK] = &dsi1pll_shadow_byteclk_src.hw,
+ [SHADOW_POST_BIT_DIV_1_CLK] = &dsi1pll_shadow_post_bit_div.hw,
+ [SHADOW_POST_VCO_DIV_1_CLK] = &dsi1pll_shadow_post_vco_div.hw,
+ [SHADOW_PCLK_SRC_MUX_1_CLK] = &dsi1pll_shadow_pclk_src_mux.clkr.hw,
+ [SHADOW_PCLK_SRC_1_CLK] = &dsi1pll_shadow_pclk_src.clkr.hw,
};
int dsi_pll_clock_register_10nm(struct platform_device *pdev,
@@ -1580,18 +2133,20 @@ int dsi_pll_clock_register_10nm(struct platform_device *pdev,
/* Establish client data */
if (ndx == 0) {
-
rmap = devm_regmap_init(&pdev->dev, &pll_regmap_bus,
pll_res, &dsi_pll_10nm_config);
dsi0pll_pll_out_div.clkr.regmap = rmap;
+ dsi0pll_shadow_pll_out_div.clkr.regmap = rmap;
rmap = devm_regmap_init(&pdev->dev, &bitclk_src_regmap_bus,
pll_res, &dsi_pll_10nm_config);
dsi0pll_bitclk_src.clkr.regmap = rmap;
+ dsi0pll_shadow_bitclk_src.clkr.regmap = rmap;
rmap = devm_regmap_init(&pdev->dev, &pclk_src_regmap_bus,
pll_res, &dsi_pll_10nm_config);
dsi0pll_pclk_src.clkr.regmap = rmap;
+ dsi0pll_shadow_pclk_src.clkr.regmap = rmap;
rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
pll_res, &dsi_pll_10nm_config);
@@ -1600,12 +2155,16 @@ int dsi_pll_clock_register_10nm(struct platform_device *pdev,
rmap = devm_regmap_init(&pdev->dev, &pclk_src_mux_regmap_bus,
pll_res, &dsi_pll_10nm_config);
dsi0pll_pclk_src_mux.clkr.regmap = rmap;
+ dsi0pll_shadow_pclk_src_mux.clkr.regmap = rmap;
+
rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
pll_res, &dsi_pll_10nm_config);
dsi0pll_byteclk_mux.clkr.regmap = rmap;
dsi0pll_vco_clk.priv = pll_res;
- for (i = VCO_CLK_0; i <= PCLK_MUX_0_CLK; i++) {
+ dsi0pll_shadow_vco_clk.priv = pll_res;
+
+ for (i = VCO_CLK_0; i <= SHADOW_PCLK_SRC_0_CLK; i++) {
clk = devm_clk_register(&pdev->dev,
mdss_dsi_pllcc_10nm[i]);
if (IS_ERR(clk)) {
@@ -1620,20 +2179,21 @@ int dsi_pll_clock_register_10nm(struct platform_device *pdev,
rc = of_clk_add_provider(pdev->dev.of_node,
of_clk_src_onecell_get, clk_data);
-
-
} else {
rmap = devm_regmap_init(&pdev->dev, &pll_regmap_bus,
pll_res, &dsi_pll_10nm_config);
dsi1pll_pll_out_div.clkr.regmap = rmap;
+ dsi1pll_shadow_pll_out_div.clkr.regmap = rmap;
rmap = devm_regmap_init(&pdev->dev, &bitclk_src_regmap_bus,
pll_res, &dsi_pll_10nm_config);
dsi1pll_bitclk_src.clkr.regmap = rmap;
+ dsi1pll_shadow_bitclk_src.clkr.regmap = rmap;
rmap = devm_regmap_init(&pdev->dev, &pclk_src_regmap_bus,
pll_res, &dsi_pll_10nm_config);
dsi1pll_pclk_src.clkr.regmap = rmap;
+ dsi1pll_shadow_pclk_src.clkr.regmap = rmap;
rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
pll_res, &dsi_pll_10nm_config);
@@ -1642,12 +2202,16 @@ int dsi_pll_clock_register_10nm(struct platform_device *pdev,
rmap = devm_regmap_init(&pdev->dev, &pclk_src_mux_regmap_bus,
pll_res, &dsi_pll_10nm_config);
dsi1pll_pclk_src_mux.clkr.regmap = rmap;
+ dsi1pll_shadow_pclk_src_mux.clkr.regmap = rmap;
+
rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
pll_res, &dsi_pll_10nm_config);
dsi1pll_byteclk_mux.clkr.regmap = rmap;
+
dsi1pll_vco_clk.priv = pll_res;
+ dsi1pll_shadow_vco_clk.priv = pll_res;
- for (i = VCO_CLK_1; i <= PCLK_MUX_1_CLK; i++) {
+ for (i = VCO_CLK_1; i <= SHADOW_PCLK_SRC_1_CLK; i++) {
clk = devm_clk_register(&pdev->dev,
mdss_dsi_pllcc_10nm[i]);
if (IS_ERR(clk)) {
diff --git a/drivers/clk/qcom/mdss/mdss-pll.h b/drivers/clk/qcom/mdss/mdss-pll.h
index 2f92270841ac..e4b5184cb537 100644
--- a/drivers/clk/qcom/mdss/mdss-pll.h
+++ b/drivers/clk/qcom/mdss/mdss-pll.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -35,6 +35,8 @@
writel_relaxed(PLL_CALC_DATA(addr0, addr1, data0, data1), \
(base) + (offset))
+#define upper_8_bit(x) ((((x) >> 2) & 0x100) >> 8)
+
enum {
MDSS_DSI_PLL_10NM,
MDSS_DP_PLL_10NM,
@@ -45,30 +47,23 @@ enum {
MDSS_PLL_TARGET_8996,
};
-#define DFPS_MAX_NUM_OF_FRAME_RATES 20
-
-struct dfps_panel_info {
- uint32_t enabled;
- uint32_t frame_rate_cnt;
- uint32_t frame_rate[DFPS_MAX_NUM_OF_FRAME_RATES]; /* hz */
-};
+#define DFPS_MAX_NUM_OF_FRAME_RATES 16
struct dfps_pll_codes {
uint32_t pll_codes_1;
uint32_t pll_codes_2;
+ uint32_t pll_codes_3;
};
struct dfps_codes_info {
uint32_t is_valid;
- uint32_t frame_rate; /* hz */
uint32_t clk_rate; /* hz */
struct dfps_pll_codes pll_codes;
};
struct dfps_info {
- struct dfps_panel_info panel_dfps;
+ uint32_t vco_rate_cnt;
struct dfps_codes_info codes_dfps[DFPS_MAX_NUM_OF_FRAME_RATES];
- void *dfps_fb_base;
};
struct mdss_pll_resources {
@@ -139,7 +134,7 @@ struct mdss_pll_resources {
/*
* caching the pll trim codes in the case of dynamic refresh
*/
- int cache_pll_trim_codes[2];
+ int cache_pll_trim_codes[3];
/*
* for maintaining the status of saving trim codes
@@ -181,6 +176,11 @@ struct mdss_pll_resources {
*/
struct dfps_info *dfps;
+ /*
+ * for cases where dfps trigger happens before first
+ * suspend/resume and handoff is not finished.
+ */
+ bool dfps_trigger;
};
struct mdss_pll_vco_calc {
diff --git a/drivers/crypto/msm/ice.c b/drivers/crypto/msm/ice.c
index f6470212d60f..2ab1197dc26b 100644
--- a/drivers/crypto/msm/ice.c
+++ b/drivers/crypto/msm/ice.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1791,7 +1791,7 @@ int qcom_ice_setup_ice_hw(const char *storage_type, int enable)
if (ice_dev == ERR_PTR(-EPROBE_DEFER))
return -EPROBE_DEFER;
- if (!ice_dev)
+ if (!ice_dev || (ice_dev->is_ice_enabled == false))
return ret;
if (enable)
diff --git a/drivers/crypto/msm/qcedev.c b/drivers/crypto/msm/qcedev.c
index 958fb9160c2d..6974e1038bc6 100644
--- a/drivers/crypto/msm/qcedev.c
+++ b/drivers/crypto/msm/qcedev.c
@@ -1,7 +1,7 @@
/*
* QTI CE device driver.
*
- * Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1227,8 +1227,7 @@ static int qcedev_vbuf_ablk_cipher_max_xfer(struct qcedev_async_req *areq,
goto exit;
}
- k_align_dst += creq->vbuf.dst[dst_i].len +
- byteoffset;
+ k_align_dst += creq->vbuf.dst[dst_i].len;
creq->data_len -= creq->vbuf.dst[dst_i].len;
dst_i++;
} else {
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 68418a6536b0..beba6635cd2c 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -143,4 +143,3 @@ obj-$(CONFIG_GPIO_ZYNQ) += gpio-zynq.o
obj-$(CONFIG_GPIO_ZX) += gpio-zx.o
obj-$(CONFIG_GPIO_LOONGSON1) += gpio-loongson1.o
obj-$(CONFIG_MSM_SMP2P) += gpio-msm-smp2p.o
-obj-$(CONFIG_MSM_SMP2P_TEST) += gpio-msm-smp2p-test.o
diff --git a/drivers/gpio/gpio-msm-smp2p-test.c b/drivers/gpio/gpio-msm-smp2p-test.c
deleted file mode 100644
index 1067c4aade39..000000000000
--- a/drivers/gpio/gpio-msm-smp2p-test.c
+++ /dev/null
@@ -1,763 +0,0 @@
-/* drivers/gpio/gpio-msm-smp2p-test.c
- *
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/of_gpio.h>
-#include <linux/of_irq.h>
-#include <linux/gpio.h>
-#include <linux/debugfs.h>
-#include <linux/completion.h>
-#include <linux/interrupt.h>
-#include <linux/bitmap.h>
-#include "../soc/qcom/smp2p_private.h"
-#include "../soc/qcom/smp2p_test_common.h"
-
-/* Interrupt callback data */
-struct gpio_info {
- int gpio_base_id;
- int irq_base_id;
-
- bool initialized;
- struct completion cb_completion;
- int cb_count;
- DECLARE_BITMAP(triggered_irqs, SMP2P_BITS_PER_ENTRY);
-};
-
-/* GPIO Inbound/Outbound callback info */
-struct gpio_inout {
- struct gpio_info in;
- struct gpio_info out;
-};
-
-static struct gpio_inout gpio_info[SMP2P_NUM_PROCS];
-
-/**
- * Init/reset the callback data.
- *
- * @info: Pointer to callback data
- */
-static void cb_data_reset(struct gpio_info *info)
-{
- int n;
-
- if (!info)
- return;
-
- if (!info->initialized) {
- init_completion(&info->cb_completion);
- info->initialized = true;
- }
- info->cb_count = 0;
-
- for (n = 0; n < SMP2P_BITS_PER_ENTRY; ++n)
- clear_bit(n, info->triggered_irqs);
-
- reinit_completion(&info->cb_completion);
-}
-
-static int smp2p_gpio_test_probe(struct platform_device *pdev)
-{
- int id;
- int cnt;
- struct device_node *node = pdev->dev.of_node;
- struct gpio_info *gpio_info_ptr = NULL;
-
- /*
- * NOTE: This does a string-lookup of the GPIO pin name and doesn't
- * actually directly link to the SMP2P GPIO driver since all
- * GPIO/Interrupt access must be through standard
- * Linux GPIO / Interrupt APIs.
- */
- if (strcmp("qcom,smp2pgpio_test_smp2p_1_in", node->name) == 0) {
- gpio_info_ptr = &gpio_info[SMP2P_MODEM_PROC].in;
- } else if (strcmp("qcom,smp2pgpio_test_smp2p_1_out", node->name) == 0) {
- gpio_info_ptr = &gpio_info[SMP2P_MODEM_PROC].out;
- } else if (strcmp("qcom,smp2pgpio_test_smp2p_2_in", node->name) == 0) {
- gpio_info_ptr = &gpio_info[SMP2P_AUDIO_PROC].in;
- } else if (strcmp("qcom,smp2pgpio_test_smp2p_2_out", node->name) == 0) {
- gpio_info_ptr = &gpio_info[SMP2P_AUDIO_PROC].out;
- } else if (strcmp("qcom,smp2pgpio_test_smp2p_3_in", node->name) == 0) {
- gpio_info_ptr = &gpio_info[SMP2P_SENSOR_PROC].in;
- } else if (strcmp("qcom,smp2pgpio_test_smp2p_3_out", node->name) == 0) {
- gpio_info_ptr = &gpio_info[SMP2P_SENSOR_PROC].out;
- } else if (strcmp("qcom,smp2pgpio_test_smp2p_4_in", node->name) == 0) {
- gpio_info_ptr = &gpio_info[SMP2P_WIRELESS_PROC].in;
- } else if (strcmp("qcom,smp2pgpio_test_smp2p_4_out", node->name) == 0) {
- gpio_info_ptr = &gpio_info[SMP2P_WIRELESS_PROC].out;
- } else if (strcmp("qcom,smp2pgpio_test_smp2p_5_in", node->name) == 0) {
- gpio_info_ptr = &gpio_info[SMP2P_CDSP_PROC].in;
- } else if (strcmp("qcom,smp2pgpio_test_smp2p_5_out", node->name) == 0) {
- gpio_info_ptr = &gpio_info[SMP2P_CDSP_PROC].out;
- } else if (strcmp("qcom,smp2pgpio_test_smp2p_7_in", node->name) == 0) {
- gpio_info_ptr = &gpio_info[SMP2P_TZ_PROC].in;
- } else if (strcmp("qcom,smp2pgpio_test_smp2p_7_out", node->name) == 0) {
- gpio_info_ptr = &gpio_info[SMP2P_TZ_PROC].out;
- } else if (strcmp("qcom,smp2pgpio_test_smp2p_15_in", node->name) == 0) {
- gpio_info_ptr = &gpio_info[SMP2P_REMOTE_MOCK_PROC].in;
- } else if (
- strcmp("qcom,smp2pgpio_test_smp2p_15_out", node->name) == 0) {
- gpio_info_ptr = &gpio_info[SMP2P_REMOTE_MOCK_PROC].out;
- } else {
- pr_err("%s: unable to match device type '%s'\n",
- __func__, node->name);
- return -ENODEV;
- }
-
- /* retrieve the GPIO and interrupt ID's */
- cnt = of_gpio_count(node);
- if (cnt && gpio_info_ptr) {
- /*
- * Instead of looping through all 32-bits, we can just get the
- * first pin to get the base IDs. This saves on the verbosity
- * of the device tree nodes as well.
- */
- id = of_get_gpio(node, 0);
- if (id == -EPROBE_DEFER)
- return id;
- gpio_info_ptr->gpio_base_id = id;
- gpio_info_ptr->irq_base_id = gpio_to_irq(id);
- }
- return 0;
-}
-
-/*
- * NOTE: Instead of match table and device driver, you may be able to just
- * call of_find_compatible_node() in your init function.
- */
-static const struct of_device_id msm_smp2p_match_table[] = {
- /* modem */
- {.compatible = "qcom,smp2pgpio_test_smp2p_1_out", },
- {.compatible = "qcom,smp2pgpio_test_smp2p_1_in", },
-
- /* audio (adsp) */
- {.compatible = "qcom,smp2pgpio_test_smp2p_2_out", },
- {.compatible = "qcom,smp2pgpio_test_smp2p_2_in", },
-
- /* sensor */
- {.compatible = "qcom,smp2pgpio_test_smp2p_3_out", },
- {.compatible = "qcom,smp2pgpio_test_smp2p_3_in", },
-
- /* wcnss */
- {.compatible = "qcom,smp2pgpio_test_smp2p_4_out", },
- {.compatible = "qcom,smp2pgpio_test_smp2p_4_in", },
-
- /* CDSP */
- {.compatible = "qcom,smp2pgpio_test_smp2p_5_out", },
- {.compatible = "qcom,smp2pgpio_test_smp2p_5_in", },
-
- /* TZ */
- {.compatible = "qcom,smp2pgpio_test_smp2p_7_out", },
- {.compatible = "qcom,smp2pgpio_test_smp2p_7_in", },
-
- /* mock loopback */
- {.compatible = "qcom,smp2pgpio_test_smp2p_15_out", },
- {.compatible = "qcom,smp2pgpio_test_smp2p_15_in", },
- {},
-};
-
-static struct platform_driver smp2p_gpio_driver = {
- .probe = smp2p_gpio_test_probe,
- .driver = {
- .name = "smp2pgpio_test",
- .owner = THIS_MODULE,
- .of_match_table = msm_smp2p_match_table,
- },
-};
-
-/**
- * smp2p_ut_local_gpio_out - Verify outbound functionality.
- *
- * @s: pointer to output file
- */
-static void smp2p_ut_local_gpio_out(struct seq_file *s)
-{
- int failed = 0;
- struct gpio_info *cb_info = &gpio_info[SMP2P_REMOTE_MOCK_PROC].out;
- int ret;
- int id;
- struct msm_smp2p_remote_mock *mock;
-
- seq_printf(s, "Running %s\n", __func__);
- do {
- /* initialize mock edge */
- ret = smp2p_reset_mock_edge();
- UT_ASSERT_INT(ret, ==, 0);
-
- mock = msm_smp2p_get_remote_mock();
- UT_ASSERT_PTR(mock, !=, NULL);
-
- mock->rx_interrupt_count = 0;
- memset(&mock->remote_item, 0,
- sizeof(struct smp2p_smem_item));
- smp2p_init_header((struct smp2p_smem *)&mock->remote_item,
- SMP2P_REMOTE_MOCK_PROC, SMP2P_APPS_PROC,
- 0, 1);
- strlcpy(mock->remote_item.entries[0].name, "smp2p",
- SMP2P_MAX_ENTRY_NAME);
- SMP2P_SET_ENT_VALID(
- mock->remote_item.header.valid_total_ent, 1);
- msm_smp2p_set_remote_mock_exists(true);
- mock->tx_interrupt();
-
- /* open GPIO entry */
- smp2p_gpio_open_test_entry("smp2p",
- SMP2P_REMOTE_MOCK_PROC, true);
-
- /* verify set/get functions */
- UT_ASSERT_INT(0, <, cb_info->gpio_base_id);
- for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
- int pin = cb_info->gpio_base_id + id;
-
- mock->rx_interrupt_count = 0;
- gpio_set_value(pin, 1);
- UT_ASSERT_INT(1, ==, mock->rx_interrupt_count);
- UT_ASSERT_INT(1, ==, gpio_get_value(pin));
-
- gpio_set_value(pin, 0);
- UT_ASSERT_INT(2, ==, mock->rx_interrupt_count);
- UT_ASSERT_INT(0, ==, gpio_get_value(pin));
- }
- if (failed)
- break;
-
- seq_puts(s, "\tOK\n");
- } while (0);
-
- if (failed) {
- pr_err("%s: Failed\n", __func__);
- seq_puts(s, "\tFailed\n");
- }
-
- smp2p_gpio_open_test_entry("smp2p",
- SMP2P_REMOTE_MOCK_PROC, false);
-}
-
-/**
- * smp2p_gpio_irq - Interrupt handler for inbound entries.
- *
- * @irq: Virtual IRQ being triggered
- * @data: Cookie data (struct gpio_info * in this case)
- * @returns: Number of bytes written
- */
-static irqreturn_t smp2p_gpio_irq(int irq, void *data)
-{
- struct gpio_info *gpio_ptr = (struct gpio_info *)data;
- int offset;
-
- if (!gpio_ptr) {
- pr_err("%s: gpio_ptr is NULL for irq %d\n", __func__, irq);
- return IRQ_HANDLED;
- }
-
- offset = irq - gpio_ptr->irq_base_id;
- if (offset >= 0 && offset < SMP2P_BITS_PER_ENTRY)
- set_bit(offset, gpio_ptr->triggered_irqs);
- else
- pr_err("%s: invalid irq offset base %d; irq %d\n",
- __func__, gpio_ptr->irq_base_id, irq);
-
- ++gpio_ptr->cb_count;
- complete(&gpio_ptr->cb_completion);
- return IRQ_HANDLED;
-}
-
-/**
- * smp2p_ut_local_gpio_in - Verify inbound functionality.
- *
- * @s: pointer to output file
- */
-static void smp2p_ut_local_gpio_in(struct seq_file *s)
-{
- int failed = 0;
- struct gpio_info *cb_info = &gpio_info[SMP2P_REMOTE_MOCK_PROC].in;
- int id;
- int ret;
- int virq;
- struct msm_smp2p_remote_mock *mock;
-
- seq_printf(s, "Running %s\n", __func__);
-
- cb_data_reset(cb_info);
- do {
- /* initialize mock edge */
- ret = smp2p_reset_mock_edge();
- UT_ASSERT_INT(ret, ==, 0);
-
- mock = msm_smp2p_get_remote_mock();
- UT_ASSERT_PTR(mock, !=, NULL);
-
- mock->rx_interrupt_count = 0;
- memset(&mock->remote_item, 0,
- sizeof(struct smp2p_smem_item));
- smp2p_init_header((struct smp2p_smem *)&mock->remote_item,
- SMP2P_REMOTE_MOCK_PROC, SMP2P_APPS_PROC,
- 0, 1);
- strlcpy(mock->remote_item.entries[0].name, "smp2p",
- SMP2P_MAX_ENTRY_NAME);
- SMP2P_SET_ENT_VALID(
- mock->remote_item.header.valid_total_ent, 1);
- msm_smp2p_set_remote_mock_exists(true);
- mock->tx_interrupt();
-
- smp2p_gpio_open_test_entry("smp2p",
- SMP2P_REMOTE_MOCK_PROC, true);
-
- /* verify set/get functions locally */
- UT_ASSERT_INT(0, <, cb_info->gpio_base_id);
- for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
- int pin;
- int current_value;
-
- /* verify pin value cannot be set */
- pin = cb_info->gpio_base_id + id;
- current_value = gpio_get_value(pin);
-
- gpio_set_value(pin, 0);
- UT_ASSERT_INT(current_value, ==, gpio_get_value(pin));
- gpio_set_value(pin, 1);
- UT_ASSERT_INT(current_value, ==, gpio_get_value(pin));
-
- /* verify no interrupts */
- UT_ASSERT_INT(0, ==, cb_info->cb_count);
- }
- if (failed)
- break;
-
- /* register for interrupts */
- UT_ASSERT_INT(0, <, cb_info->irq_base_id);
- for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
- virq = cb_info->irq_base_id + id;
- UT_ASSERT_PTR(NULL, !=, irq_to_desc(virq));
- ret = request_irq(virq,
- smp2p_gpio_irq, IRQF_TRIGGER_RISING,
- "smp2p_test", cb_info);
- UT_ASSERT_INT(0, ==, ret);
- }
- if (failed)
- break;
-
- /* verify both rising and falling edge interrupts */
- for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
- virq = cb_info->irq_base_id + id;
- irq_set_irq_type(virq, IRQ_TYPE_EDGE_BOTH);
- cb_data_reset(cb_info);
-
- /* verify rising-edge interrupt */
- mock->remote_item.entries[0].entry = 1 << id;
- mock->tx_interrupt();
- UT_ASSERT_INT(cb_info->cb_count, ==, 1);
- UT_ASSERT_INT(0, <,
- test_bit(id, cb_info->triggered_irqs));
- test_bit(id, cb_info->triggered_irqs);
-
- /* verify falling-edge interrupt */
- mock->remote_item.entries[0].entry = 0;
- mock->tx_interrupt();
- UT_ASSERT_INT(cb_info->cb_count, ==, 2);
- UT_ASSERT_INT(0, <,
- test_bit(id, cb_info->triggered_irqs));
- }
- if (failed)
- break;
-
- /* verify rising-edge interrupts */
- for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
- virq = cb_info->irq_base_id + id;
- irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);
- cb_data_reset(cb_info);
-
- /* verify only rising-edge interrupt is triggered */
- mock->remote_item.entries[0].entry = 1 << id;
- mock->tx_interrupt();
- UT_ASSERT_INT(cb_info->cb_count, ==, 1);
- UT_ASSERT_INT(0, <,
- test_bit(id, cb_info->triggered_irqs));
- test_bit(id, cb_info->triggered_irqs);
-
- mock->remote_item.entries[0].entry = 0;
- mock->tx_interrupt();
- UT_ASSERT_INT(cb_info->cb_count, ==, 1);
- UT_ASSERT_INT(0, <,
- test_bit(id, cb_info->triggered_irqs));
- }
- if (failed)
- break;
-
- /* verify falling-edge interrupts */
- for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
- virq = cb_info->irq_base_id + id;
- irq_set_irq_type(virq, IRQ_TYPE_EDGE_FALLING);
- cb_data_reset(cb_info);
-
- /* verify only rising-edge interrupt is triggered */
- mock->remote_item.entries[0].entry = 1 << id;
- mock->tx_interrupt();
- UT_ASSERT_INT(cb_info->cb_count, ==, 0);
- UT_ASSERT_INT(0, ==,
- test_bit(id, cb_info->triggered_irqs));
-
- mock->remote_item.entries[0].entry = 0;
- mock->tx_interrupt();
- UT_ASSERT_INT(cb_info->cb_count, ==, 1);
- UT_ASSERT_INT(0, <,
- test_bit(id, cb_info->triggered_irqs));
- }
- if (failed)
- break;
-
- seq_puts(s, "\tOK\n");
- } while (0);
-
- if (failed) {
- pr_err("%s: Failed\n", __func__);
- seq_puts(s, "\tFailed\n");
- }
-
- /* unregister for interrupts */
- if (cb_info->irq_base_id) {
- for (id = 0; id < SMP2P_BITS_PER_ENTRY; ++id)
- free_irq(cb_info->irq_base_id + id, cb_info);
- }
-
- smp2p_gpio_open_test_entry("smp2p",
- SMP2P_REMOTE_MOCK_PROC, false);
-}
-
-/**
- * smp2p_ut_local_gpio_in_update_open - Verify combined open/update.
- *
- * @s: pointer to output file
- *
- * If the remote side updates the SMP2P bits and sends before negotiation is
- * complete, then the UPDATE event will have to be delayed until negotiation is
- * complete. This should result in both the OPEN and UPDATE events coming in
- * right after each other and the behavior should be transparent to the clients
- * of SMP2P GPIO.
- */
-static void smp2p_ut_local_gpio_in_update_open(struct seq_file *s)
-{
- int failed = 0;
- struct gpio_info *cb_info = &gpio_info[SMP2P_REMOTE_MOCK_PROC].in;
- int id;
- int ret;
- int virq;
- struct msm_smp2p_remote_mock *mock;
-
- seq_printf(s, "Running %s\n", __func__);
-
- cb_data_reset(cb_info);
- do {
- /* initialize mock edge */
- ret = smp2p_reset_mock_edge();
- UT_ASSERT_INT(ret, ==, 0);
-
- mock = msm_smp2p_get_remote_mock();
- UT_ASSERT_PTR(mock, !=, NULL);
-
- mock->rx_interrupt_count = 0;
- memset(&mock->remote_item, 0,
- sizeof(struct smp2p_smem_item));
- smp2p_init_header((struct smp2p_smem *)&mock->remote_item,
- SMP2P_REMOTE_MOCK_PROC, SMP2P_APPS_PROC,
- 0, 1);
- strlcpy(mock->remote_item.entries[0].name, "smp2p",
- SMP2P_MAX_ENTRY_NAME);
- SMP2P_SET_ENT_VALID(
- mock->remote_item.header.valid_total_ent, 1);
-
- /* register for interrupts */
- smp2p_gpio_open_test_entry("smp2p",
- SMP2P_REMOTE_MOCK_PROC, true);
-
- UT_ASSERT_INT(0, <, cb_info->irq_base_id);
- for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
- virq = cb_info->irq_base_id + id;
- UT_ASSERT_PTR(NULL, !=, irq_to_desc(virq));
- ret = request_irq(virq,
- smp2p_gpio_irq, IRQ_TYPE_EDGE_BOTH,
- "smp2p_test", cb_info);
- UT_ASSERT_INT(0, ==, ret);
- }
- if (failed)
- break;
-
- /* update the state value and complete negotiation */
- mock->remote_item.entries[0].entry = 0xDEADDEAD;
- msm_smp2p_set_remote_mock_exists(true);
- mock->tx_interrupt();
-
- /* verify delayed state updates were processed */
- for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
- virq = cb_info->irq_base_id + id;
-
- UT_ASSERT_INT(cb_info->cb_count, >, 0);
- if (0x1 & (0xDEADDEAD >> id)) {
- /* rising edge should have been triggered */
- if (!test_bit(id, cb_info->triggered_irqs)) {
- seq_printf(s, "%s:%d bit %d clear, ",
- __func__, __LINE__, id);
- seq_puts(s, "expected set\n");
- failed = 1;
- break;
- }
- } else {
- /* edge should not have been triggered */
- if (test_bit(id, cb_info->triggered_irqs)) {
- seq_printf(s, "%s:%d bit %d set, ",
- __func__, __LINE__, id);
- seq_puts(s, "expected clear\n");
- failed = 1;
- break;
- }
- }
- }
- if (failed)
- break;
-
- seq_puts(s, "\tOK\n");
- } while (0);
-
- if (failed) {
- pr_err("%s: Failed\n", __func__);
- seq_puts(s, "\tFailed\n");
- }
-
- /* unregister for interrupts */
- if (cb_info->irq_base_id) {
- for (id = 0; id < SMP2P_BITS_PER_ENTRY; ++id)
- free_irq(cb_info->irq_base_id + id, cb_info);
- }
-
- smp2p_gpio_open_test_entry("smp2p",
- SMP2P_REMOTE_MOCK_PROC, false);
-}
-
-/**
- * smp2p_gpio_write_bits - writes value to each GPIO pin specified in mask.
- *
- * @gpio: gpio test structure
- * @mask: 1 = write gpio_value to this GPIO pin
- * @gpio_value: value to write to GPIO pin
- */
-static void smp2p_gpio_write_bits(struct gpio_info *gpio, uint32_t mask,
- int gpio_value)
-{
- int n;
-
- for (n = 0; n < SMP2P_BITS_PER_ENTRY; ++n) {
- if (mask & 0x1)
- gpio_set_value(gpio->gpio_base_id + n, gpio_value);
- mask >>= 1;
- }
-}
-
-static void smp2p_gpio_set_bits(struct gpio_info *gpio, uint32_t mask)
-{
- smp2p_gpio_write_bits(gpio, mask, 1);
-}
-
-static void smp2p_gpio_clr_bits(struct gpio_info *gpio, uint32_t mask)
-{
- smp2p_gpio_write_bits(gpio, mask, 0);
-}
-
-/**
- * smp2p_gpio_get_value - reads entire 32-bits of GPIO
- *
- * @gpio: gpio structure
- * @returns: 32 bit value of GPIO pins
- */
-static uint32_t smp2p_gpio_get_value(struct gpio_info *gpio)
-{
- int n;
- uint32_t value = 0;
-
- for (n = 0; n < SMP2P_BITS_PER_ENTRY; ++n) {
- if (gpio_get_value(gpio->gpio_base_id + n))
- value |= 1 << n;
- }
- return value;
-}
-
-/**
- * smp2p_ut_remote_inout_core - Verify inbound/outbound functionality.
- *
- * @s: pointer to output file
- * @remote_pid: Remote processor to test
- * @name: Name of the test for reporting
- *
- * This test verifies inbound/outbound functionality for the remote processor.
- */
-static void smp2p_ut_remote_inout_core(struct seq_file *s, int remote_pid,
- const char *name)
-{
- int failed = 0;
- uint32_t request;
- uint32_t response;
- struct gpio_info *cb_in;
- struct gpio_info *cb_out;
- int id;
- int ret;
-
- seq_printf(s, "Running %s for '%s' remote pid %d\n",
- __func__, smp2p_pid_to_name(remote_pid), remote_pid);
-
- cb_in = &gpio_info[remote_pid].in;
- cb_out = &gpio_info[remote_pid].out;
- cb_data_reset(cb_in);
- cb_data_reset(cb_out);
- do {
- /* open test entries */
- msm_smp2p_deinit_rmt_lpb_proc(remote_pid);
- smp2p_gpio_open_test_entry("smp2p", remote_pid, true);
-
- /* register for interrupts */
- UT_ASSERT_INT(0, <, cb_in->gpio_base_id);
- UT_ASSERT_INT(0, <, cb_in->irq_base_id);
- for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
- int virq = cb_in->irq_base_id + id;
-
- UT_ASSERT_PTR(NULL, !=, irq_to_desc(virq));
- ret = request_irq(virq,
- smp2p_gpio_irq,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- "smp2p_test", cb_in);
- UT_ASSERT_INT(0, ==, ret);
- }
- if (failed)
- break;
-
- /* write echo of data value 0 */
- UT_ASSERT_INT(0, <, cb_out->gpio_base_id);
- request = 0x0;
- SMP2P_SET_RMT_CMD_TYPE(request, 1);
- SMP2P_SET_RMT_CMD(request, SMP2P_LB_CMD_ECHO);
- SMP2P_SET_RMT_DATA(request, 0x0);
-
- smp2p_gpio_set_bits(cb_out, SMP2P_RMT_IGNORE_MASK);
- smp2p_gpio_clr_bits(cb_out, ~SMP2P_RMT_IGNORE_MASK);
- smp2p_gpio_set_bits(cb_out, request);
-
- UT_ASSERT_INT(cb_in->cb_count, ==, 0);
- smp2p_gpio_clr_bits(cb_out, SMP2P_RMT_IGNORE_MASK);
-
- /* verify response */
- do {
- /* wait for up to 32 changes */
- if (wait_for_completion_timeout(
- &cb_in->cb_completion, HZ / 2) == 0)
- break;
- reinit_completion(&cb_in->cb_completion);
- } while (cb_in->cb_count < 32);
- UT_ASSERT_INT(cb_in->cb_count, >, 0);
- response = smp2p_gpio_get_value(cb_in);
- SMP2P_SET_RMT_CMD_TYPE(request, 0);
- UT_ASSERT_HEX(request, ==, response);
-
- /* write echo of data value of all 1's */
- request = 0x0;
- SMP2P_SET_RMT_CMD_TYPE(request, 1);
- SMP2P_SET_RMT_CMD(request, SMP2P_LB_CMD_ECHO);
- SMP2P_SET_RMT_DATA(request, ~0);
-
- smp2p_gpio_set_bits(cb_out, SMP2P_RMT_IGNORE_MASK);
- cb_data_reset(cb_in);
- smp2p_gpio_clr_bits(cb_out, ~SMP2P_RMT_IGNORE_MASK);
- smp2p_gpio_set_bits(cb_out, request);
-
- UT_ASSERT_INT(cb_in->cb_count, ==, 0);
- smp2p_gpio_clr_bits(cb_out, SMP2P_RMT_IGNORE_MASK);
-
- /* verify response including 24 interrupts */
- do {
- UT_ASSERT_INT(
- (int)wait_for_completion_timeout(
- &cb_in->cb_completion, HZ / 2),
- >, 0);
- reinit_completion(&cb_in->cb_completion);
- } while (cb_in->cb_count < 24);
- response = smp2p_gpio_get_value(cb_in);
- SMP2P_SET_RMT_CMD_TYPE(request, 0);
- UT_ASSERT_HEX(request, ==, response);
- UT_ASSERT_INT(24, ==, cb_in->cb_count);
-
- seq_puts(s, "\tOK\n");
- } while (0);
-
- if (failed) {
- pr_err("%s: Failed\n", name);
- seq_puts(s, "\tFailed\n");
- }
-
- /* unregister for interrupts */
- if (cb_in->irq_base_id) {
- for (id = 0; id < SMP2P_BITS_PER_ENTRY; ++id)
- free_irq(cb_in->irq_base_id + id, cb_in);
- }
-
- smp2p_gpio_open_test_entry("smp2p", remote_pid, false);
- msm_smp2p_init_rmt_lpb_proc(remote_pid);
-}
-
-/**
- * smp2p_ut_remote_inout - Verify inbound/outbound functionality for all.
- *
- * @s: pointer to output file
- *
- * This test verifies inbound and outbound functionality for all
- * configured remote processor.
- */
-static void smp2p_ut_remote_inout(struct seq_file *s)
-{
- struct smp2p_interrupt_config *int_cfg;
- int pid;
-
- int_cfg = smp2p_get_interrupt_config();
- if (!int_cfg) {
- seq_puts(s, "Remote processor config unavailable\n");
- return;
- }
-
- for (pid = 0; pid < SMP2P_NUM_PROCS; ++pid) {
- if (!int_cfg[pid].is_configured)
- continue;
-
- smp2p_ut_remote_inout_core(s, pid, __func__);
- }
-}
-
-static int __init smp2p_debugfs_init(void)
-{
- /* register GPIO pins */
- (void)platform_driver_register(&smp2p_gpio_driver);
-
- /*
- * Add Unit Test entries.
- *
- * The idea with unit tests is that you can run all of them
- * from ADB shell by doing:
- * adb shell
- * cat ut*
- *
- * And if particular tests fail, you can then repeatedly run the
- * failing tests as you debug and resolve the failing test.
- */
- smp2p_debug_create("ut_local_gpio_out", smp2p_ut_local_gpio_out);
- smp2p_debug_create("ut_local_gpio_in", smp2p_ut_local_gpio_in);
- smp2p_debug_create("ut_local_gpio_in_update_open",
- smp2p_ut_local_gpio_in_update_open);
- smp2p_debug_create("ut_remote_gpio_inout", smp2p_ut_remote_inout);
- return 0;
-}
-late_initcall(smp2p_debugfs_init);
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c
index 78bea02955fb..e21614aa41e9 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.c
+++ b/drivers/gpu/drm/msm/dp/dp_debug.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -49,6 +49,7 @@ struct dp_debug_private {
struct device *dev;
struct work_struct sim_work;
struct dp_debug dp_debug;
+ struct mutex lock;
};
static int dp_debug_get_edid_buf(struct dp_debug_private *debug)
@@ -98,13 +99,15 @@ static ssize_t dp_debug_write_edid(struct file *file,
if (!debug)
return -ENODEV;
+ mutex_lock(&debug->lock);
+
if (*ppos)
goto bail;
size = min_t(size_t, count, SZ_1K);
buf = kzalloc(size, GFP_KERNEL);
- if (!buf) {
+ if (ZERO_OR_NULL_PTR(buf)) {
rc = -ENOMEM;
goto bail;
}
@@ -148,6 +151,7 @@ bail:
if (!debug->dp_debug.sim_mode)
debug->panel->set_edid(debug->panel, edid);
+ mutex_unlock(&debug->lock);
return rc;
}
@@ -166,13 +170,15 @@ static ssize_t dp_debug_write_dpcd(struct file *file,
if (!debug)
return -ENODEV;
+ mutex_lock(&debug->lock);
+
if (*ppos)
goto bail;
size = min_t(size_t, count, SZ_2K);
buf = kzalloc(size, GFP_KERNEL);
- if (!buf) {
+ if (ZERO_OR_NULL_PTR(buf)) {
rc = -ENOMEM;
goto bail;
}
@@ -230,6 +236,7 @@ bail:
else
debug->panel->set_dpcd(debug->panel, dpcd);
+ mutex_unlock(&debug->lock);
return rc;
}
@@ -493,7 +500,7 @@ static ssize_t dp_debug_read_edid_modes(struct file *file,
goto error;
buf = kzalloc(SZ_4K, GFP_KERNEL);
- if (!buf) {
+ if (ZERO_OR_NULL_PTR(buf)) {
rc = -ENOMEM;
goto error;
}
@@ -538,7 +545,7 @@ static ssize_t dp_debug_read_info(struct file *file, char __user *user_buff,
return 0;
buf = kzalloc(SZ_4K, GFP_KERNEL);
- if (!buf)
+ if (ZERO_OR_NULL_PTR(buf))
return -ENOMEM;
rc = snprintf(buf + len, max_size, "\tstate=0x%x\n", debug->aux->state);
@@ -624,7 +631,7 @@ static ssize_t dp_debug_bw_code_read(struct file *file,
return 0;
buf = kzalloc(SZ_4K, GFP_KERNEL);
- if (!buf)
+ if (ZERO_OR_NULL_PTR(buf))
return -ENOMEM;
len += snprintf(buf + len, (SZ_4K - len),
@@ -745,7 +752,7 @@ static ssize_t dp_debug_read_hdr(struct file *file,
goto error;
buf = kzalloc(SZ_4K, GFP_KERNEL);
- if (!buf) {
+ if (ZERO_OR_NULL_PTR(buf)) {
rc = -ENOMEM;
goto error;
}
@@ -873,6 +880,8 @@ static ssize_t dp_debug_write_sim(struct file *file,
if (*ppos)
return 0;
+ mutex_lock(&debug->lock);
+
/* Leave room for termination char */
len = min_t(size_t, count, SZ_8 - 1);
if (copy_from_user(buf, user_buff, len))
@@ -906,9 +915,11 @@ static ssize_t dp_debug_write_sim(struct file *file,
debug->aux->set_sim_mode(debug->aux, debug->dp_debug.sim_mode,
debug->edid, debug->dpcd);
end:
+ mutex_unlock(&debug->lock);
return len;
error:
devm_kfree(debug->dev, debug->edid);
+ mutex_unlock(&debug->lock);
return len;
}
@@ -1272,6 +1283,8 @@ struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
dp_debug->hdisplay = 0;
dp_debug->vrefresh = 0;
+ mutex_init(&debug->lock);
+
rc = dp_debug_init(dp_debug);
if (rc) {
devm_kfree(dev, debug);
@@ -1308,6 +1321,8 @@ void dp_debug_put(struct dp_debug *dp_debug)
dp_debug_deinit(dp_debug);
+ mutex_destroy(&debug->lock);
+
if (debug->edid)
devm_kfree(debug->dev, debug->edid);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
index d083e72cbaa7..efb36bf9955a 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
@@ -69,6 +69,9 @@ static void dsi_catalog_cmn_init(struct dsi_ctrl_hw *ctrl,
ctrl->ops.get_hw_version = dsi_ctrl_hw_cmn_get_hw_version;
ctrl->ops.wait_for_cmd_mode_mdp_idle =
dsi_ctrl_hw_cmn_wait_for_cmd_mode_mdp_idle;
+ ctrl->ops.set_continuous_clk = dsi_ctrl_hw_cmn_set_continuous_clk;
+ ctrl->ops.wait4dynamic_refresh_done =
+ dsi_ctrl_hw_cmn_wait4dynamic_refresh_done;
switch (version) {
case DSI_CTRL_VERSION_1_4:
@@ -214,8 +217,17 @@ static void dsi_catalog_phy_3_0_init(struct dsi_phy_hw *phy)
phy->ops.ulps_ops.is_lanes_in_ulps =
dsi_phy_hw_v3_0_is_lanes_in_ulps;
phy->ops.phy_timing_val = dsi_phy_hw_timing_val_v3_0;
+ phy->ops.clamp_ctrl = dsi_phy_hw_v3_0_clamp_ctrl;
phy->ops.phy_lane_reset = dsi_phy_hw_v3_0_lane_reset;
phy->ops.toggle_resync_fifo = dsi_phy_hw_v3_0_toggle_resync_fifo;
+ phy->ops.dyn_refresh_ops.dyn_refresh_config =
+ dsi_phy_hw_v3_0_dyn_refresh_config;
+ phy->ops.dyn_refresh_ops.dyn_refresh_pipe_delay =
+ dsi_phy_hw_v3_0_dyn_refresh_pipe_delay;
+ phy->ops.dyn_refresh_ops.dyn_refresh_helper =
+ dsi_phy_hw_v3_0_dyn_refresh_helper;
+ phy->ops.dyn_refresh_ops.cache_phy_timings =
+ dsi_phy_hw_v3_0_cache_phy_timings;
}
/**
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
index 0e2db4304a0d..90dfc97cd6b6 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
@@ -66,15 +66,17 @@ int dsi_phy_timing_calc_init(struct dsi_phy_hw *phy,
* @mode: DSI mode information.
* @host: DSI host configuration.
* @timing: DSI phy lane configurations.
+ * @use_mode_bit_clk: Boolean to indicate whether to recalculate bit clk.
*
* This function setups the catalog information in the dsi_phy_hw object.
*
* return: error code for failure and 0 for success.
*/
int dsi_phy_hw_calculate_timing_params(struct dsi_phy_hw *phy,
- struct dsi_mode_info *mode,
- struct dsi_host_common_cfg *host,
- struct dsi_phy_per_lane_cfgs *timing);
+ struct dsi_mode_info *mode,
+ struct dsi_host_common_cfg *host,
+ struct dsi_phy_per_lane_cfgs *timing,
+ bool use_mode_bit_clk);
/* Definitions for 14nm PHY hardware driver */
void dsi_phy_hw_v2_0_regulator_enable(struct dsi_phy_hw *phy,
@@ -102,6 +104,7 @@ u32 dsi_phy_hw_v3_0_get_lanes_in_ulps(struct dsi_phy_hw *phy);
bool dsi_phy_hw_v3_0_is_lanes_in_ulps(u32 lanes, u32 ulps_lanes);
int dsi_phy_hw_timing_val_v3_0(struct dsi_phy_per_lane_cfgs *timing_cfg,
u32 *timing_val, u32 size);
+void dsi_phy_hw_v3_0_clamp_ctrl(struct dsi_phy_hw *phy, bool enable);
int dsi_phy_hw_v3_0_lane_reset(struct dsi_phy_hw *phy);
void dsi_phy_hw_v3_0_toggle_resync_fifo(struct dsi_phy_hw *phy);
@@ -222,4 +225,16 @@ void dsi_ctrl_hw_kickoff_non_embedded_mode(struct dsi_ctrl_hw *ctrl,
/* Definitions specific to 2.2 DSI controller hardware */
bool dsi_ctrl_hw_22_get_cont_splash_status(struct dsi_ctrl_hw *ctrl);
+void dsi_ctrl_hw_cmn_set_continuous_clk(struct dsi_ctrl_hw *ctrl, bool enable);
+
+/* dynamic refresh specific functions */
+void dsi_phy_hw_v3_0_dyn_refresh_helper(struct dsi_phy_hw *phy, u32 offset);
+void dsi_phy_hw_v3_0_dyn_refresh_config(struct dsi_phy_hw *phy,
+ struct dsi_phy_cfg *cfg, bool is_master);
+void dsi_phy_hw_v3_0_dyn_refresh_pipe_delay(struct dsi_phy_hw *phy,
+ struct dsi_dyn_clk_delay *delay);
+
+int dsi_ctrl_hw_cmn_wait4dynamic_refresh_done(struct dsi_ctrl_hw *ctrl);
+int dsi_phy_hw_v3_0_cache_phy_timings(struct dsi_phy_per_lane_cfgs *timings,
+ u32 *dst, u32 size);
#endif /* _DSI_CATALOG_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_clk.h b/drivers/gpu/drm/msm/dsi-staging/dsi_clk.h
index d89760ec0049..cdcb331dbf9d 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_clk.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_clk.h
@@ -43,6 +43,13 @@ enum dsi_link_clk_type {
DSI_LINK_CLK_MAX,
};
+enum dsi_link_clk_op_type {
+ DSI_LINK_CLK_SET_RATE = BIT(0),
+ DSI_LINK_CLK_PREPARE = BIT(1),
+ DSI_LINK_CLK_ENABLE = BIT(2),
+ DSI_LINK_CLK_START = BIT(0) | BIT(1) | BIT(2),
+};
+
enum dsi_clk_type {
DSI_CORE_CLK = BIT(0),
DSI_LINK_CLK = BIT(1),
@@ -50,6 +57,12 @@ enum dsi_clk_type {
DSI_CLKS_MAX = BIT(2),
};
+enum dsi_lclk_type {
+ DSI_LINK_NONE = 0,
+ DSI_LINK_LP_CLK = BIT(0),
+ DSI_LINK_HS_CLK = BIT(1),
+};
+
struct dsi_clk_ctrl_info {
enum dsi_clk_type clk_type;
enum dsi_clk_state clk_state;
@@ -82,23 +95,29 @@ struct dsi_core_clk_info {
};
/**
- * struct dsi_link_clk_info - Link clock information for DSI hardware.
- * @byte_clk: Handle to DSI byte clock.
- * @pixel_clk: Handle to DSI pixel clock.
- * @esc_clk: Handle to DSI escape clock.
+ * struct dsi_link_hs_clk_info - Set of high speed link clocks for DSI HW
+ * @byte_clk: Handle to DSI byte_clk.
+ * @pixel_clk: Handle to DSI pixel_clk.
* @byte_intf_clk: Handle to DSI byte intf. clock.
*/
-struct dsi_link_clk_info {
+struct dsi_link_hs_clk_info {
struct clk *byte_clk;
struct clk *pixel_clk;
- struct clk *esc_clk;
struct clk *byte_intf_clk;
};
/**
+ * struct dsi_link_lp_clk_info - Set of low power link clocks for DSI HW.
+ * @esc_clk: Handle to DSI escape clock.
+ */
+struct dsi_link_lp_clk_info {
+ struct clk *esc_clk;
+};
+
+/**
* struct link_clk_freq - Clock frequency information for Link clocks
- * @byte_clk_rate: Frequency of DSI byte clock in KHz.
- * @pixel_clk_rate: Frequency of DSI pixel clock in KHz.
+ * @byte_clk_rate: Frequency of DSI byte_clk in KHz.
+ * @pixel_clk_rate: Frequency of DSI pixel_clk in KHz.
* @esc_clk_rate: Frequency of DSI escape clock in KHz.
*/
struct link_clk_freq {
@@ -111,48 +130,56 @@ struct link_clk_freq {
* typedef *pre_clockoff_cb() - Callback before clock is turned off
* @priv: private data pointer.
* @clk_type: clock which is being turned off.
+ * @l_type: specifies if the clock is HS or LP type. Valid only for link clocks.
* @new_state: next state for the clock.
*
* @return: error code.
*/
typedef int (*pre_clockoff_cb)(void *priv,
enum dsi_clk_type clk_type,
+ enum dsi_lclk_type l_type,
enum dsi_clk_state new_state);
/**
* typedef *post_clockoff_cb() - Callback after clock is turned off
* @priv: private data pointer.
* @clk_type: clock which was turned off.
+ * @l_type: specifies if the clock is HS or LP type. Valid only for link clocks.
* @curr_state: current state for the clock.
*
* @return: error code.
*/
typedef int (*post_clockoff_cb)(void *priv,
enum dsi_clk_type clk_type,
+ enum dsi_lclk_type l_type,
enum dsi_clk_state curr_state);
/**
* typedef *post_clockon_cb() - Callback after clock is turned on
* @priv: private data pointer.
* @clk_type: clock which was turned on.
+ * @l_type: specifies if the clock is HS or LP type. Valid only for link clocks.
* @curr_state: current state for the clock.
*
* @return: error code.
*/
typedef int (*post_clockon_cb)(void *priv,
enum dsi_clk_type clk_type,
+ enum dsi_lclk_type l_type,
enum dsi_clk_state curr_state);
/**
* typedef *pre_clockon_cb() - Callback before clock is turned on
* @priv: private data pointer.
* @clk_type: clock which is being turned on.
+ * @l_type: specifies if the clock is HS or LP type.Valid only for link clocks.
* @new_state: next state for the clock.
*
* @return: error code.
*/
typedef int (*pre_clockon_cb)(void *priv,
enum dsi_clk_type clk_type,
+ enum dsi_lclk_type l_type,
enum dsi_clk_state new_state);
@@ -160,7 +187,8 @@ typedef int (*pre_clockon_cb)(void *priv,
* struct dsi_clk_info - clock information for DSI hardware.
* @name: client name.
* @c_clks[MAX_DSI_CTRL] array of core clock configurations
- * @l_clks[MAX_DSI_CTRL] array of link clock configurations
+ * @l_lp_clks[MAX_DSI_CTRL] array of low power(esc) clock configurations
+ * @l_hs_clks[MAX_DSI_CTRL] array of high speed clock configurations
* @bus_handle[MAX_DSI_CTRL] array of bus handles
* @ctrl_index[MAX_DSI_CTRL] array of DSI controller indexes mapped
* to core and link clock configurations
@@ -175,7 +203,8 @@ typedef int (*pre_clockon_cb)(void *priv,
struct dsi_clk_info {
char name[MAX_STRING_LEN];
struct dsi_core_clk_info c_clks[MAX_DSI_CTRL];
- struct dsi_link_clk_info l_clks[MAX_DSI_CTRL];
+ struct dsi_link_lp_clk_info l_lp_clks[MAX_DSI_CTRL];
+ struct dsi_link_hs_clk_info l_hs_clks[MAX_DSI_CTRL];
u32 bus_handle[MAX_DSI_CTRL];
u32 ctrl_index[MAX_DSI_CTRL];
pre_clockoff_cb pre_clkoff_cb;
@@ -189,8 +218,8 @@ struct dsi_clk_info {
/**
* struct dsi_clk_link_set - Pair of clock handles to describe link clocks
- * @byte_clk: Handle to DSi byte clock.
- * @pixel_clk: Handle to DSI pixel clock.
+ * @byte_clk: Handle to DSi byte_clk.
+ * @pixel_clk: Handle to DSI pixel_clk.
*/
struct dsi_clk_link_set {
struct clk *byte_clk;
@@ -263,10 +292,10 @@ int dsi_clk_set_link_frequencies(void *client, struct link_clk_freq freq,
/**
- * dsi_clk_set_pixel_clk_rate() - set frequency for pixel clock
+ * dsi_clk_set_pixel_clk_rate() - set frequency for pixel_clk
* @client: DSI clock client pointer.
- * @pixel_clk: Pixel clock rate in Hz.
- * @index: Index of the DSI controller.
+ * @pixel_clk: Pixel_clk rate in Hz.
+ * @index: Index of the DSI controller.
* return: error code in case of failure or 0 for success.
*/
int dsi_clk_set_pixel_clk_rate(void *client, u64 pixel_clk, u32 index);
@@ -288,4 +317,18 @@ int dsi_clk_set_byte_clk_rate(void *client, u64 byte_clk, u32 index);
*/
int dsi_clk_update_parent(struct dsi_clk_link_set *parent,
struct dsi_clk_link_set *child);
+
+/**
+ * dsi_clk_prepare_enable() - prepare and enable dsi src clocks
+ * @clk: list of src clocks.
+ *
+ * @return: Zero on success and err no on failure
+ */
+int dsi_clk_prepare_enable(struct dsi_clk_link_set *clk);
+
+/**
+ * dsi_clk_disable_unprepare() - disable and unprepare dsi src clocks
+ * @clk: list of src clocks.
+ */
+void dsi_clk_disable_unprepare(struct dsi_clk_link_set *clk);
#endif /* _DSI_CLK_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c b/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c
index 434d38306df5..9592603f7491 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c
@@ -24,7 +24,8 @@ struct dsi_core_clks {
};
struct dsi_link_clks {
- struct dsi_link_clk_info clks;
+ struct dsi_link_hs_clk_info hs_clks;
+ struct dsi_link_lp_clk_info lp_clks;
struct link_clk_freq freq;
};
@@ -112,8 +113,9 @@ int dsi_clk_set_link_frequencies(void *client, struct link_clk_freq freq,
/**
* dsi_clk_set_pixel_clk_rate() - set frequency for pixel clock
- * @clks: DSI link clock information.
- * @pixel_clk: Pixel clock rate in KHz.
+ * @clks: DSI link clock information.
+ * @pixel_clk: Pixel clock rate in KHz.
+ * @index: Index of the DSI controller.
*
* return: error code in case of failure or 0 for success.
*/
@@ -124,7 +126,7 @@ int dsi_clk_set_pixel_clk_rate(void *client, u64 pixel_clk, u32 index)
struct dsi_clk_mngr *mngr;
mngr = c->mngr;
- rc = clk_set_rate(mngr->link_clks[index].clks.pixel_clk, pixel_clk);
+ rc = clk_set_rate(mngr->link_clks[index].hs_clks.pixel_clk, pixel_clk);
if (rc)
pr_err("failed to set clk rate for pixel clk, rc=%d\n", rc);
else
@@ -135,9 +137,9 @@ int dsi_clk_set_pixel_clk_rate(void *client, u64 pixel_clk, u32 index)
/**
* dsi_clk_set_byte_clk_rate() - set frequency for byte clock
- * @client: DSI clock client pointer.
- * @byte_clk: Pixel clock rate in Hz.
- * @index: Index of the DSI controller.
+ * @client: DSI clock client pointer.
+ * @byte_clk: Byte clock rate in Hz.
+ * @index: Index of the DSI controller.
* return: error code in case of failure or 0 for success.
*/
int dsi_clk_set_byte_clk_rate(void *client, u64 byte_clk, u32 index)
@@ -145,16 +147,25 @@ int dsi_clk_set_byte_clk_rate(void *client, u64 byte_clk, u32 index)
int rc = 0;
struct dsi_clk_client_info *c = client;
struct dsi_clk_mngr *mngr;
+ u64 byte_intf_rate;
mngr = c->mngr;
- rc = clk_set_rate(mngr->link_clks[index].clks.byte_clk, byte_clk);
+ rc = clk_set_rate(mngr->link_clks[index].hs_clks.byte_clk, byte_clk);
if (rc)
pr_err("failed to set clk rate for byte clk, rc=%d\n", rc);
else
mngr->link_clks[index].freq.byte_clk_rate = byte_clk;
- return rc;
+ if (mngr->link_clks[index].hs_clks.byte_intf_clk) {
+ byte_intf_rate = mngr->link_clks[index].freq.byte_clk_rate / 2;
+ rc = clk_set_rate(mngr->link_clks[index].hs_clks.byte_intf_clk,
+ byte_intf_rate);
+ if (rc)
+ pr_err("failed to set clk rate for byte intf clk=%d\n",
+ rc);
+ }
+ return rc;
}
/**
@@ -182,6 +193,41 @@ error:
return rc;
}
+/**
+ * dsi_clk_prepare_enable() - prepare and enable dsi src clocks
+ * @clk: list of src clocks.
+ *
+ * @return: Zero on success and err no on failure.
+ */
+int dsi_clk_prepare_enable(struct dsi_clk_link_set *clk)
+{
+ int rc;
+
+ rc = clk_prepare_enable(clk->byte_clk);
+ if (rc) {
+ pr_err("failed to enable byte src clk %d\n", rc);
+ return rc;
+ }
+
+ rc = clk_prepare_enable(clk->pixel_clk);
+ if (rc) {
+ pr_err("failed to enable pixel src clk %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * dsi_clk_disable_unprepare() - disable and unprepare dsi src clocks
+ * @clk: list of src clocks.
+ */
+void dsi_clk_disable_unprepare(struct dsi_clk_link_set *clk)
+{
+ clk_disable_unprepare(clk->pixel_clk);
+ clk_disable_unprepare(clk->byte_clk);
+}
+
int dsi_core_clk_start(struct dsi_core_clks *c_clks)
{
int rc = 0;
@@ -285,38 +331,39 @@ int dsi_core_clk_stop(struct dsi_core_clks *c_clks)
return rc;
}
-static int dsi_link_clk_set_rate(struct dsi_link_clks *l_clks, int index)
+static int dsi_link_hs_clk_set_rate(struct dsi_link_hs_clk_info *link_hs_clks,
+ int index)
{
int rc = 0;
struct dsi_clk_mngr *mngr;
+ struct dsi_link_clks *l_clks;
if (index >= MAX_DSI_CTRL) {
pr_err("Invalid DSI ctrl index\n");
return -EINVAL;
}
+ l_clks = container_of(link_hs_clks, struct dsi_link_clks, hs_clks);
mngr = container_of(l_clks, struct dsi_clk_mngr, link_clks[index]);
- if (mngr->is_cont_splash_enabled)
- return 0;
+
/*
* In an ideal world, cont_splash_enabled should not be required inside
* the clock manager. But, in the current driver cont_splash_enabled
* flag is set inside mdp driver and there is no interface event
* associated with this flag setting.
*/
- rc = clk_set_rate(l_clks->clks.esc_clk, l_clks->freq.esc_clk_rate);
- if (rc) {
- pr_err("clk_set_rate failed for esc_clk rc = %d\n", rc);
- goto error;
- }
+ if (mngr->is_cont_splash_enabled)
+ return 0;
- rc = clk_set_rate(l_clks->clks.byte_clk, l_clks->freq.byte_clk_rate);
+ rc = clk_set_rate(link_hs_clks->byte_clk,
+ l_clks->freq.byte_clk_rate);
if (rc) {
pr_err("clk_set_rate failed for byte_clk rc = %d\n", rc);
goto error;
}
- rc = clk_set_rate(l_clks->clks.pixel_clk, l_clks->freq.pix_clk_rate);
+ rc = clk_set_rate(link_hs_clks->pixel_clk,
+ l_clks->freq.pix_clk_rate);
if (rc) {
pr_err("clk_set_rate failed for pixel_clk rc = %d\n", rc);
goto error;
@@ -327,8 +374,8 @@ static int dsi_link_clk_set_rate(struct dsi_link_clks *l_clks, int index)
* For DPHY: byte_intf_clk_rate = byte_clk_rate / 2
* todo: this needs to be revisited when support for CPHY is added
*/
- if (l_clks->clks.byte_intf_clk) {
- rc = clk_set_rate(l_clks->clks.byte_intf_clk,
+ if (link_hs_clks->byte_intf_clk) {
+ rc = clk_set_rate(link_hs_clks->byte_intf_clk,
(l_clks->freq.byte_clk_rate / 2));
if (rc) {
pr_err("set_rate failed for byte_intf_clk rc = %d\n",
@@ -340,30 +387,24 @@ error:
return rc;
}
-static int dsi_link_clk_prepare(struct dsi_link_clks *l_clks)
+static int dsi_link_hs_clk_prepare(struct dsi_link_hs_clk_info *link_hs_clks)
{
int rc = 0;
- rc = clk_prepare(l_clks->clks.esc_clk);
- if (rc) {
- pr_err("Failed to prepare dsi esc clk, rc=%d\n", rc);
- goto esc_clk_err;
- }
-
- rc = clk_prepare(l_clks->clks.byte_clk);
+ rc = clk_prepare(link_hs_clks->byte_clk);
if (rc) {
pr_err("Failed to prepare dsi byte clk, rc=%d\n", rc);
goto byte_clk_err;
}
- rc = clk_prepare(l_clks->clks.pixel_clk);
+ rc = clk_prepare(link_hs_clks->pixel_clk);
if (rc) {
pr_err("Failed to prepare dsi pixel clk, rc=%d\n", rc);
goto pixel_clk_err;
}
- if (l_clks->clks.byte_intf_clk) {
- rc = clk_prepare(l_clks->clks.byte_intf_clk);
+ if (link_hs_clks->byte_intf_clk) {
+ rc = clk_prepare(link_hs_clks->byte_intf_clk);
if (rc) {
pr_err("Failed to prepare dsi byte intf clk, rc=%d\n",
rc);
@@ -374,48 +415,39 @@ static int dsi_link_clk_prepare(struct dsi_link_clks *l_clks)
return rc;
byte_intf_clk_err:
- clk_unprepare(l_clks->clks.pixel_clk);
+ clk_unprepare(link_hs_clks->pixel_clk);
pixel_clk_err:
- clk_unprepare(l_clks->clks.byte_clk);
+ clk_unprepare(link_hs_clks->byte_clk);
byte_clk_err:
- clk_unprepare(l_clks->clks.esc_clk);
-esc_clk_err:
return rc;
}
-static void dsi_link_clk_unprepare(struct dsi_link_clks *l_clks)
+static void dsi_link_hs_clk_unprepare(struct dsi_link_hs_clk_info *link_hs_clks)
{
- if (l_clks->clks.byte_intf_clk)
- clk_unprepare(l_clks->clks.byte_intf_clk);
- clk_unprepare(l_clks->clks.pixel_clk);
- clk_unprepare(l_clks->clks.byte_clk);
- clk_unprepare(l_clks->clks.esc_clk);
+ if (link_hs_clks->byte_intf_clk)
+ clk_unprepare(link_hs_clks->byte_intf_clk);
+ clk_unprepare(link_hs_clks->pixel_clk);
+ clk_unprepare(link_hs_clks->byte_clk);
}
-static int dsi_link_clk_enable(struct dsi_link_clks *l_clks)
+static int dsi_link_hs_clk_enable(struct dsi_link_hs_clk_info *link_hs_clks)
{
int rc = 0;
- rc = clk_enable(l_clks->clks.esc_clk);
- if (rc) {
- pr_err("Failed to enable dsi esc clk, rc=%d\n", rc);
- goto esc_clk_err;
- }
-
- rc = clk_enable(l_clks->clks.byte_clk);
+ rc = clk_enable(link_hs_clks->byte_clk);
if (rc) {
pr_err("Failed to enable dsi byte clk, rc=%d\n", rc);
goto byte_clk_err;
}
- rc = clk_enable(l_clks->clks.pixel_clk);
+ rc = clk_enable(link_hs_clks->pixel_clk);
if (rc) {
pr_err("Failed to enable dsi pixel clk, rc=%d\n", rc);
goto pixel_clk_err;
}
- if (l_clks->clks.byte_intf_clk) {
- rc = clk_enable(l_clks->clks.byte_intf_clk);
+ if (link_hs_clks->byte_intf_clk) {
+ rc = clk_enable(link_hs_clks->byte_intf_clk);
if (rc) {
pr_err("Failed to enable dsi byte intf clk, rc=%d\n",
rc);
@@ -426,28 +458,26 @@ static int dsi_link_clk_enable(struct dsi_link_clks *l_clks)
return rc;
byte_intf_clk_err:
- clk_disable(l_clks->clks.pixel_clk);
+ clk_disable(link_hs_clks->pixel_clk);
pixel_clk_err:
- clk_disable(l_clks->clks.byte_clk);
+ clk_disable(link_hs_clks->byte_clk);
byte_clk_err:
- clk_disable(l_clks->clks.esc_clk);
-esc_clk_err:
return rc;
}
-static void dsi_link_clk_disable(struct dsi_link_clks *l_clks)
+static void dsi_link_hs_clk_disable(struct dsi_link_hs_clk_info *link_hs_clks)
{
- if (l_clks->clks.byte_intf_clk)
- clk_disable(l_clks->clks.byte_intf_clk);
- clk_disable(l_clks->clks.esc_clk);
- clk_disable(l_clks->clks.pixel_clk);
- clk_disable(l_clks->clks.byte_clk);
+ if (link_hs_clks->byte_intf_clk)
+ clk_disable(link_hs_clks->byte_intf_clk);
+ clk_disable(link_hs_clks->pixel_clk);
+ clk_disable(link_hs_clks->byte_clk);
}
/**
* dsi_link_clk_start() - enable dsi link clocks
*/
-static int dsi_link_clk_start(struct dsi_link_clks *clks, int index)
+static int dsi_link_hs_clk_start(struct dsi_link_hs_clk_info *link_hs_clks,
+ enum dsi_link_clk_op_type op_type, int index)
{
int rc = 0;
@@ -456,28 +486,34 @@ static int dsi_link_clk_start(struct dsi_link_clks *clks, int index)
return -EINVAL;
}
- rc = dsi_link_clk_set_rate(clks, index);
- if (rc) {
- pr_err("failed to set clk rates, rc = %d\n", rc);
- goto error;
+ if (op_type & DSI_LINK_CLK_SET_RATE) {
+ rc = dsi_link_hs_clk_set_rate(link_hs_clks, index);
+ if (rc) {
+ pr_err("failed to set HS clk rates, rc = %d\n", rc);
+ goto error;
+ }
}
- rc = dsi_link_clk_prepare(clks);
- if (rc) {
- pr_err("failed to prepare link clks, rc = %d\n", rc);
- goto error;
+ if (op_type & DSI_LINK_CLK_PREPARE) {
+ rc = dsi_link_hs_clk_prepare(link_hs_clks);
+ if (rc) {
+ pr_err("failed to prepare link HS clks, rc = %d\n", rc);
+ goto error;
+ }
}
- rc = dsi_link_clk_enable(clks);
- if (rc) {
- pr_err("failed to enable link clks, rc = %d\n", rc);
- goto error_unprepare;
+ if (op_type & DSI_LINK_CLK_ENABLE) {
+ rc = dsi_link_hs_clk_enable(link_hs_clks);
+ if (rc) {
+ pr_err("failed to enable link HS clks, rc = %d\n", rc);
+ goto error_unprepare;
+ }
}
- pr_debug("Link clocks are enabled\n");
+ pr_debug("HS Link clocks are enabled\n");
return rc;
error_unprepare:
- dsi_link_clk_unprepare(clks);
+ dsi_link_hs_clk_unprepare(link_hs_clks);
error:
return rc;
}
@@ -485,16 +521,79 @@ error:
/**
* dsi_link_clk_stop() - Stop DSI link clocks.
*/
-int dsi_link_clk_stop(struct dsi_link_clks *clks)
+static int dsi_link_hs_clk_stop(struct dsi_link_hs_clk_info *link_hs_clks)
{
- dsi_link_clk_disable(clks);
- dsi_link_clk_unprepare(clks);
+ struct dsi_link_clks *l_clks;
- pr_debug("Link clocks disabled\n");
+ l_clks = container_of(link_hs_clks, struct dsi_link_clks, hs_clks);
+
+ dsi_link_hs_clk_disable(link_hs_clks);
+ dsi_link_hs_clk_unprepare(link_hs_clks);
+
+ pr_debug("HS Link clocks disabled\n");
return 0;
}
+static int dsi_link_lp_clk_start(struct dsi_link_lp_clk_info *link_lp_clks,
+ int index)
+{
+ int rc = 0;
+ struct dsi_clk_mngr *mngr;
+ struct dsi_link_clks *l_clks;
+
+ if (index >= MAX_DSI_CTRL) {
+ pr_err("Invalid DSI ctrl index\n");
+ return -EINVAL;
+ }
+
+ l_clks = container_of(link_lp_clks, struct dsi_link_clks, lp_clks);
+
+ mngr = container_of(l_clks, struct dsi_clk_mngr, link_clks[index]);
+ if (!mngr)
+ return -EINVAL;
+
+ /*
+ * In an ideal world, cont_splash_enabled should not be required inside
+ * the clock manager. But, in the current driver cont_splash_enabled
+ * flag is set inside mdp driver and there is no interface event
+ * associated with this flag setting. Also, clk_set_rate() need not be
+ * called on every enable; it is required only once, when coming out
+ * of suspend.
+ */
+ if (mngr->is_cont_splash_enabled)
+ goto prepare;
+
+ rc = clk_set_rate(link_lp_clks->esc_clk, l_clks->freq.esc_clk_rate);
+ if (rc) {
+ pr_err("clk_set_rate failed for esc_clk rc = %d\n", rc);
+ goto error;
+ }
+
+prepare:
+ rc = clk_prepare_enable(link_lp_clks->esc_clk);
+ if (rc) {
+ /* clk_prepare_enable() already unprepares on enable failure */
+ pr_err("Failed to enable dsi esc clk, rc=%d\n", rc);
+ }
+error:
+ pr_debug("LP Link clk start exit, rc=%d\n", rc);
+ return rc;
+}
+
+static int dsi_link_lp_clk_stop(
+ struct dsi_link_lp_clk_info *link_lp_clks)
+{
+ struct dsi_link_clks *l_clks;
+
+ l_clks = container_of(link_lp_clks, struct dsi_link_clks, lp_clks);
+
+ clk_disable_unprepare(l_clks->lp_clks.esc_clk);
+
+ pr_debug("LP Link clocks are disabled\n");
+ return 0;
+}
+
static int dsi_display_core_clk_enable(struct dsi_core_clks *clks,
u32 ctrl_count, u32 master_ndx)
{
@@ -556,7 +655,7 @@ error:
}
static int dsi_display_link_clk_enable(struct dsi_link_clks *clks,
- u32 ctrl_count, u32 master_ndx)
+ enum dsi_lclk_type l_type, u32 ctrl_count, u32 master_ndx)
{
int rc = 0;
int i;
@@ -570,27 +669,56 @@ static int dsi_display_link_clk_enable(struct dsi_link_clks *clks,
m_clks = &clks[master_ndx];
- rc = dsi_link_clk_start(m_clks, master_ndx);
- if (rc) {
- pr_err("failed to turn on master clocks, rc=%d\n", rc);
- goto error;
+ if (l_type & DSI_LINK_LP_CLK) {
+ rc = dsi_link_lp_clk_start(&m_clks->lp_clks, master_ndx);
+ if (rc) {
+ pr_err("failed to turn on master lp link clocks, rc=%d\n",
+ rc);
+ goto error;
+ }
+ }
+
+ if (l_type & DSI_LINK_HS_CLK) {
+ rc = dsi_link_hs_clk_start(&m_clks->hs_clks,
+ DSI_LINK_CLK_START, master_ndx);
+ if (rc) {
+ pr_err("failed to turn on master hs link clocks, rc=%d\n",
+ rc);
+ goto error;
+ }
}
- /* Turn on rest of the core clocks */
for (i = 0; i < ctrl_count; i++) {
clk = &clks[i];
if (!clk || (clk == m_clks))
continue;
- rc = dsi_link_clk_start(clk, i);
- if (rc) {
- pr_err("failed to turn on clocks, rc=%d\n", rc);
- goto error_disable_master;
+ if (l_type & DSI_LINK_LP_CLK) {
+ rc = dsi_link_lp_clk_start(&clk->lp_clks, i);
+ if (rc) {
+ pr_err("failed to turn on lp link clocks, rc=%d\n",
+ rc);
+ goto error_disable_master;
+ }
+ }
+
+ if (l_type & DSI_LINK_HS_CLK) {
+ rc = dsi_link_hs_clk_start(&clk->hs_clks,
+ DSI_LINK_CLK_START, i);
+ if (rc) {
+ pr_err("failed to turn on hs link clocks, rc=%d\n",
+ rc);
+ goto error_disable_master;
+ }
}
}
return rc;
+
error_disable_master:
- (void)dsi_link_clk_stop(m_clks);
+ if (l_type & DSI_LINK_LP_CLK)
+ (void)dsi_link_lp_clk_stop(&m_clks->lp_clks);
+ if (l_type & DSI_LINK_HS_CLK)
+ (void)dsi_link_hs_clk_stop(&m_clks->hs_clks);
error:
return rc;
}
@@ -646,7 +774,7 @@ error:
}
static int dsi_display_link_clk_disable(struct dsi_link_clks *clks,
- u32 ctrl_count, u32 master_ndx)
+ enum dsi_lclk_type l_type, u32 ctrl_count, u32 master_ndx)
{
int rc = 0;
int i;
@@ -667,35 +795,109 @@ static int dsi_display_link_clk_disable(struct dsi_link_clks *clks,
if (!clk || (clk == m_clks))
continue;
- rc = dsi_link_clk_stop(clk);
+ if (l_type & DSI_LINK_LP_CLK) {
+ rc = dsi_link_lp_clk_stop(&clk->lp_clks);
+ if (rc)
+ pr_err("failed to turn off lp link clocks, rc=%d\n",
+ rc);
+ }
+
+ if (l_type & DSI_LINK_HS_CLK) {
+ rc = dsi_link_hs_clk_stop(&clk->hs_clks);
+ if (rc)
+ pr_err("failed to turn off hs link clocks, rc=%d\n",
+ rc);
+ }
+ }
+
+ if (l_type & DSI_LINK_LP_CLK) {
+ rc = dsi_link_lp_clk_stop(&m_clks->lp_clks);
if (rc)
- pr_err("failed to turn off clocks, rc=%d\n", rc);
+ pr_err("failed to turn off master lp link clocks, rc=%d\n",
+ rc);
}
- rc = dsi_link_clk_stop(m_clks);
- if (rc)
- pr_err("failed to turn off master clocks, rc=%d\n", rc);
+ if (l_type & DSI_LINK_HS_CLK) {
+ rc = dsi_link_hs_clk_stop(&m_clks->hs_clks);
+ if (rc)
+ pr_err("failed to turn off master hs link clocks, rc=%d\n",
+ rc);
+ }
return rc;
}
-static int dsi_update_clk_state(struct dsi_core_clks *c_clks, u32 c_state,
- struct dsi_link_clks *l_clks, u32 l_state)
+static int dsi_clk_update_link_clk_state(struct dsi_clk_mngr *mngr,
+ struct dsi_link_clks *l_clks, enum dsi_lclk_type l_type, u32 l_state,
+ bool enable)
{
int rc = 0;
- struct dsi_clk_mngr *mngr;
- bool l_c_on = false;
- if (c_clks) {
- mngr =
- container_of(c_clks, struct dsi_clk_mngr, core_clks[0]);
- } else if (l_clks) {
- mngr =
- container_of(l_clks, struct dsi_clk_mngr, link_clks[0]);
+ if (!mngr)
+ return -EINVAL;
+
+ if (enable) {
+ if (mngr->pre_clkon_cb) {
+ rc = mngr->pre_clkon_cb(mngr->priv_data, DSI_LINK_CLK,
+ l_type, l_state);
+ if (rc) {
+ pr_err("pre link clk on cb failed for type %d\n",
+ l_type);
+ goto error;
+ }
+ }
+ rc = dsi_display_link_clk_enable(l_clks, l_type,
+ mngr->dsi_ctrl_count, mngr->master_ndx);
+ if (rc) {
+ pr_err("failed to start link clk type %d rc=%d\n",
+ l_type, rc);
+ goto error;
+ }
+
+ if (mngr->post_clkon_cb) {
+ rc = mngr->post_clkon_cb(mngr->priv_data, DSI_LINK_CLK,
+ l_type, l_state);
+ if (rc) {
+ pr_err("post link clk on cb failed for type %d\n",
+ l_type);
+ goto error;
+ }
+ }
} else {
- mngr = NULL;
+ if (mngr->pre_clkoff_cb) {
+ rc = mngr->pre_clkoff_cb(mngr->priv_data,
+ DSI_LINK_CLK, l_type, l_state);
+ if (rc)
+ pr_err("pre link clk off cb failed\n");
+ }
+
+ rc = dsi_display_link_clk_disable(l_clks, l_type,
+ mngr->dsi_ctrl_count, mngr->master_ndx);
+ if (rc) {
+ pr_err("failed to stop link clk type %d, rc = %d\n",
+ l_type, rc);
+ goto error;
+ }
+
+ if (mngr->post_clkoff_cb) {
+ rc = mngr->post_clkoff_cb(mngr->priv_data,
+ DSI_LINK_CLK, l_type, l_state);
+ if (rc)
+ pr_err("post link clk off cb failed\n");
+ }
}
+error:
+ return rc;
+}
+
+static int dsi_update_clk_state(struct dsi_clk_mngr *mngr,
+ struct dsi_core_clks *c_clks, u32 c_state,
+ struct dsi_link_clks *l_clks, u32 l_state)
+{
+ int rc = 0;
+ bool l_c_on = false;
+
if (!mngr)
return -EINVAL;
@@ -710,6 +912,7 @@ static int dsi_update_clk_state(struct dsi_core_clks *c_clks, u32 c_state,
if (mngr->core_clk_state == DSI_CLK_OFF) {
rc = mngr->pre_clkon_cb(mngr->priv_data,
DSI_CORE_CLK,
+ DSI_LINK_NONE,
DSI_CLK_ON);
if (rc) {
pr_err("failed to turn on MDP FS rc= %d\n", rc);
@@ -726,6 +929,7 @@ static int dsi_update_clk_state(struct dsi_core_clks *c_clks, u32 c_state,
if (mngr->post_clkon_cb) {
rc = mngr->post_clkon_cb(mngr->priv_data,
DSI_CORE_CLK,
+ DSI_LINK_NONE,
DSI_CLK_ON);
if (rc)
pr_err("post clk on cb failed, rc = %d\n", rc);
@@ -735,25 +939,15 @@ static int dsi_update_clk_state(struct dsi_core_clks *c_clks, u32 c_state,
if (l_clks) {
if (l_state == DSI_CLK_ON) {
- if (mngr->pre_clkon_cb) {
- rc = mngr->pre_clkon_cb(mngr->priv_data,
- DSI_LINK_CLK, l_state);
- if (rc)
- pr_err("pre link clk on cb failed\n");
- }
- rc = dsi_display_link_clk_enable(l_clks,
- mngr->dsi_ctrl_count, mngr->master_ndx);
- if (rc) {
- pr_err("failed to start link clk rc= %d\n", rc);
+ rc = dsi_clk_update_link_clk_state(mngr, l_clks,
+ DSI_LINK_LP_CLK, l_state, true);
+ if (rc)
+ goto error;
+
+ rc = dsi_clk_update_link_clk_state(mngr, l_clks,
+ DSI_LINK_HS_CLK, l_state, true);
+ if (rc)
goto error;
- }
- if (mngr->post_clkon_cb) {
- rc = mngr->post_clkon_cb(mngr->priv_data,
- DSI_LINK_CLK,
- l_state);
- if (rc)
- pr_err("post link clk on cb failed\n");
- }
} else {
/*
* Two conditions that need to be checked for Link
@@ -784,36 +978,26 @@ static int dsi_update_clk_state(struct dsi_core_clks *c_clks, u32 c_state,
}
rc = dsi_display_link_clk_enable(l_clks,
+ (DSI_LINK_LP_CLK | DSI_LINK_HS_CLK),
mngr->dsi_ctrl_count, mngr->master_ndx);
if (rc) {
- pr_err("Link clks did not start\n");
+ pr_err("Link clks did not start\n");
goto error;
}
l_c_on = true;
pr_debug("ECG: core and Link_on\n");
}
- if (mngr->pre_clkoff_cb) {
- rc = mngr->pre_clkoff_cb(mngr->priv_data,
- DSI_LINK_CLK, l_state);
- if (rc)
- pr_err("pre link clk off cb failed\n");
- }
+ rc = dsi_clk_update_link_clk_state(mngr, l_clks,
+ DSI_LINK_HS_CLK, l_state, false);
+ if (rc)
+ goto error;
- rc = dsi_display_link_clk_disable(l_clks,
- mngr->dsi_ctrl_count, mngr->master_ndx);
- if (rc) {
- pr_err("failed to stop link clk, rc = %d\n",
- rc);
+ rc = dsi_clk_update_link_clk_state(mngr, l_clks,
+ DSI_LINK_LP_CLK, l_state, false);
+ if (rc)
goto error;
- }
- if (mngr->post_clkoff_cb) {
- rc = mngr->post_clkoff_cb(mngr->priv_data,
- DSI_LINK_CLK, l_state);
- if (rc)
- pr_err("post link clk off cb failed\n");
- }
/*
* This check is to save unnecessary clock state
* change when going from EARLY_GATE to OFF. In the
@@ -872,6 +1056,7 @@ static int dsi_update_clk_state(struct dsi_core_clks *c_clks, u32 c_state,
if (mngr->pre_clkoff_cb) {
rc = mngr->pre_clkoff_cb(mngr->priv_data,
DSI_CORE_CLK,
+ DSI_LINK_NONE,
c_state);
if (rc)
pr_err("pre core clk off cb failed\n");
@@ -888,6 +1073,7 @@ static int dsi_update_clk_state(struct dsi_core_clks *c_clks, u32 c_state,
if (mngr->post_clkoff_cb) {
rc = mngr->post_clkoff_cb(mngr->priv_data,
DSI_CORE_CLK,
+ DSI_LINK_NONE,
DSI_CLK_OFF);
if (rc)
pr_err("post clkoff cb fail, rc = %d\n",
@@ -957,7 +1143,7 @@ static int dsi_recheck_clk_state(struct dsi_clk_mngr *mngr)
old_l_clk_state, new_link_clk_state);
if (c_clks || l_clks) {
- rc = dsi_update_clk_state(c_clks, new_core_clk_state,
+ rc = dsi_update_clk_state(mngr, c_clks, new_core_clk_state,
l_clks, new_link_clk_state);
if (rc) {
pr_err("failed to update clock state, rc = %d\n", rc);
@@ -1095,7 +1281,8 @@ static int dsi_display_link_clk_force_update(void *client)
}
rc = dsi_display_link_clk_disable(l_clks,
- mngr->dsi_ctrl_count, mngr->master_ndx);
+ (DSI_LINK_LP_CLK | DSI_LINK_HS_CLK),
+ mngr->dsi_ctrl_count, mngr->master_ndx);
if (rc) {
pr_err("%s, failed to stop link clk, rc = %d\n",
__func__, rc);
@@ -1103,7 +1290,8 @@ static int dsi_display_link_clk_force_update(void *client)
}
rc = dsi_display_link_clk_enable(l_clks,
- mngr->dsi_ctrl_count, mngr->master_ndx);
+ (DSI_LINK_LP_CLK | DSI_LINK_HS_CLK),
+ mngr->dsi_ctrl_count, mngr->master_ndx);
if (rc) {
pr_err("%s, failed to start link clk rc= %d\n",
__func__, rc);
@@ -1267,8 +1455,10 @@ void *dsi_display_clk_mngr_register(struct dsi_clk_info *info)
for (i = 0; i < mngr->dsi_ctrl_count; i++) {
memcpy(&mngr->core_clks[i].clks, &info->c_clks[i],
sizeof(struct dsi_core_clk_info));
- memcpy(&mngr->link_clks[i].clks, &info->l_clks[i],
- sizeof(struct dsi_link_clk_info));
+ memcpy(&mngr->link_clks[i].hs_clks, &info->l_hs_clks[i],
+ sizeof(struct dsi_link_hs_clk_info));
+ memcpy(&mngr->link_clks[i].lp_clks, &info->l_lp_clks[i],
+ sizeof(struct dsi_link_lp_clk_info));
mngr->core_clks[i].bus_handle = info->bus_handle[i];
mngr->ctrl_index[i] = info->ctrl_index[i];
}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
index 4899d9f51d03..13c3c31cd9b0 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -498,7 +498,8 @@ static int dsi_ctrl_init_regmap(struct platform_device *pdev,
static int dsi_ctrl_clocks_deinit(struct dsi_ctrl *ctrl)
{
struct dsi_core_clk_info *core = &ctrl->clk_info.core_clks;
- struct dsi_link_clk_info *link = &ctrl->clk_info.link_clks;
+ struct dsi_link_lp_clk_info *lp_link = &ctrl->clk_info.lp_link_clks;
+ struct dsi_link_hs_clk_info *hs_link = &ctrl->clk_info.hs_link_clks;
struct dsi_clk_link_set *rcg = &ctrl->clk_info.rcg_clks;
if (core->mdp_core_clk)
@@ -514,16 +515,17 @@ static int dsi_ctrl_clocks_deinit(struct dsi_ctrl *ctrl)
memset(core, 0x0, sizeof(*core));
- if (link->byte_clk)
- devm_clk_put(&ctrl->pdev->dev, link->byte_clk);
- if (link->pixel_clk)
- devm_clk_put(&ctrl->pdev->dev, link->pixel_clk);
- if (link->esc_clk)
- devm_clk_put(&ctrl->pdev->dev, link->esc_clk);
- if (link->byte_intf_clk)
- devm_clk_put(&ctrl->pdev->dev, link->byte_intf_clk);
+ if (hs_link->byte_clk)
+ devm_clk_put(&ctrl->pdev->dev, hs_link->byte_clk);
+ if (hs_link->pixel_clk)
+ devm_clk_put(&ctrl->pdev->dev, hs_link->pixel_clk);
+ if (lp_link->esc_clk)
+ devm_clk_put(&ctrl->pdev->dev, lp_link->esc_clk);
+ if (hs_link->byte_intf_clk)
+ devm_clk_put(&ctrl->pdev->dev, hs_link->byte_intf_clk);
- memset(link, 0x0, sizeof(*link));
+ memset(hs_link, 0x0, sizeof(*hs_link));
+ memset(lp_link, 0x0, sizeof(*lp_link));
if (rcg->byte_clk)
devm_clk_put(&ctrl->pdev->dev, rcg->byte_clk);
@@ -540,7 +542,8 @@ static int dsi_ctrl_clocks_init(struct platform_device *pdev,
{
int rc = 0;
struct dsi_core_clk_info *core = &ctrl->clk_info.core_clks;
- struct dsi_link_clk_info *link = &ctrl->clk_info.link_clks;
+ struct dsi_link_lp_clk_info *lp_link = &ctrl->clk_info.lp_link_clks;
+ struct dsi_link_hs_clk_info *hs_link = &ctrl->clk_info.hs_link_clks;
struct dsi_clk_link_set *rcg = &ctrl->clk_info.rcg_clks;
core->mdp_core_clk = devm_clk_get(&pdev->dev, "mdp_core_clk");
@@ -573,30 +576,30 @@ static int dsi_ctrl_clocks_init(struct platform_device *pdev,
pr_debug("can't get mnoc clock, rc=%d\n", rc);
}
- link->byte_clk = devm_clk_get(&pdev->dev, "byte_clk");
- if (IS_ERR(link->byte_clk)) {
- rc = PTR_ERR(link->byte_clk);
+ hs_link->byte_clk = devm_clk_get(&pdev->dev, "byte_clk");
+ if (IS_ERR(hs_link->byte_clk)) {
+ rc = PTR_ERR(hs_link->byte_clk);
pr_err("failed to get byte_clk, rc=%d\n", rc);
goto fail;
}
- link->pixel_clk = devm_clk_get(&pdev->dev, "pixel_clk");
- if (IS_ERR(link->pixel_clk)) {
- rc = PTR_ERR(link->pixel_clk);
+ hs_link->pixel_clk = devm_clk_get(&pdev->dev, "pixel_clk");
+ if (IS_ERR(hs_link->pixel_clk)) {
+ rc = PTR_ERR(hs_link->pixel_clk);
pr_err("failed to get pixel_clk, rc=%d\n", rc);
goto fail;
}
- link->esc_clk = devm_clk_get(&pdev->dev, "esc_clk");
- if (IS_ERR(link->esc_clk)) {
- rc = PTR_ERR(link->esc_clk);
+ lp_link->esc_clk = devm_clk_get(&pdev->dev, "esc_clk");
+ if (IS_ERR(lp_link->esc_clk)) {
+ rc = PTR_ERR(lp_link->esc_clk);
pr_err("failed to get esc_clk, rc=%d\n", rc);
goto fail;
}
- link->byte_intf_clk = devm_clk_get(&pdev->dev, "byte_intf_clk");
- if (IS_ERR(link->byte_intf_clk)) {
- link->byte_intf_clk = NULL;
+ hs_link->byte_intf_clk = devm_clk_get(&pdev->dev, "byte_intf_clk");
+ if (IS_ERR(hs_link->byte_intf_clk)) {
+ hs_link->byte_intf_clk = NULL;
pr_debug("can't find byte intf clk, rc=%d\n", rc);
}
@@ -1227,9 +1230,10 @@ kickoff:
}
}
- if (dsi_ctrl->hw.ops.mask_error_intr)
+ if (dsi_ctrl->hw.ops.mask_error_intr &&
+ !dsi_ctrl->esd_check_underway)
dsi_ctrl->hw.ops.mask_error_intr(&dsi_ctrl->hw,
- BIT(DSI_FIFO_OVERFLOW), false);
+ BIT(DSI_FIFO_OVERFLOW), false);
dsi_ctrl->hw.ops.reset_cmd_fifo(&dsi_ctrl->hw);
/*
@@ -2555,6 +2559,16 @@ void dsi_ctrl_isr_configure(struct dsi_ctrl *dsi_ctrl, bool enable)
mutex_unlock(&dsi_ctrl->ctrl_lock);
}
+void dsi_ctrl_set_continuous_clk(struct dsi_ctrl *dsi_ctrl, bool enable)
+{
+ if (!dsi_ctrl)
+ return;
+
+ mutex_lock(&dsi_ctrl->ctrl_lock);
+ if (dsi_ctrl->hw.ops.set_continuous_clk) dsi_ctrl->hw.ops.set_continuous_clk(&dsi_ctrl->hw, enable);
+ mutex_unlock(&dsi_ctrl->ctrl_lock);
+}
+
int dsi_ctrl_soft_reset(struct dsi_ctrl *dsi_ctrl)
{
if (!dsi_ctrl)
@@ -2677,7 +2691,12 @@ int dsi_ctrl_update_host_config(struct dsi_ctrl *ctrl,
goto error;
}
- if (!(flags & (DSI_MODE_FLAG_SEAMLESS | DSI_MODE_FLAG_VRR))) {
+ if (!(flags & (DSI_MODE_FLAG_SEAMLESS | DSI_MODE_FLAG_VRR |
+ DSI_MODE_FLAG_DYN_CLK))) {
+ /*
+ * For the dynamic clk switch case the link frequencies will be
+ * updated by dsi_display_dynamic_clk_switch().
+ */
rc = dsi_ctrl_update_link_freqs(ctrl, config, clk_handle);
if (rc) {
pr_err("[%s] failed to update link frequencies, rc=%d\n",
@@ -2840,7 +2859,8 @@ int dsi_ctrl_cmd_tx_trigger(struct dsi_ctrl *dsi_ctrl, u32 flags)
dsi_ctrl->cell_index);
}
}
- if (dsi_ctrl->hw.ops.mask_error_intr)
+ if (dsi_ctrl->hw.ops.mask_error_intr &&
+ !dsi_ctrl->esd_check_underway)
dsi_ctrl->hw.ops.mask_error_intr(&dsi_ctrl->hw,
BIT(DSI_FIFO_OVERFLOW), false);
@@ -3338,7 +3358,8 @@ u32 dsi_ctrl_collect_misr(struct dsi_ctrl *dsi_ctrl)
return misr;
}
-void dsi_ctrl_mask_error_status_interrupts(struct dsi_ctrl *dsi_ctrl)
+void dsi_ctrl_mask_error_status_interrupts(struct dsi_ctrl *dsi_ctrl, u32 idx,
+ bool mask_enable)
{
if (!dsi_ctrl || !dsi_ctrl->hw.ops.error_intr_ctrl
|| !dsi_ctrl->hw.ops.clear_error_status) {
@@ -3351,9 +3372,23 @@ void dsi_ctrl_mask_error_status_interrupts(struct dsi_ctrl *dsi_ctrl)
* register
*/
mutex_lock(&dsi_ctrl->ctrl_lock);
- dsi_ctrl->hw.ops.error_intr_ctrl(&dsi_ctrl->hw, false);
- dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw,
+ if (idx & BIT(DSI_ERR_INTR_ALL)) {
+ /*
+ * The behavior of mask_enable is different in ctrl register
+ * and mask register and hence mask_enable is manipulated for
+ * selective error interrupt masking vs total error interrupt
+ * masking.
+ */
+
+ dsi_ctrl->hw.ops.error_intr_ctrl(&dsi_ctrl->hw, !mask_enable);
+ dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw,
+ DSI_ERROR_INTERRUPT_COUNT);
+ } else {
+ dsi_ctrl->hw.ops.mask_error_intr(&dsi_ctrl->hw, idx,
+ mask_enable);
+ dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw,
DSI_ERROR_INTERRUPT_COUNT);
+ }
mutex_unlock(&dsi_ctrl->ctrl_lock);
}
@@ -3380,6 +3415,27 @@ void dsi_ctrl_irq_update(struct dsi_ctrl *dsi_ctrl, bool enable)
}
/**
+ * dsi_ctrl_wait4dynamic_refresh_done() - Poll for dynamic refresh
+ * done interrupt.
+ * @dsi_ctrl: DSI controller handle.
+ */
+int dsi_ctrl_wait4dynamic_refresh_done(struct dsi_ctrl *ctrl)
+{
+ int rc = 0;
+
+ if (!ctrl)
+ return 0;
+
+ mutex_lock(&ctrl->ctrl_lock);
+
+ if (ctrl->hw.ops.wait4dynamic_refresh_done)
+ rc = ctrl->hw.ops.wait4dynamic_refresh_done(&ctrl->hw);
+
+ mutex_unlock(&ctrl->ctrl_lock);
+ return rc;
+}
+
+/**
* dsi_ctrl_drv_register() - register platform driver for dsi controller
*/
void dsi_ctrl_drv_register(void)
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
index 79c4c27bf2fc..eb00e428f7ea 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
@@ -109,7 +109,8 @@ struct dsi_ctrl_power_info {
/**
* struct dsi_ctrl_clk_info - clock information for DSI controller
* @core_clks: Core clocks needed to access DSI controller registers.
- * @link_clks: Link clocks required to transmit data over DSI link.
+ * @hs_link_clks: Clocks required to transmit high speed data over DSI
+ * @lp_link_clks: Clocks required to perform low power ops over DSI
* @rcg_clks: Root clock generation clocks generated in MMSS_CC. The
* output of the PLL is set as parent for these root
* clocks. These clocks are specific to controller
@@ -123,7 +124,8 @@ struct dsi_ctrl_power_info {
struct dsi_ctrl_clk_info {
/* Clocks parsed from DT */
struct dsi_core_clk_info core_clks;
- struct dsi_link_clk_info link_clks;
+ struct dsi_link_hs_clk_info hs_link_clks;
+ struct dsi_link_lp_clk_info lp_link_clks;
struct dsi_clk_link_set rcg_clks;
/* Clocks set by DSI Manager */
@@ -216,6 +218,7 @@ struct dsi_ctrl_interrupts {
* @cmd_buffer_size: Size of command buffer.
* @vaddr: CPU virtual address of cmd buffer.
* @secure_mode: Indicates if secure-session is in progress
+ * @esd_check_underway: Indicates if esd status check is in progress
* @debugfs_root: Root for debugfs entries.
* @misr_enable: Frame MISR enable/disable
* @misr_cache: Cached Frame MISR value
@@ -261,6 +264,7 @@ struct dsi_ctrl {
u32 cmd_len;
void *vaddr;
bool secure_mode;
+ bool esd_check_underway;
/* Debug Information */
struct dentry *debugfs_root;
@@ -738,8 +742,11 @@ void dsi_ctrl_isr_configure(struct dsi_ctrl *dsi_ctrl, bool enable);
* dsi_ctrl_mask_error_status_interrupts() - API to mask dsi ctrl error status
* interrupts
* @dsi_ctrl: DSI controller handle.
+ * @idx: id indicating which interrupts to enable/disable.
+ * @mask_enable: boolean to enable/disable masking.
*/
-void dsi_ctrl_mask_error_status_interrupts(struct dsi_ctrl *dsi_ctrl);
+void dsi_ctrl_mask_error_status_interrupts(struct dsi_ctrl *dsi_ctrl, u32 idx,
+ bool mask_enable);
/**
* dsi_ctrl_irq_update() - Put a irq vote to process DSI error
@@ -762,4 +769,17 @@ int dsi_ctrl_get_host_engine_init_state(struct dsi_ctrl *dsi_ctrl,
*/
int dsi_ctrl_wait_for_cmd_mode_mdp_idle(struct dsi_ctrl *dsi_ctrl);
+/**
+ * dsi_ctrl_set_continuous_clk() - API to set/unset force clock lane HS request.
+ * @dsi_ctrl: DSI controller handle.
+ * @enable: variable to control continuous clock.
+ */
+void dsi_ctrl_set_continuous_clk(struct dsi_ctrl *dsi_ctrl, bool enable);
+
+/**
+ * dsi_ctrl_wait4dynamic_refresh_done() - Poll for dynamic refresh done
+ * interrupt.
+ * @dsi_ctrl: DSI controller handle.
+ */
+int dsi_ctrl_wait4dynamic_refresh_done(struct dsi_ctrl *ctrl);
#endif /* _DSI_CTRL_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
index d6fab59db44b..2ce8c38f2652 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
@@ -805,6 +805,19 @@ struct dsi_ctrl_hw_ops {
* @ctrl: Pointer to the controller host hardware.
*/
int (*wait_for_cmd_mode_mdp_idle)(struct dsi_ctrl_hw *ctrl);
+
+ /**
+ * hw.ops.set_continuous_clk() - Set continuous clock
+ * @ctrl: Pointer to the controller host hardware.
+ * @enable: Bool to control continuous clock request.
+ */
+ void (*set_continuous_clk)(struct dsi_ctrl_hw *ctrl, bool enable);
+
+ /**
+ * hw.ops.wait4dynamic_refresh_done() - Wait for dynamic refresh done
+ * @ctrl: Pointer to the controller host hardware.
+ */
+ int (*wait4dynamic_refresh_done)(struct dsi_ctrl_hw *ctrl);
};
/*
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
index c1af52f84b91..8cf77e4bf8d9 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
@@ -1418,17 +1418,20 @@ void dsi_ctrl_hw_cmn_mask_error_intr(struct dsi_ctrl_hw *ctrl, u32 idx, bool en)
reg = DSI_R32(ctrl, 0x10c);
if (idx & BIT(DSI_FIFO_OVERFLOW)) {
- if (en)
- reg |= (0xf << 16);
- else
- reg &= ~(0xf << 16);
+ if (en) {
+ reg |= (0x1f << 16);
+ reg |= BIT(9);
+ } else {
+ reg &= ~(0x1f << 16);
+ reg &= ~BIT(9);
+ }
}
if (idx & BIT(DSI_FIFO_UNDERFLOW)) {
if (en)
- reg |= (0xf << 26);
+ reg |= (0x1b << 26);
else
- reg &= ~(0xf << 26);
+ reg &= ~(0x1b << 26);
}
if (idx & BIT(DSI_LP_Rx_TIMEOUT)) {
@@ -1438,6 +1441,13 @@ void dsi_ctrl_hw_cmn_mask_error_intr(struct dsi_ctrl_hw *ctrl, u32 idx, bool en)
reg &= ~(0x7 << 23);
}
+ if (idx & BIT(DSI_PLL_UNLOCK_ERR)) {
+ if (en)
+ reg |= BIT(28);
+ else
+ reg &= ~BIT(28);
+ }
+
DSI_W32(ctrl, 0x10c, reg);
wmb(); /* ensure error is masked */
}
@@ -1491,3 +1501,38 @@ int dsi_ctrl_hw_cmn_wait_for_cmd_mode_mdp_idle(struct dsi_ctrl_hw *ctrl)
return rc;
}
+
+void dsi_ctrl_hw_cmn_set_continuous_clk(struct dsi_ctrl_hw *ctrl, bool enable)
+{
+ u32 reg = 0;
+
+ reg = DSI_R32(ctrl, DSI_LANE_CTRL);
+ if (enable)
+ reg |= BIT(28);
+ else
+ reg &= ~BIT(28);
+ DSI_W32(ctrl, DSI_LANE_CTRL, reg);
+ wmb(); /* make sure request is set */
+}
+
+int dsi_ctrl_hw_cmn_wait4dynamic_refresh_done(struct dsi_ctrl_hw *ctrl)
+{
+ int rc;
+ u32 const sleep_us = 1000;
+ u32 const timeout_us = 84000; /* approximately 5 vsyncs */
+ u32 reg = 0, dyn_refresh_done = BIT(28);
+
+ rc = readl_poll_timeout(ctrl->base + DSI_INT_CTRL, reg,
+ (reg & dyn_refresh_done), sleep_us, timeout_us);
+ if (rc) {
+ pr_err("wait4dynamic refresh timedout %d\n", rc);
+ return rc;
+ }
+
+ /* ack dynamic refresh done status */
+ reg = DSI_R32(ctrl, DSI_INT_CTRL);
+ reg |= dyn_refresh_done;
+ DSI_W32(ctrl, DSI_INT_CTRL, reg);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_reg.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_reg.h
index 39ac0214283e..0ee8b39ac7a2 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_reg.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_reg.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -138,44 +138,7 @@
#define DSI_SCRATCH_REGISTER_1 (0x01F8)
#define DSI_SCRATCH_REGISTER_2 (0x01FC)
#define DSI_DYNAMIC_REFRESH_CTRL (0x0200)
-#define DSI_DYNAMIC_REFRESH_PIPE_DELAY (0x0204)
-#define DSI_DYNAMIC_REFRESH_PIPE_DELAY2 (0x0208)
-#define DSI_DYNAMIC_REFRESH_PLL_DELAY (0x020C)
#define DSI_DYNAMIC_REFRESH_STATUS (0x0210)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL0 (0x0214)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL1 (0x0218)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL2 (0x021C)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL3 (0x0220)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL4 (0x0224)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL5 (0x0228)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL6 (0x022C)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL7 (0x0230)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL8 (0x0234)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL9 (0x0238)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL10 (0x023C)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL11 (0x0240)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL12 (0x0244)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL13 (0x0248)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL14 (0x024C)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL15 (0x0250)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL16 (0x0254)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL17 (0x0258)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL18 (0x025C)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL19 (0x0260)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL20 (0x0264)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL21 (0x0268)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL22 (0x026C)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL23 (0x0270)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL24 (0x0274)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL25 (0x0278)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL26 (0x027C)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL27 (0x0280)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL28 (0x0284)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL29 (0x0288)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL30 (0x028C)
-#define DSI_DYNAMIC_REFRESH_PLL_CTRL31 (0x0290)
-#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR (0x0294)
-#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR2 (0x0298)
#define DSI_VIDEO_COMPRESSION_MODE_CTRL (0x02A0)
#define DSI_VIDEO_COMPRESSION_MODE_CTRL2 (0x02A4)
#define DSI_COMMAND_COMPRESSION_MODE_CTRL (0x02A8)
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
index d45f8493d29d..f905824d56f9 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -79,6 +79,7 @@ enum dsi_op_mode {
* @DSI_MODE_FLAG_DMS: Seamless transition is dynamic mode switch
* @DSI_MODE_FLAG_VRR: Seamless transition is DynamicFPS.
* New timing values are sent from DAL.
+ * @DSI_MODE_FLAG_DYN_CLK: Seamless transition is dynamic clock change
*/
enum dsi_mode_flags {
DSI_MODE_FLAG_SEAMLESS = BIT(0),
@@ -86,6 +87,7 @@ enum dsi_mode_flags {
DSI_MODE_FLAG_VBLANK_PRE_MODESET = BIT(2),
DSI_MODE_FLAG_DMS = BIT(3),
DSI_MODE_FLAG_VRR = BIT(4),
+ DSI_MODE_FLAG_DYN_CLK = BIT(5),
};
/**
@@ -404,6 +406,7 @@ struct dsi_mode_info {
* @ignore_rx_eot: Ignore Rx EOT packets if set to true.
* @append_tx_eot: Append EOT packets for forward transmissions if set to
* true.
+ * @force_hs_clk_lane: Send continuous clock to the panel.
*/
struct dsi_host_common_cfg {
enum dsi_pixel_format dst_format;
@@ -422,6 +425,7 @@ struct dsi_host_common_cfg {
u32 t_clk_pre;
bool ignore_rx_eot;
bool append_tx_eot;
+ bool force_hs_clk_lane;
};
/**
@@ -590,11 +594,50 @@ struct dsi_event_cb_info {
* @DSI_FIFO_OVERFLOW: DSI FIFO Overflow error
* @DSI_FIFO_UNDERFLOW: DSI FIFO Underflow error
* @DSI_LP_Rx_TIMEOUT: DSI LP/RX Timeout error
+ * @DSI_PLL_UNLOCK_ERR: DSI PLL unlock error
*/
enum dsi_error_status {
DSI_FIFO_OVERFLOW = 1,
DSI_FIFO_UNDERFLOW,
DSI_LP_Rx_TIMEOUT,
+ DSI_PLL_UNLOCK_ERR,
+ DSI_ERR_INTR_ALL,
};
+/* structure containing the delays required for dynamic clk */
+struct dsi_dyn_clk_delay {
+ u32 pipe_delay;
+ u32 pipe_delay2;
+ u32 pll_delay;
+};
+
+/* dynamic refresh control bits */
+enum dsi_dyn_clk_control_bits {
+ DYN_REFRESH_INTF_SEL = 1,
+ DYN_REFRESH_SYNC_MODE,
+ DYN_REFRESH_SW_TRIGGER,
+ DYN_REFRESH_SWI_CTRL,
+};
+
+/* convert dsi pixel format into bits per pixel */
+static inline int dsi_pixel_format_to_bpp(enum dsi_pixel_format fmt)
+{
+ switch (fmt) {
+ case DSI_PIXEL_FORMAT_RGB888:
+ case DSI_PIXEL_FORMAT_MAX:
+ return 24;
+ case DSI_PIXEL_FORMAT_RGB666:
+ case DSI_PIXEL_FORMAT_RGB666_LOOSE:
+ return 18;
+ case DSI_PIXEL_FORMAT_RGB565:
+ return 16;
+ case DSI_PIXEL_FORMAT_RGB111:
+ return 3;
+ case DSI_PIXEL_FORMAT_RGB332:
+ return 8;
+ case DSI_PIXEL_FORMAT_RGB444:
+ return 12;
+ }
+ return 24;
+}
#endif /* _DSI_DEFS_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index b60ffa14a2ec..7e55ce3544c4 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -59,7 +59,8 @@ static const struct of_device_id dsi_display_dt_match[] = {
static struct dsi_display *primary_display;
static struct dsi_display *secondary_display;
-static void dsi_display_mask_ctrl_error_interrupts(struct dsi_display *display)
+static void dsi_display_mask_ctrl_error_interrupts(struct dsi_display *display,
+ u32 mask, bool enable)
{
int i;
struct dsi_display_ctrl *ctrl;
@@ -72,7 +73,25 @@ static void dsi_display_mask_ctrl_error_interrupts(struct dsi_display *display)
ctrl = &display->ctrl[i];
if (!ctrl)
continue;
- dsi_ctrl_mask_error_status_interrupts(ctrl->ctrl);
+ dsi_ctrl_mask_error_status_interrupts(ctrl->ctrl, mask, enable);
+ }
+}
+
+static void dsi_display_set_ctrl_esd_check_flag(struct dsi_display *display,
+ bool enable)
+{
+ int i;
+ struct dsi_display_ctrl *ctrl;
+
+ if (!display)
+ return;
+
+ for (i = 0; (i < display->ctrl_count) &&
+ (i < MAX_DSI_CTRLS_PER_DISPLAY); i++) {
+ ctrl = &display->ctrl[i];
+ if (!ctrl)
+ continue;
+ ctrl->ctrl->esd_check_underway = enable;
}
}
@@ -674,10 +693,6 @@ static int dsi_display_status_reg_read(struct dsi_display *display)
}
}
exit:
- /* mask only error interrupts */
- if (rc <= 0)
- dsi_display_mask_ctrl_error_interrupts(display);
-
dsi_display_cmd_engine_disable(display);
done:
return rc;
@@ -718,6 +733,7 @@ int dsi_display_check_status(void *display, bool te_check_override)
struct dsi_panel *panel;
u32 status_mode;
int rc = 0x1;
+ u32 mask;
if (dsi_display == NULL)
return -EINVAL;
@@ -739,6 +755,11 @@ int dsi_display_check_status(void *display, bool te_check_override)
dsi_display_clk_ctrl(dsi_display->dsi_clk_handle,
DSI_ALL_CLKS, DSI_CLK_ON);
+ /* Mask error interrupts before attempting ESD read */
+ mask = BIT(DSI_FIFO_OVERFLOW) | BIT(DSI_FIFO_UNDERFLOW);
+ dsi_display_set_ctrl_esd_check_flag(dsi_display, true);
+ dsi_display_mask_ctrl_error_interrupts(dsi_display, mask, true);
+
if (status_mode == ESD_MODE_REG_READ) {
rc = dsi_display_status_reg_read(dsi_display);
} else if (status_mode == ESD_MODE_SW_BTA) {
@@ -750,6 +771,13 @@ int dsi_display_check_status(void *display, bool te_check_override)
panel->esd_config.esd_enabled = false;
}
+ /* Unmask error interrupts */
+ if (rc > 0) {
+ dsi_display_set_ctrl_esd_check_flag(dsi_display, false);
+ dsi_display_mask_ctrl_error_interrupts(dsi_display, mask,
+ false);
+ }
+
dsi_display_clk_ctrl(dsi_display->dsi_clk_handle,
DSI_ALL_CLKS, DSI_CLK_OFF);
mutex_unlock(&dsi_display->display_lock);
@@ -838,6 +866,21 @@ end:
return rc;
}
+static void _dsi_display_continuous_clk_ctrl(struct dsi_display *display,
+ bool enable)
+{
+ int i;
+ struct dsi_display_ctrl *ctrl;
+
+ if (!display || !display->panel->host_config.force_hs_clk_lane)
+ return;
+
+ for (i = 0; i < display->ctrl_count; i++) {
+ ctrl = &display->ctrl[i];
+ dsi_ctrl_set_continuous_clk(ctrl->ctrl, enable);
+ }
+}
+
int dsi_display_soft_reset(void *display)
{
struct dsi_display *dsi_display;
@@ -1078,7 +1121,7 @@ static ssize_t debugfs_misr_read(struct file *file,
return 0;
buf = kzalloc(max_len, GFP_KERNEL);
- if (!buf)
+ if (ZERO_OR_NULL_PTR(buf))
return -ENOMEM;
mutex_lock(&display->display_lock);
@@ -1109,7 +1152,7 @@ static ssize_t debugfs_misr_read(struct file *file,
goto error;
}
- if (copy_to_user(user_buf, buf, len)) {
+ if (copy_to_user(user_buf, buf, max_len)) {
rc = -EFAULT;
goto error;
}
@@ -1141,6 +1184,9 @@ static ssize_t debugfs_esd_trigger_check(struct file *file,
if (user_len > sizeof(u32))
return -EINVAL;
+ if (!user_len || !user_buf)
+ return -EINVAL;
+
buf = kzalloc(user_len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -1196,7 +1242,7 @@ static ssize_t debugfs_alter_esd_check_mode(struct file *file,
return 0;
buf = kzalloc(len, GFP_KERNEL);
- if (!buf)
+ if (ZERO_OR_NULL_PTR(buf))
return -ENOMEM;
if (copy_from_user(buf, user_buf, user_len)) {
@@ -1268,7 +1314,7 @@ static ssize_t debugfs_read_esd_check_mode(struct file *file,
}
buf = kzalloc(len, GFP_KERNEL);
- if (!buf)
+ if (ZERO_OR_NULL_PTR(buf))
return -ENOMEM;
esd_config = &display->panel->esd_config;
@@ -1453,14 +1499,12 @@ static int dsi_display_debugfs_deinit(struct dsi_display *display)
static void adjust_timing_by_ctrl_count(const struct dsi_display *display,
struct dsi_display_mode *mode)
{
- if (display->ctrl_count > 1) {
- mode->timing.h_active /= display->ctrl_count;
- mode->timing.h_front_porch /= display->ctrl_count;
- mode->timing.h_sync_width /= display->ctrl_count;
- mode->timing.h_back_porch /= display->ctrl_count;
- mode->timing.h_skew /= display->ctrl_count;
- mode->pixel_clk_khz /= display->ctrl_count;
- }
+ mode->timing.h_active /= display->ctrl_count;
+ mode->timing.h_front_porch /= display->ctrl_count;
+ mode->timing.h_sync_width /= display->ctrl_count;
+ mode->timing.h_back_porch /= display->ctrl_count;
+ mode->timing.h_skew /= display->ctrl_count;
+ mode->pixel_clk_khz /= display->ctrl_count;
}
static int dsi_display_is_ulps_req_valid(struct dsi_display *display,
@@ -1592,9 +1636,19 @@ static int dsi_display_set_clamp(struct dsi_display *display, bool enable)
m_ctrl = &display->ctrl[display->cmd_master_idx];
ulps_enabled = display->ulps_enabled;
+ /*
+ * Clamp control can be either through the DSI controller or
+ * the DSI PHY depending on hardware variation
+ */
rc = dsi_ctrl_set_clamp_state(m_ctrl->ctrl, enable, ulps_enabled);
if (rc) {
- pr_err("DSI Clamp state change(%d) failed\n", enable);
+ pr_err("DSI ctrl clamp state change(%d) failed\n", enable);
+ return rc;
+ }
+
+ rc = dsi_phy_set_clamp_state(m_ctrl->phy, enable);
+ if (rc) {
+ pr_err("DSI phy clamp state change(%d) failed\n", enable);
return rc;
}
@@ -1608,7 +1662,18 @@ static int dsi_display_set_clamp(struct dsi_display *display, bool enable)
pr_err("DSI Clamp state change(%d) failed\n", enable);
return rc;
}
+
+ rc = dsi_phy_set_clamp_state(ctrl->phy, enable);
+ if (rc) {
+ pr_err("DSI phy clamp state change(%d) failed\n",
+ enable);
+ return rc;
+ }
+
+ pr_debug("Clamps %s for ctrl%d\n",
+ enable ? "enabled" : "disabled", i);
}
+
display->clamp_enabled = enable;
return 0;
}
@@ -2147,7 +2212,7 @@ static int dsi_display_set_clk_src(struct dsi_display *display)
m_ctrl = &display->ctrl[display->clk_master_idx];
rc = dsi_ctrl_set_clock_source(m_ctrl->ctrl,
- &display->clock_info.src_clks);
+ &display->clock_info.mux_clks);
if (rc) {
pr_err("[%s] failed to set source clocks for master, rc=%d\n",
display->name, rc);
@@ -2161,7 +2226,7 @@ static int dsi_display_set_clk_src(struct dsi_display *display)
continue;
rc = dsi_ctrl_set_clock_source(ctrl->ctrl,
- &display->clock_info.src_clks);
+ &display->clock_info.mux_clks);
if (rc) {
pr_err("[%s] failed to set source clocks, rc=%d\n",
display->name, rc);
@@ -2860,50 +2925,53 @@ static int dsi_display_clocks_init(struct dsi_display *display)
struct dsi_clk_link_set *src = &display->clock_info.src_clks;
struct dsi_clk_link_set *mux = &display->clock_info.mux_clks;
struct dsi_clk_link_set *shadow = &display->clock_info.shadow_clks;
+ struct dsi_dyn_clk_caps *dyn_clk_caps = &(display->panel->dyn_clk_caps);
+
+ mux->byte_clk = devm_clk_get(&display->pdev->dev, "mux_byte_clk");
+ if (IS_ERR_OR_NULL(mux->byte_clk)) {
+ rc = PTR_ERR(mux->byte_clk);
+ pr_err("failed to get mux_byte_clk, rc=%d\n", rc);
+ mux->byte_clk = NULL;
+ goto error;
+ };
+
+ mux->pixel_clk = devm_clk_get(&display->pdev->dev, "mux_pixel_clk");
+ if (IS_ERR_OR_NULL(mux->pixel_clk)) {
+ rc = PTR_ERR(mux->pixel_clk);
+ mux->pixel_clk = NULL;
+ pr_err("failed to get mux_pixel_clk, rc=%d\n", rc);
+ goto error;
+ };
src->byte_clk = devm_clk_get(&display->pdev->dev, "src_byte_clk");
if (IS_ERR_OR_NULL(src->byte_clk)) {
rc = PTR_ERR(src->byte_clk);
src->byte_clk = NULL;
pr_err("failed to get src_byte_clk, rc=%d\n", rc);
- goto error;
- }
-
- src->pixel_clk = devm_clk_get(&display->pdev->dev, "src_pixel_clk");
- if (IS_ERR_OR_NULL(src->pixel_clk)) {
- rc = PTR_ERR(src->pixel_clk);
- src->pixel_clk = NULL;
- pr_err("failed to get src_pixel_clk, rc=%d\n", rc);
- goto error;
- }
-
- mux->byte_clk = devm_clk_get(&display->pdev->dev, "mux_byte_clk");
- if (IS_ERR_OR_NULL(mux->byte_clk)) {
- rc = PTR_ERR(mux->byte_clk);
- pr_debug("failed to get mux_byte_clk, rc=%d\n", rc);
- mux->byte_clk = NULL;
/*
* Skip getting rest of clocks since one failed. This is a
* non-critical failure since these clocks are requied only for
* dynamic refresh use cases.
*/
rc = 0;
+ dyn_clk_caps->dyn_clk_support = false;
goto done;
- };
+ }
- mux->pixel_clk = devm_clk_get(&display->pdev->dev, "mux_pixel_clk");
- if (IS_ERR_OR_NULL(mux->pixel_clk)) {
- rc = PTR_ERR(mux->pixel_clk);
- mux->pixel_clk = NULL;
- pr_debug("failed to get mux_pixel_clk, rc=%d\n", rc);
+ src->pixel_clk = devm_clk_get(&display->pdev->dev, "src_pixel_clk");
+ if (IS_ERR_OR_NULL(src->pixel_clk)) {
+ rc = PTR_ERR(src->pixel_clk);
+ src->pixel_clk = NULL;
+ pr_err("failed to get src_pixel_clk, rc=%d\n", rc);
/*
* Skip getting rest of clocks since one failed. This is a
* non-critical failure since these clocks are requied only for
* dynamic refresh use cases.
*/
rc = 0;
+ dyn_clk_caps->dyn_clk_support = false;
goto done;
- };
+ }
shadow->byte_clk = devm_clk_get(&display->pdev->dev, "shadow_byte_clk");
if (IS_ERR_OR_NULL(shadow->byte_clk)) {
@@ -2916,6 +2984,7 @@ static int dsi_display_clocks_init(struct dsi_display *display)
* dynamic refresh use cases.
*/
rc = 0;
+ dyn_clk_caps->dyn_clk_support = false;
goto done;
};
@@ -2931,6 +3000,7 @@ static int dsi_display_clocks_init(struct dsi_display *display)
* dynamic refresh use cases.
*/
rc = 0;
+ dyn_clk_caps->dyn_clk_support = false;
goto done;
};
@@ -2998,12 +3068,20 @@ static void dsi_display_ctrl_isr_configure(struct dsi_display *display, bool en)
int dsi_pre_clkoff_cb(void *priv,
enum dsi_clk_type clk,
+ enum dsi_lclk_type l_type,
enum dsi_clk_state new_state)
{
int rc = 0;
struct dsi_display *display = priv;
- if ((clk & DSI_LINK_CLK) && (new_state == DSI_CLK_OFF)) {
+ if ((clk & DSI_LINK_CLK) && (new_state == DSI_CLK_OFF) &&
+ (l_type && DSI_LINK_LP_CLK)) {
+ /*
+ * If continuous clock is enabled then disable it
+ * before entering into ULPS Mode.
+ */
+ if (display->panel->host_config.force_hs_clk_lane)
+ _dsi_display_continuous_clk_ctrl(display, false);
/*
* If ULPS feature is enabled, enter ULPS first.
* However, when blanking the panel, we should enter ULPS
@@ -3053,13 +3131,14 @@ int dsi_pre_clkoff_cb(void *priv,
int dsi_post_clkon_cb(void *priv,
enum dsi_clk_type clk,
+ enum dsi_lclk_type l_type,
enum dsi_clk_state curr_state)
{
int rc = 0;
struct dsi_display *display = priv;
bool mmss_clamp = false;
- if (clk & DSI_CORE_CLK) {
+ if ((clk & DSI_LINK_CLK) && (l_type & DSI_LINK_LP_CLK)) {
mmss_clamp = display->clamp_enabled;
/*
* controller setup is needed if coming out of idle
@@ -3068,6 +3147,13 @@ int dsi_post_clkon_cb(void *priv,
if (mmss_clamp)
dsi_display_ctrl_setup(display);
+ /*
+ * Phy setup is needed if coming out of idle
+ * power collapse with clamps enabled.
+ */
+ if (display->phy_idle_power_off || mmss_clamp)
+ dsi_display_phy_idle_on(display, mmss_clamp);
+
if (display->ulps_enabled && mmss_clamp) {
/*
* ULPS Entry Request. This is needed if the lanes were
@@ -3106,17 +3192,11 @@ int dsi_post_clkon_cb(void *priv,
goto error;
}
- /*
- * Phy setup is needed if coming out of idle
- * power collapse with clamps enabled.
- */
- if (display->phy_idle_power_off || mmss_clamp)
- dsi_display_phy_idle_on(display, mmss_clamp);
-
/* enable dsi to serve irqs */
dsi_display_ctrl_irq_update(display, true);
}
- if (clk & DSI_LINK_CLK) {
+
+ if ((clk & DSI_LINK_CLK) && (l_type & DSI_LINK_HS_CLK)) {
/*
* Toggle the resync FIFO everytime clock changes, except
* when cont-splash screen transition is going on.
@@ -3134,6 +3214,9 @@ int dsi_post_clkon_cb(void *priv,
goto error;
}
}
+
+ if (display->panel->host_config.force_hs_clk_lane)
+ _dsi_display_continuous_clk_ctrl(display, true);
}
error:
return rc;
@@ -3141,6 +3224,7 @@ error:
int dsi_post_clkoff_cb(void *priv,
enum dsi_clk_type clk_type,
+ enum dsi_lclk_type l_type,
enum dsi_clk_state curr_state)
{
int rc = 0;
@@ -3171,6 +3255,7 @@ int dsi_post_clkoff_cb(void *priv,
int dsi_pre_clkon_cb(void *priv,
enum dsi_clk_type clk_type,
+ enum dsi_lclk_type l_type,
enum dsi_clk_state new_state)
{
int rc = 0;
@@ -3609,6 +3694,305 @@ static bool dsi_display_is_seamless_dfps_possible(
return true;
}
+static int dsi_display_update_dsi_bitrate(struct dsi_display *display,
+ u32 bit_clk_rate)
+{
+ int rc = 0;
+ int i;
+
+ pr_debug("%s:bit rate:%d\n", __func__, bit_clk_rate);
+ if (!display->panel) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ if (bit_clk_rate == 0) {
+ pr_err("Invalid bit clock rate\n");
+ return -EINVAL;
+ }
+
+ display->config.bit_clk_rate_hz = bit_clk_rate;
+
+ for (i = 0; i < display->ctrl_count; i++) {
+ struct dsi_display_ctrl *dsi_disp_ctrl = &display->ctrl[i];
+ struct dsi_ctrl *ctrl = dsi_disp_ctrl->ctrl;
+ u32 num_of_lanes = 0, bpp;
+ u64 bit_rate, pclk_rate, bit_rate_per_lane, byte_clk_rate;
+ struct dsi_host_common_cfg *host_cfg;
+
+ mutex_lock(&ctrl->ctrl_lock);
+
+ host_cfg = &display->panel->host_config;
+ if (host_cfg->data_lanes & DSI_DATA_LANE_0)
+ num_of_lanes++;
+ if (host_cfg->data_lanes & DSI_DATA_LANE_1)
+ num_of_lanes++;
+ if (host_cfg->data_lanes & DSI_DATA_LANE_2)
+ num_of_lanes++;
+ if (host_cfg->data_lanes & DSI_DATA_LANE_3)
+ num_of_lanes++;
+
+ if (num_of_lanes == 0) {
+ pr_err("Invalid lane count\n");
+ rc = -EINVAL;
+ goto error;
+ }
+
+ bpp = dsi_pixel_format_to_bpp(host_cfg->dst_format);
+
+ bit_rate = display->config.bit_clk_rate_hz * num_of_lanes;
+ bit_rate_per_lane = bit_rate;
+ do_div(bit_rate_per_lane, num_of_lanes);
+ pclk_rate = bit_rate;
+ do_div(pclk_rate, bpp);
+ byte_clk_rate = bit_rate_per_lane;
+ do_div(byte_clk_rate, 8);
+ pr_debug("bit_clk_rate = %llu, bit_clk_rate_per_lane = %llu\n",
+ bit_rate, bit_rate_per_lane);
+ pr_debug("byte_clk_rate = %llu, pclk_rate = %llu\n",
+ byte_clk_rate, pclk_rate);
+
+ ctrl->clk_freq.byte_clk_rate = byte_clk_rate;
+ ctrl->clk_freq.pix_clk_rate = pclk_rate;
+ rc = dsi_clk_set_link_frequencies(display->dsi_clk_handle,
+ ctrl->clk_freq, ctrl->cell_index);
+ if (rc) {
+ pr_err("Failed to update link frequencies\n");
+ goto error;
+ }
+
+ ctrl->host_config.bit_clk_rate_hz = bit_clk_rate;
+error:
+ mutex_unlock(&ctrl->ctrl_lock);
+
+ /* TODO: recover ctrl->clk_freq in case of failure */
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+static void _dsi_display_calc_pipe_delay(struct dsi_display *display,
+ struct dsi_dyn_clk_delay *delay,
+ struct dsi_display_mode *mode)
+{
+ u32 esc_clk_rate_hz;
+ u32 pclk_to_esc_ratio, byte_to_esc_ratio, hr_bit_to_esc_ratio;
+ u32 hsync_period = 0;
+ struct dsi_display_ctrl *m_ctrl;
+ struct dsi_ctrl *dsi_ctrl;
+ struct dsi_phy_cfg *cfg;
+
+ m_ctrl = &display->ctrl[display->clk_master_idx];
+ dsi_ctrl = m_ctrl->ctrl;
+
+ cfg = &(m_ctrl->phy->cfg);
+
+ esc_clk_rate_hz = dsi_ctrl->clk_freq.esc_clk_rate * 1000;
+ pclk_to_esc_ratio = ((dsi_ctrl->clk_freq.pix_clk_rate * 1000) /
+ esc_clk_rate_hz);
+ byte_to_esc_ratio = ((dsi_ctrl->clk_freq.byte_clk_rate * 1000) /
+ esc_clk_rate_hz);
+ hr_bit_to_esc_ratio = ((dsi_ctrl->clk_freq.byte_clk_rate * 4 * 1000) /
+ esc_clk_rate_hz);
+
+ hsync_period = DSI_H_TOTAL_DSC(&mode->timing);
+ delay->pipe_delay = (hsync_period + 1) / pclk_to_esc_ratio;
+ if (!display->panel->video_config.eof_bllp_lp11_en)
+ delay->pipe_delay += (17 / pclk_to_esc_ratio) +
+ ((21 + (display->config.common_config.t_clk_pre + 1) +
+ (display->config.common_config.t_clk_post + 1)) /
+ byte_to_esc_ratio) +
+ ((((cfg->timing.lane_v3[8] >> 1) + 1) +
+ ((cfg->timing.lane_v3[6] >> 1) + 1) +
+ ((cfg->timing.lane_v3[3] * 4) +
+ (cfg->timing.lane_v3[5] >> 1) + 1) +
+ ((cfg->timing.lane_v3[7] >> 1) + 1) +
+ ((cfg->timing.lane_v3[1] >> 1) + 1) +
+ ((cfg->timing.lane_v3[4] >> 1) + 1)) /
+ hr_bit_to_esc_ratio);
+
+ delay->pipe_delay2 = 0;
+ if (display->panel->host_config.force_hs_clk_lane)
+ delay->pipe_delay2 = (6 / byte_to_esc_ratio) +
+ ((((cfg->timing.lane_v3[1] >> 1) + 1) +
+ ((cfg->timing.lane_v3[4] >> 1) + 1)) /
+ hr_bit_to_esc_ratio);
+
+ /* 130 us pll delay recommended by h/w doc */
+ delay->pll_delay = ((130 * esc_clk_rate_hz) / 1000000) * 2;
+}
+
+static int _dsi_display_dyn_update_clks(struct dsi_display *display,
+ struct link_clk_freq *bkp_freq)
+{
+ int rc = 0, i;
+ struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+ m_ctrl = &display->ctrl[display->clk_master_idx];
+
+ dsi_clk_prepare_enable(&display->clock_info.src_clks);
+
+ rc = dsi_clk_update_parent(&display->clock_info.shadow_clks,
+ &display->clock_info.mux_clks);
+ if (rc) {
+ pr_err("failed update mux parent to shadow\n");
+ goto exit;
+ }
+
+ for (i = 0; (i < display->ctrl_count) &&
+ (i < MAX_DSI_CTRLS_PER_DISPLAY); i++) {
+ ctrl = &display->ctrl[i];
+ if (!ctrl->ctrl)
+ continue;
+ rc = dsi_clk_set_byte_clk_rate(display->dsi_clk_handle,
+ ctrl->ctrl->clk_freq.byte_clk_rate, i);
+ if (rc) {
+ pr_err("failed to set byte rate for index:%d\n", i);
+ goto recover_byte_clk;
+ }
+ rc = dsi_clk_set_pixel_clk_rate(display->dsi_clk_handle,
+ ctrl->ctrl->clk_freq.pix_clk_rate, i);
+ if (rc) {
+ pr_err("failed to set pix rate for index:%d\n", i);
+ goto recover_pix_clk;
+ }
+ }
+
+ for (i = 0; (i < display->ctrl_count) &&
+ (i < MAX_DSI_CTRLS_PER_DISPLAY); i++) {
+ ctrl = &display->ctrl[i];
+ if (ctrl == m_ctrl)
+ continue;
+ dsi_phy_dynamic_refresh_trigger(ctrl->phy, false);
+ }
+ dsi_phy_dynamic_refresh_trigger(m_ctrl->phy, true);
+
+ /* wait for dynamic refresh done */
+ for (i = 0; (i < display->ctrl_count) &&
+ (i < MAX_DSI_CTRLS_PER_DISPLAY); i++) {
+ ctrl = &display->ctrl[i];
+ rc = dsi_ctrl_wait4dynamic_refresh_done(ctrl->ctrl);
+ if (rc) {
+ pr_err("wait4dynamic refresh failed for dsi:%d\n", i);
+ goto recover_pix_clk;
+ } else {
+ pr_info("dynamic refresh done on dsi: %s\n",
+ i ? "slave" : "master");
+ }
+ }
+
+ for (i = 0; (i < display->ctrl_count) &&
+ (i < MAX_DSI_CTRLS_PER_DISPLAY); i++) {
+ ctrl = &display->ctrl[i];
+ dsi_phy_dynamic_refresh_clear(ctrl->phy);
+ }
+
+ rc = dsi_clk_update_parent(&display->clock_info.src_clks,
+ &display->clock_info.mux_clks);
+ if (rc)
+ pr_err("could not switch back to src clks %d\n", rc);
+
+ dsi_clk_disable_unprepare(&display->clock_info.src_clks);
+
+ return rc;
+
+recover_pix_clk:
+ for (i = 0; (i < display->ctrl_count) &&
+ (i < MAX_DSI_CTRLS_PER_DISPLAY); i++) {
+ ctrl = &display->ctrl[i];
+ if (!ctrl->ctrl)
+ continue;
+ dsi_clk_set_pixel_clk_rate(display->dsi_clk_handle,
+ bkp_freq->pix_clk_rate, i);
+ }
+
+recover_byte_clk:
+ for (i = 0; (i < display->ctrl_count) &&
+ (i < MAX_DSI_CTRLS_PER_DISPLAY); i++) {
+ ctrl = &display->ctrl[i];
+ if (!ctrl->ctrl)
+ continue;
+ dsi_clk_set_byte_clk_rate(display->dsi_clk_handle,
+ bkp_freq->byte_clk_rate, i);
+ }
+
+exit:
+ dsi_clk_disable_unprepare(&display->clock_info.src_clks);
+
+ return rc;
+}
+
+static int dsi_display_dynamic_clk_switch(struct dsi_display *display,
+ struct dsi_display_mode *mode)
+{
+ int rc = 0, mask, i;
+ struct dsi_display_ctrl *m_ctrl, *ctrl;
+ struct dsi_dyn_clk_delay delay;
+ struct link_clk_freq bkp_freq;
+
+ dsi_panel_acquire_panel_lock(display->panel);
+
+ m_ctrl = &display->ctrl[display->clk_master_idx];
+
+ dsi_display_clk_ctrl(display->dsi_clk_handle, DSI_ALL_CLKS, DSI_CLK_ON);
+
+ /* mask PLL unlock, FIFO overflow and underflow errors */
+ mask = BIT(DSI_PLL_UNLOCK_ERR) | BIT(DSI_FIFO_UNDERFLOW) |
+ BIT(DSI_FIFO_OVERFLOW);
+ dsi_display_mask_ctrl_error_interrupts(display, mask, true);
+
+ /* update the phy timings based on new mode */
+ for (i = 0; i < display->ctrl_count; i++) {
+ ctrl = &display->ctrl[i];
+ dsi_phy_update_phy_timings(ctrl->phy, &display->config);
+ }
+
+ /* back up existing rates to handle failure case */
+ bkp_freq.byte_clk_rate = m_ctrl->ctrl->clk_freq.byte_clk_rate;
+ bkp_freq.pix_clk_rate = m_ctrl->ctrl->clk_freq.pix_clk_rate;
+ bkp_freq.esc_clk_rate = m_ctrl->ctrl->clk_freq.esc_clk_rate;
+
+ rc = dsi_display_update_dsi_bitrate(display, mode->timing.clk_rate_hz);
+ if (rc) {
+ pr_err("failed set link frequencies %d\n", rc);
+ goto exit;
+ }
+
+ /* calculate pipe delays */
+ _dsi_display_calc_pipe_delay(display, &delay, mode);
+
+ /* configure dynamic refresh ctrl registers */
+ for (i = 0; i < display->ctrl_count; i++) {
+ ctrl = &display->ctrl[i];
+ if (!ctrl->phy)
+ continue;
+ if (ctrl == m_ctrl)
+ dsi_phy_config_dynamic_refresh(ctrl->phy, &delay, true);
+ else
+ dsi_phy_config_dynamic_refresh(ctrl->phy, &delay,
+ false);
+ }
+
+ rc = _dsi_display_dyn_update_clks(display, &bkp_freq);
+
+exit:
+ dsi_display_mask_ctrl_error_interrupts(display, mask, false);
+
+ dsi_display_clk_ctrl(display->dsi_clk_handle, DSI_ALL_CLKS,
+ DSI_CLK_OFF);
+
+ /* store newly calculated phy timings in mode private info */
+ dsi_phy_dyn_refresh_cache_phy_timings(m_ctrl->phy,
+ mode->priv_info->phy_timing_val,
+ mode->priv_info->phy_timing_len);
+
+ dsi_panel_release_panel_lock(display->panel);
+
+ return rc;
+}
+
static int dsi_display_dfps_update(struct dsi_display *display,
struct dsi_display_mode *dsi_mode)
{
@@ -3872,6 +4256,16 @@ static int dsi_display_set_mode_sub(struct dsi_display *display,
display->name, rc);
goto error;
}
+ } else if (mode->dsi_mode_flags & DSI_MODE_FLAG_DYN_CLK) {
+ rc = dsi_display_dynamic_clk_switch(display, mode);
+ if (rc)
+ pr_err("dynamic clk change failed %d\n", rc);
+ /*
+	 * skip rest of the operations since
+ * dsi_display_dynamic_clk_switch() already takes
+ * care of them.
+ */
+ return rc;
}
for (i = 0; i < display->ctrl_count; i++) {
@@ -4107,84 +4501,6 @@ static int dsi_display_force_update_dsi_clk(struct dsi_display *display)
return rc;
}
-static int dsi_display_request_update_dsi_bitrate(struct dsi_display *display,
- u32 bit_clk_rate)
-{
- int rc = 0;
- int i;
-
- pr_debug("%s:bit rate:%d\n", __func__, bit_clk_rate);
- if (!display->panel) {
- pr_err("Invalid params\n");
- return -EINVAL;
- }
-
- if (bit_clk_rate == 0) {
- pr_err("Invalid bit clock rate\n");
- return -EINVAL;
- }
-
- display->config.bit_clk_rate_hz = bit_clk_rate;
-
- for (i = 0; i < display->ctrl_count; i++) {
- struct dsi_display_ctrl *dsi_disp_ctrl = &display->ctrl[i];
- struct dsi_ctrl *ctrl = dsi_disp_ctrl->ctrl;
- u32 num_of_lanes = 0;
- u32 bpp = 3;
- u64 bit_rate, pclk_rate, bit_rate_per_lane, byte_clk_rate;
- struct dsi_host_common_cfg *host_cfg;
-
- mutex_lock(&ctrl->ctrl_lock);
-
- host_cfg = &display->panel->host_config;
- if (host_cfg->data_lanes & DSI_DATA_LANE_0)
- num_of_lanes++;
- if (host_cfg->data_lanes & DSI_DATA_LANE_1)
- num_of_lanes++;
- if (host_cfg->data_lanes & DSI_DATA_LANE_2)
- num_of_lanes++;
- if (host_cfg->data_lanes & DSI_DATA_LANE_3)
- num_of_lanes++;
-
- if (num_of_lanes == 0) {
- pr_err("Invalid lane count\n");
- rc = -EINVAL;
- goto error;
- }
-
- bit_rate = display->config.bit_clk_rate_hz * num_of_lanes;
- bit_rate_per_lane = bit_rate;
- do_div(bit_rate_per_lane, num_of_lanes);
- pclk_rate = bit_rate;
- do_div(pclk_rate, (8 * bpp));
- byte_clk_rate = bit_rate_per_lane;
- do_div(byte_clk_rate, 8);
- pr_debug("bit_clk_rate = %llu, bit_clk_rate_per_lane = %llu\n",
- bit_rate, bit_rate_per_lane);
- pr_debug("byte_clk_rate = %llu, pclk_rate = %llu\n",
- byte_clk_rate, pclk_rate);
-
- ctrl->clk_freq.byte_clk_rate = byte_clk_rate;
- ctrl->clk_freq.pix_clk_rate = pclk_rate;
- rc = dsi_clk_set_link_frequencies(display->dsi_clk_handle,
- ctrl->clk_freq, ctrl->cell_index);
- if (rc) {
- pr_err("Failed to update link frequencies\n");
- goto error;
- }
-
- ctrl->host_config.bit_clk_rate_hz = bit_clk_rate;
-error:
- mutex_unlock(&ctrl->ctrl_lock);
-
- /* TODO: recover ctrl->clk_freq in case of failure */
- if (rc)
- return rc;
- }
-
- return 0;
-}
-
static ssize_t sysfs_dynamic_dsi_clk_read(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -4235,6 +4551,11 @@ static ssize_t sysfs_dynamic_dsi_clk_write(struct device *dev,
return rc;
}
+ if (display->panel->panel_mode != DSI_OP_CMD_MODE) {
+ pr_err("only supported for command mode\n");
+ return -ENOTSUPP;
+ }
+
if (clk_rate <= 0) {
pr_err("%s: bitrate should be greater than 0\n", __func__);
return -EINVAL;
@@ -4250,7 +4571,7 @@ static ssize_t sysfs_dynamic_dsi_clk_write(struct device *dev,
mutex_lock(&display->display_lock);
display->cached_clk_rate = clk_rate;
- rc = dsi_display_request_update_dsi_bitrate(display, clk_rate);
+ rc = dsi_display_update_dsi_bitrate(display, clk_rate);
if (!rc) {
pr_info("%s: bit clk is ready to be configured to '%d'\n",
__func__, clk_rate);
@@ -4387,10 +4708,16 @@ static int dsi_display_bind(struct device *dev,
goto error_ctrl_deinit;
}
- memcpy(&info.c_clks[i], &display_ctrl->ctrl->clk_info.core_clks,
- sizeof(struct dsi_core_clk_info));
- memcpy(&info.l_clks[i], &display_ctrl->ctrl->clk_info.link_clks,
- sizeof(struct dsi_link_clk_info));
+ memcpy(&info.c_clks[i],
+ (&display_ctrl->ctrl->clk_info.core_clks),
+ sizeof(struct dsi_core_clk_info));
+ memcpy(&info.l_hs_clks[i],
+ (&display_ctrl->ctrl->clk_info.hs_link_clks),
+ sizeof(struct dsi_link_hs_clk_info));
+ memcpy(&info.l_lp_clks[i],
+ (&display_ctrl->ctrl->clk_info.lp_link_clks),
+ sizeof(struct dsi_link_lp_clk_info));
+
info.c_clks[i].phandle = &priv->phandle;
info.bus_handle[i] =
display_ctrl->ctrl->axi_bus_info.bus_handle;
@@ -5029,7 +5356,8 @@ static int dsi_display_get_mode_count_no_lock(struct dsi_display *display,
u32 *count)
{
struct dsi_dfps_capabilities dfps_caps;
- int num_dfps_rates, rc = 0;
+ struct dsi_dyn_clk_caps *dyn_clk_caps;
+ int num_dfps_rates, num_bit_clks, rc = 0;
if (!display || !display->panel) {
pr_err("invalid display:%d panel:%d\n", display != NULL,
@@ -5046,12 +5374,16 @@ static int dsi_display_get_mode_count_no_lock(struct dsi_display *display,
return rc;
}
- num_dfps_rates = !dfps_caps.dfps_support ? 1 :
- dfps_caps.max_refresh_rate -
- dfps_caps.min_refresh_rate + 1;
+ num_dfps_rates = !dfps_caps.dfps_support ? 1 : dfps_caps.dfps_list_len;
+
+ dyn_clk_caps = &(display->panel->dyn_clk_caps);
+
+ num_bit_clks = !dyn_clk_caps->dyn_clk_support ? 1 :
+ dyn_clk_caps->bit_clk_list_len;
- /* Inflate num_of_modes by fps in dfps */
- *count = display->panel->num_timing_nodes * num_dfps_rates;
+ /* Inflate num_of_modes by fps and bit clks in dfps */
+ *count = display->panel->num_timing_nodes *
+ num_dfps_rates * num_bit_clks;
return 0;
}
@@ -5074,6 +5406,73 @@ int dsi_display_get_mode_count(struct dsi_display *display,
return 0;
}
+static void _dsi_display_populate_bit_clks(struct dsi_display *display,
+ int start, int end, u32 *mode_idx)
+{
+ struct dsi_dyn_clk_caps *dyn_clk_caps;
+ struct dsi_display_mode *src, *dst;
+ struct dsi_host_common_cfg *cfg;
+ int i, j, total_modes, bpp, lanes = 0;
+
+ if (!display || !mode_idx)
+ return;
+
+ dyn_clk_caps = &(display->panel->dyn_clk_caps);
+ if (!dyn_clk_caps->dyn_clk_support)
+ return;
+
+ cfg = &(display->panel->host_config);
+ bpp = dsi_pixel_format_to_bpp(cfg->dst_format);
+
+ if (cfg->data_lanes & DSI_LOGICAL_LANE_0)
+ lanes++;
+ if (cfg->data_lanes & DSI_LOGICAL_LANE_1)
+ lanes++;
+ if (cfg->data_lanes & DSI_LOGICAL_LANE_2)
+ lanes++;
+ if (cfg->data_lanes & DSI_LOGICAL_LANE_3)
+ lanes++;
+
+ dsi_display_get_mode_count_no_lock(display, &total_modes);
+
+ for (i = start; i < end; i++) {
+ src = &display->modes[i];
+ if (!src)
+ return;
+ /*
+ * TODO: currently setting the first bit rate in
+ * the list as preferred rate. But ideally should
+	 * be based on user or device tree preference.
+ */
+ src->timing.clk_rate_hz = dyn_clk_caps->bit_clk_list[0];
+ src->pixel_clk_khz =
+ div_u64(src->timing.clk_rate_hz * lanes, bpp);
+ src->pixel_clk_khz /= 1000;
+ src->pixel_clk_khz *= display->ctrl_count;
+ }
+
+ for (i = 1; i < dyn_clk_caps->bit_clk_list_len; i++) {
+ if (*mode_idx >= total_modes)
+ return;
+ for (j = start; j < end; j++) {
+ src = &display->modes[j];
+ dst = &display->modes[*mode_idx];
+
+ if (!src || !dst) {
+ pr_err("invalid mode index\n");
+ return;
+ }
+ memcpy(dst, src, sizeof(struct dsi_display_mode));
+ dst->timing.clk_rate_hz = dyn_clk_caps->bit_clk_list[i];
+ dst->pixel_clk_khz =
+ div_u64(dst->timing.clk_rate_hz * lanes, bpp);
+ dst->pixel_clk_khz /= 1000;
+ dst->pixel_clk_khz *= display->ctrl_count;
+ (*mode_idx)++;
+ }
+ }
+}
+
void dsi_display_put_mode(struct dsi_display *display,
struct dsi_display_mode *mode)
{
@@ -5084,9 +5483,10 @@ int dsi_display_get_modes(struct dsi_display *display,
struct dsi_display_mode **out_modes)
{
struct dsi_dfps_capabilities dfps_caps;
+ struct dsi_dyn_clk_caps *dyn_clk_caps;
u32 num_dfps_rates, panel_mode_count, total_mode_count;
u32 mode_idx, array_idx = 0;
- int i, rc = -EINVAL;
+ int i, start, end, rc = -EINVAL;
if (!display || !out_modes) {
pr_err("Invalid params\n");
@@ -5118,9 +5518,9 @@ int dsi_display_get_modes(struct dsi_display *display,
goto error;
}
- num_dfps_rates = !dfps_caps.dfps_support ? 1 :
- dfps_caps.max_refresh_rate -
- dfps_caps.min_refresh_rate + 1;
+ dyn_clk_caps = &(display->panel->dyn_clk_caps);
+
+ num_dfps_rates = !dfps_caps.dfps_support ? 1 : dfps_caps.dfps_list_len;
panel_mode_count = display->panel->num_timing_nodes;
@@ -5141,14 +5541,14 @@ int dsi_display_get_modes(struct dsi_display *display,
goto error;
}
- if (display->ctrl_count > 1) { /* TODO: remove if */
- panel_mode.timing.h_active *= display->ctrl_count;
- panel_mode.timing.h_front_porch *= display->ctrl_count;
- panel_mode.timing.h_sync_width *= display->ctrl_count;
- panel_mode.timing.h_back_porch *= display->ctrl_count;
- panel_mode.timing.h_skew *= display->ctrl_count;
- panel_mode.pixel_clk_khz *= display->ctrl_count;
- }
+ panel_mode.timing.h_active *= display->ctrl_count;
+ panel_mode.timing.h_front_porch *= display->ctrl_count;
+ panel_mode.timing.h_sync_width *= display->ctrl_count;
+ panel_mode.timing.h_back_porch *= display->ctrl_count;
+ panel_mode.timing.h_skew *= display->ctrl_count;
+ panel_mode.pixel_clk_khz *= display->ctrl_count;
+
+ start = array_idx;
for (i = 0; i < num_dfps_rates; i++) {
struct dsi_display_mode *sub_mode =
@@ -5162,24 +5562,24 @@ int dsi_display_get_modes(struct dsi_display *display,
}
memcpy(sub_mode, &panel_mode, sizeof(panel_mode));
+ array_idx++;
- if (dfps_caps.dfps_support) {
- curr_refresh_rate =
- sub_mode->timing.refresh_rate;
- sub_mode->timing.refresh_rate =
- dfps_caps.min_refresh_rate +
- (i % num_dfps_rates);
+ if (!dfps_caps.dfps_support)
+ continue;
- dsi_display_get_dfps_timing(display,
- sub_mode, curr_refresh_rate);
+ curr_refresh_rate = sub_mode->timing.refresh_rate;
+ sub_mode->timing.refresh_rate = dfps_caps.dfps_list[i];
- sub_mode->pixel_clk_khz =
- (DSI_H_TOTAL(&sub_mode->timing) *
- DSI_V_TOTAL(&sub_mode->timing) *
- sub_mode->timing.refresh_rate) / 1000;
- }
- array_idx++;
+ dsi_display_get_dfps_timing(display, sub_mode,
+ curr_refresh_rate);
}
+
+ end = array_idx;
+ /*
+ * if dynamic clk switch is supported then update all the bit
+ * clk rates.
+ */
+ _dsi_display_populate_bit_clks(display, start, end, &array_idx);
}
*out_modes = display->modes;
@@ -5262,7 +5662,8 @@ int dsi_display_find_mode(struct dsi_display *display,
if (cmp->timing.v_active == m->timing.v_active &&
cmp->timing.h_active == m->timing.h_active &&
- cmp->timing.refresh_rate == m->timing.refresh_rate) {
+ cmp->timing.refresh_rate == m->timing.refresh_rate &&
+ cmp->pixel_clk_khz == m->pixel_clk_khz) {
*out_mode = m;
rc = 0;
break;
@@ -5271,9 +5672,10 @@ int dsi_display_find_mode(struct dsi_display *display,
mutex_unlock(&display->display_lock);
if (!*out_mode) {
- pr_err("[%s] failed to find mode for v_active %u h_active %u rate %u\n",
+ pr_err("[%s] failed to find mode for v_active %u h_active %u fps %u pclk %u\n",
display->name, cmp->timing.v_active,
- cmp->timing.h_active, cmp->timing.refresh_rate);
+ cmp->timing.h_active, cmp->timing.refresh_rate,
+ cmp->pixel_clk_khz);
rc = -ENOENT;
}
@@ -5281,7 +5683,7 @@ int dsi_display_find_mode(struct dsi_display *display,
}
/**
- * dsi_display_validate_mode_vrr() - Validate if varaible refresh case.
+ * dsi_display_validate_mode_change() - Validate if variable refresh case.
* @display: DSI display handle.
* @cur_dsi_mode: Current DSI mode.
* @mode: Mode value structure to be validated.
@@ -5289,16 +5691,15 @@ int dsi_display_find_mode(struct dsi_display *display,
* is change in fps but vactive and hactive are same.
* Return: error code.
*/
-int dsi_display_validate_mode_vrr(struct dsi_display *display,
- struct dsi_display_mode *cur_dsi_mode,
- struct dsi_display_mode *mode)
+int dsi_display_validate_mode_change(struct dsi_display *display,
+ struct dsi_display_mode *cur_mode,
+ struct dsi_display_mode *adj_mode)
{
int rc = 0;
- struct dsi_display_mode adj_mode, cur_mode;
struct dsi_dfps_capabilities dfps_caps;
- u32 curr_refresh_rate;
+ struct dsi_dyn_clk_caps *dyn_clk_caps;
- if (!display || !mode) {
+ if (!display || !adj_mode) {
pr_err("Invalid params\n");
return -EINVAL;
}
@@ -5310,65 +5711,43 @@ int dsi_display_validate_mode_vrr(struct dsi_display *display,
mutex_lock(&display->display_lock);
- adj_mode = *mode;
- cur_mode = *cur_dsi_mode;
-
- if ((cur_mode.timing.refresh_rate != adj_mode.timing.refresh_rate) &&
- (cur_mode.timing.v_active == adj_mode.timing.v_active) &&
- (cur_mode.timing.h_active == adj_mode.timing.h_active)) {
-
- curr_refresh_rate = cur_mode.timing.refresh_rate;
- rc = dsi_panel_get_dfps_caps(display->panel, &dfps_caps);
- if (rc) {
- pr_err("[%s] failed to get dfps caps from panel\n",
- display->name);
- goto error;
- }
-
- cur_mode.timing.refresh_rate =
- adj_mode.timing.refresh_rate;
-
- rc = dsi_display_get_dfps_timing(display,
- &cur_mode, curr_refresh_rate);
- if (rc) {
- pr_err("[%s] seamless vrr not possible rc=%d\n",
- display->name, rc);
- goto error;
+ if ((cur_mode->timing.v_active == adj_mode->timing.v_active) &&
+ (cur_mode->timing.h_active == adj_mode->timing.h_active)) {
+ /* dfps change use case */
+ if (cur_mode->timing.refresh_rate !=
+ adj_mode->timing.refresh_rate) {
+ dsi_panel_get_dfps_caps(display->panel, &dfps_caps);
+ if (!dfps_caps.dfps_support) {
+ pr_err("invalid mode dfps not supported\n");
+ rc = -ENOTSUPP;
+ goto error;
+ }
+ pr_debug("Mode switch is seamless variable refresh\n");
+ adj_mode->dsi_mode_flags |= DSI_MODE_FLAG_VRR;
+ SDE_EVT32(cur_mode->timing.refresh_rate,
+ adj_mode->timing.refresh_rate,
+ cur_mode->timing.h_front_porch,
+ adj_mode->timing.h_front_porch);
}
- switch (dfps_caps.type) {
- /*
- * Ignore any round off factors in porch calculation.
- * Worse case is set to 5.
- */
- case DSI_DFPS_IMMEDIATE_VFP:
- if (abs(DSI_V_TOTAL(&cur_mode.timing) -
- DSI_V_TOTAL(&adj_mode.timing)) > 5)
- pr_err("Mismatch vfp fps:%d new:%d given:%d\n",
- adj_mode.timing.refresh_rate,
- cur_mode.timing.v_front_porch,
- adj_mode.timing.v_front_porch);
- break;
-
- case DSI_DFPS_IMMEDIATE_HFP:
- if (abs(DSI_H_TOTAL(&cur_mode.timing) -
- DSI_H_TOTAL(&adj_mode.timing)) > 5)
- pr_err("Mismatch hfp fps:%d new:%d given:%d\n",
- adj_mode.timing.refresh_rate,
- cur_mode.timing.h_front_porch,
- adj_mode.timing.h_front_porch);
- break;
- default:
- pr_err("Unsupported DFPS mode %d\n",
- dfps_caps.type);
- rc = -ENOTSUPP;
+ /* dynamic clk change use case */
+ if (cur_mode->pixel_clk_khz != adj_mode->pixel_clk_khz) {
+ dyn_clk_caps = &(display->panel->dyn_clk_caps);
+ if (!dyn_clk_caps->dyn_clk_support) {
+ pr_err("dyn clk change not supported\n");
+ rc = -ENOTSUPP;
+ goto error;
+ }
+ if (adj_mode->dsi_mode_flags & DSI_MODE_FLAG_VRR) {
+ pr_err("dfps and dyn clk not supported in same commit\n");
+ rc = -ENOTSUPP;
+ goto error;
+ }
+ pr_debug("dynamic clk change detected\n");
+ adj_mode->dsi_mode_flags |= DSI_MODE_FLAG_DYN_CLK;
+ SDE_EVT32(cur_mode->pixel_clk_khz,
+ adj_mode->pixel_clk_khz);
}
-
- pr_debug("Mode switch is seamless variable refresh\n");
- mode->dsi_mode_flags |= DSI_MODE_FLAG_VRR;
- SDE_EVT32(curr_refresh_rate, adj_mode.timing.refresh_rate,
- cur_mode.timing.h_front_porch,
- adj_mode.timing.h_front_porch);
}
error:
@@ -5799,6 +6178,8 @@ int dsi_display_prepare(struct dsi_display *display)
mode = display->panel->cur_mode;
+ dsi_display_set_ctrl_esd_check_flag(display, false);
+
if (mode->dsi_mode_flags & DSI_MODE_FLAG_DMS) {
if (display->is_cont_splash_enabled) {
pr_err("DMS is not supposed to be set on first frame\n");
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
index 6b1c0292becf..f65f0f59e336 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
@@ -398,13 +398,14 @@ int dsi_display_validate_mode(struct dsi_display *display,
u32 flags);
/**
- * dsi_display_validate_mode_vrr() - validates mode if variable refresh case
+ * dsi_display_validate_mode_change() - validates mode if variable refresh case
+ * or dynamic clk change case
* @display: Handle to display.
* @mode: Mode to be validated..
*
* Return: 0 if error code.
*/
-int dsi_display_validate_mode_vrr(struct dsi_display *display,
+int dsi_display_validate_mode_change(struct dsi_display *display,
struct dsi_display_mode *cur_dsi_mode,
struct dsi_display_mode *mode);
@@ -497,12 +498,14 @@ int dsi_display_disable(struct dsi_display *display);
* dsi_pre_clkoff_cb() - Callback before clock is turned off
* @priv: private data pointer.
* @clk_type: clock which is being turned on.
+ * @l_type: specifies if the clock is HS or LP type. Valid only for link clocks.
* @new_state: next state for the clock.
*
* @return: error code.
*/
int dsi_pre_clkoff_cb(void *priv, enum dsi_clk_type clk_type,
- enum dsi_clk_state new_state);
+ enum dsi_lclk_type l_type,
+ enum dsi_clk_state new_state);
/**
* dsi_display_update_pps() - update PPS buffer.
@@ -519,35 +522,40 @@ int dsi_display_update_pps(char *pps_cmd, void *display);
* dsi_post_clkoff_cb() - Callback after clock is turned off
* @priv: private data pointer.
* @clk_type: clock which is being turned on.
+ * @l_type: specifies if the clock is HS or LP type. Valid only for link clocks.
* @curr_state: current state for the clock.
*
* @return: error code.
*/
int dsi_post_clkoff_cb(void *priv, enum dsi_clk_type clk_type,
- enum dsi_clk_state curr_state);
+ enum dsi_lclk_type l_type,
+ enum dsi_clk_state curr_state);
/**
* dsi_post_clkon_cb() - Callback after clock is turned on
* @priv: private data pointer.
* @clk_type: clock which is being turned on.
+ * @l_type: specifies if the clock is HS or LP type. Valid only for link clocks.
* @curr_state: current state for the clock.
*
* @return: error code.
*/
int dsi_post_clkon_cb(void *priv, enum dsi_clk_type clk_type,
- enum dsi_clk_state curr_state);
-
+ enum dsi_lclk_type l_type,
+ enum dsi_clk_state curr_state);
/**
* dsi_pre_clkon_cb() - Callback before clock is turned on
* @priv: private data pointer.
* @clk_type: clock which is being turned on.
+ * @l_type: specifies if the clock is HS or LP type. Valid only for link clocks.
* @new_state: next state for the clock.
*
* @return: error code.
*/
int dsi_pre_clkon_cb(void *priv, enum dsi_clk_type clk_type,
- enum dsi_clk_state new_state);
+ enum dsi_lclk_type l_type,
+ enum dsi_clk_state new_state);
/**
* dsi_display_unprepare() - power off display hardware.
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
index 5b4786596d33..fca41e4ac1ee 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
@@ -63,6 +63,8 @@ static void convert_to_dsi_mode(const struct drm_display_mode *drm_mode,
dsi_mode->dsi_mode_flags |= DSI_MODE_FLAG_DMS;
if (msm_is_mode_seamless_vrr(drm_mode))
dsi_mode->dsi_mode_flags |= DSI_MODE_FLAG_VRR;
+ if (msm_is_mode_seamless_dyn_clk(drm_mode))
+ dsi_mode->dsi_mode_flags |= DSI_MODE_FLAG_DYN_CLK;
dsi_mode->timing.h_sync_polarity =
!!(drm_mode->flags & DRM_MODE_FLAG_PHSYNC);
@@ -105,13 +107,18 @@ void dsi_convert_to_drm_mode(const struct dsi_display_mode *dsi_mode,
drm_mode->private_flags |= MSM_MODE_FLAG_SEAMLESS_DMS;
if (dsi_mode->dsi_mode_flags & DSI_MODE_FLAG_VRR)
drm_mode->private_flags |= MSM_MODE_FLAG_SEAMLESS_VRR;
+ if (dsi_mode->dsi_mode_flags & DSI_MODE_FLAG_DYN_CLK)
+ drm_mode->private_flags |= MSM_MODE_FLAG_SEAMLESS_DYN_CLK;
if (dsi_mode->timing.h_sync_polarity)
drm_mode->flags |= DRM_MODE_FLAG_PHSYNC;
if (dsi_mode->timing.v_sync_polarity)
drm_mode->flags |= DRM_MODE_FLAG_PVSYNC;
- drm_mode_set_name(drm_mode);
+ /* set mode name */
+ snprintf(drm_mode->name, DRM_DISPLAY_MODE_LEN, "%dx%dx%dx%d",
+ drm_mode->hdisplay, drm_mode->vdisplay, drm_mode->vrefresh,
+ drm_mode->clock);
}
static int dsi_bridge_attach(struct drm_bridge *bridge)
@@ -152,7 +159,8 @@ static void dsi_bridge_pre_enable(struct drm_bridge *bridge)
}
if (c_bridge->dsi_mode.dsi_mode_flags &
- (DSI_MODE_FLAG_SEAMLESS | DSI_MODE_FLAG_VRR)) {
+ (DSI_MODE_FLAG_SEAMLESS | DSI_MODE_FLAG_VRR |
+ DSI_MODE_FLAG_DYN_CLK)) {
pr_debug("[%d] seamless pre-enable\n", c_bridge->id);
return;
}
@@ -186,6 +194,7 @@ static void dsi_bridge_enable(struct drm_bridge *bridge)
{
int rc = 0;
struct dsi_bridge *c_bridge = to_dsi_bridge(bridge);
+ struct dsi_display *display;
if (!bridge) {
pr_err("Invalid params\n");
@@ -197,11 +206,15 @@ static void dsi_bridge_enable(struct drm_bridge *bridge)
pr_debug("[%d] seamless enable\n", c_bridge->id);
return;
}
+ display = c_bridge->display;
- rc = dsi_display_post_enable(c_bridge->display);
+ rc = dsi_display_post_enable(display);
if (rc)
pr_err("[%d] DSI display post enabled failed, rc=%d\n",
c_bridge->id, rc);
+
+ if (display && display->drm_conn)
+ sde_connector_helper_bridge_enable(display->drm_conn);
}
static void dsi_bridge_disable(struct drm_bridge *bridge)
@@ -270,6 +283,12 @@ static void dsi_bridge_mode_set(struct drm_bridge *bridge,
memset(&(c_bridge->dsi_mode), 0x0, sizeof(struct dsi_display_mode));
convert_to_dsi_mode(adjusted_mode, &(c_bridge->dsi_mode));
+
+ /* restore bit_clk_rate also for dynamic clk use cases */
+ c_bridge->dsi_mode.timing.clk_rate_hz =
+ dsi_drm_find_bit_clk_rate(c_bridge->display, adjusted_mode);
+
+ pr_debug("clk_rate: %llu\n", c_bridge->dsi_mode.timing.clk_rate_hz);
}
static bool dsi_bridge_mode_fixup(struct drm_bridge *bridge,
@@ -328,17 +347,20 @@ static bool dsi_bridge_mode_fixup(struct drm_bridge *bridge,
convert_to_dsi_mode(&crtc_state->crtc->state->mode,
&cur_dsi_mode);
- rc = dsi_display_validate_mode_vrr(c_bridge->display,
+ rc = dsi_display_validate_mode_change(c_bridge->display,
&cur_dsi_mode, &dsi_mode);
- if (rc)
- pr_debug("[%s] vrr mode mismatch failure rc=%d\n",
+ if (rc) {
+ pr_err("[%s] seamless mode mismatch failure rc=%d\n",
c_bridge->display->name, rc);
+ return false;
+ }
cur_mode = crtc_state->crtc->mode;
/* No DMS/VRR when drm pipeline is changing */
if (!drm_mode_equal(&cur_mode, adjusted_mode) &&
(!(dsi_mode.dsi_mode_flags & DSI_MODE_FLAG_VRR)) &&
+ (!(dsi_mode.dsi_mode_flags & DSI_MODE_FLAG_DYN_CLK)) &&
(!crtc_state->active_changed ||
display->is_cont_splash_enabled))
dsi_mode.dsi_mode_flags |= DSI_MODE_FLAG_DMS;
@@ -350,6 +372,33 @@ static bool dsi_bridge_mode_fixup(struct drm_bridge *bridge,
return true;
}
+u64 dsi_drm_find_bit_clk_rate(void *display,
+ const struct drm_display_mode *drm_mode)
+{
+ int i = 0, count = 0;
+ struct dsi_display *dsi_display = display;
+ struct dsi_display_mode *dsi_mode;
+ u64 bit_clk_rate = 0;
+
+ if (!dsi_display || !drm_mode)
+ return 0;
+
+ dsi_display_get_mode_count(dsi_display, &count);
+
+ for (i = 0; i < count; i++) {
+ dsi_mode = &dsi_display->modes[i];
+ if ((dsi_mode->timing.v_active == drm_mode->vdisplay) &&
+ (dsi_mode->timing.h_active == drm_mode->hdisplay) &&
+ (dsi_mode->pixel_clk_khz == drm_mode->clock) &&
+ (dsi_mode->timing.refresh_rate == drm_mode->vrefresh)) {
+ bit_clk_rate = dsi_mode->timing.clk_rate_hz;
+ break;
+ }
+ }
+
+ return bit_clk_rate;
+}
+
int dsi_conn_get_mode_info(const struct drm_display_mode *drm_mode,
struct msm_mode_info *mode_info,
u32 max_mixer_width, void *display)
@@ -373,7 +422,7 @@ int dsi_conn_get_mode_info(const struct drm_display_mode *drm_mode,
mode_info->prefill_lines = dsi_mode.priv_info->panel_prefill_lines;
mode_info->jitter_numer = dsi_mode.priv_info->panel_jitter_numer;
mode_info->jitter_denom = dsi_mode.priv_info->panel_jitter_denom;
- mode_info->clk_rate = dsi_mode.priv_info->clk_rate_hz;
+ mode_info->clk_rate = dsi_drm_find_bit_clk_rate(display, drm_mode);
memcpy(&mode_info->topology, &dsi_mode.priv_info->topology,
sizeof(struct msm_display_topology));
@@ -498,6 +547,9 @@ int dsi_conn_set_info_blob(struct drm_connector *connector,
panel->dfps_caps.max_refresh_rate);
}
+ sde_kms_info_add_keystr(info, "dyn bitclk support",
+ panel->dyn_clk_caps.dyn_clk_support ? "true" : "false");
+
switch (panel->phy_props.rotation) {
case DSI_PANEL_ROTATE_NONE:
sde_kms_info_add_keystr(info, "panel orientation", "none");
@@ -647,6 +699,9 @@ int dsi_connector_get_modes(struct drm_connector *connector,
}
m->width_mm = connector->display_info.width_mm;
m->height_mm = connector->display_info.height_mm;
+ /* set the first mode in list as preferred */
+ if (i == 0)
+ m->type |= DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(connector, m);
}
end:
@@ -753,6 +808,9 @@ int dsi_conn_post_kickoff(struct drm_connector *connector)
c_bridge->dsi_mode.dsi_mode_flags &= ~DSI_MODE_FLAG_VRR;
}
+ /* ensure dynamic clk switch flag is reset */
+ c_bridge->dsi_mode.dsi_mode_flags &= ~DSI_MODE_FLAG_DYN_CLK;
+
return 0;
}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
index 2bad8c09171c..8d3e7640d074 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
@@ -150,4 +150,6 @@ int dsi_conn_post_kickoff(struct drm_connector *connector);
void dsi_convert_to_drm_mode(const struct dsi_display_mode *dsi_mode,
struct drm_display_mode *drm_mode);
+u64 dsi_drm_find_bit_clk_rate(void *display,
+ const struct drm_display_mode *drm_mode);
#endif /* _DSI_DRM_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_hw.h
index 174be9f0d8f2..9ccff4b9ec60 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_hw.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_hw.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -45,4 +45,14 @@
#define DSI_R64(dsi_hw, off) readq_relaxed((dsi_hw)->base + (off))
#define DSI_W64(dsi_hw, off, val) writeq_relaxed((val), (dsi_hw)->base + (off))
+#define PLL_CALC_DATA(addr0, addr1, data0, data1) \
+ (((data1) << 24) | ((((addr1)/4) & 0xFF) << 16) | \
+ ((data0) << 8) | (((addr0)/4) & 0xFF))
+
+#define DSI_DYN_REF_REG_W(base, offset, addr0, addr1, data0, data1) \
+ writel_relaxed(PLL_CALC_DATA(addr0, addr1, data0, data1), \
+ (base) + (offset))
+
+#define DSI_GEN_R32(base, offset) readl_relaxed(base + (offset))
+#define DSI_GEN_W32(base, offset, val) writel_relaxed((val), base + (offset))
#endif /* _DSI_HW_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
index 38fc727b2258..e4b07344a621 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
@@ -1029,6 +1029,8 @@ static int dsi_panel_parse_misc_host_config(struct dsi_host_common_cfg *host,
host->append_tx_eot = of_property_read_bool(of_node,
"qcom,mdss-dsi-tx-eot-append");
+ host->force_hs_clk_lane = of_property_read_bool(of_node,
+ "qcom,mdss-dsi-force-clock-lane-hs");
return 0;
}
@@ -1081,6 +1083,46 @@ error:
return rc;
}
+static int dsi_panel_parse_dyn_clk_caps(struct dsi_dyn_clk_caps *dyn_clk_caps,
+ struct device_node *of_node,
+ const char *name)
+{
+ int rc = 0;
+ bool supported = false;
+
+ supported = of_property_read_bool(of_node, "qcom,dsi-dyn-clk-enable");
+
+ if (!supported) {
+ dyn_clk_caps->dyn_clk_support = false;
+ return rc;
+ }
+
+ of_find_property(of_node, "qcom,dsi-dyn-clk-list",
+ &dyn_clk_caps->bit_clk_list_len);
+ dyn_clk_caps->bit_clk_list_len /= sizeof(u32);
+ if (dyn_clk_caps->bit_clk_list_len < 1) {
+ pr_err("[%s] failed to get supported bit clk list\n", name);
+ return -EINVAL;
+ }
+
+ dyn_clk_caps->bit_clk_list = kcalloc(dyn_clk_caps->bit_clk_list_len,
+ sizeof(u32), GFP_KERNEL);
+ if (!dyn_clk_caps->bit_clk_list)
+ return -ENOMEM;
+
+ rc = of_property_read_u32_array(of_node, "qcom,dsi-dyn-clk-list",
+ dyn_clk_caps->bit_clk_list,
+ dyn_clk_caps->bit_clk_list_len);
+ if (rc) {
+ pr_err("[%s] failed to parse supported bit clk list\n", name);
+ return -EINVAL;
+ }
+
+ dyn_clk_caps->dyn_clk_support = true;
+
+ return 0;
+}
+
static int dsi_panel_parse_dfps_caps(struct dsi_dfps_capabilities *dfps_caps,
struct device_node *of_node,
const char *name)
@@ -1088,7 +1130,7 @@ static int dsi_panel_parse_dfps_caps(struct dsi_dfps_capabilities *dfps_caps,
int rc = 0;
bool supported = false;
const char *type;
- u32 val = 0;
+ u32 val = 0, i;
supported = of_property_read_bool(of_node,
"qcom,mdss-dsi-pan-enable-dynamic-fps");
@@ -1096,68 +1138,68 @@ static int dsi_panel_parse_dfps_caps(struct dsi_dfps_capabilities *dfps_caps,
if (!supported) {
pr_debug("[%s] DFPS is not supported\n", name);
dfps_caps->dfps_support = false;
+ return rc;
+ }
+
+ type = of_get_property(of_node,
+ "qcom,mdss-dsi-pan-fps-update",
+ NULL);
+ if (!type) {
+ pr_err("[%s] dfps type not defined\n", name);
+ rc = -EINVAL;
+ goto error;
+ } else if (!strcmp(type, "dfps_suspend_resume_mode")) {
+ dfps_caps->type = DSI_DFPS_SUSPEND_RESUME;
+ } else if (!strcmp(type, "dfps_immediate_clk_mode")) {
+ dfps_caps->type = DSI_DFPS_IMMEDIATE_CLK;
+ } else if (!strcmp(type, "dfps_immediate_porch_mode_hfp")) {
+ dfps_caps->type = DSI_DFPS_IMMEDIATE_HFP;
+ } else if (!strcmp(type, "dfps_immediate_porch_mode_vfp")) {
+ dfps_caps->type = DSI_DFPS_IMMEDIATE_VFP;
} else {
+ pr_err("[%s] dfps type is not recognized\n", name);
+ rc = -EINVAL;
+ goto error;
+ }
- type = of_get_property(of_node,
- "qcom,mdss-dsi-pan-fps-update",
- NULL);
- if (!type) {
- pr_err("[%s] dfps type not defined\n", name);
- rc = -EINVAL;
- goto error;
- } else if (!strcmp(type, "dfps_suspend_resume_mode")) {
- dfps_caps->type = DSI_DFPS_SUSPEND_RESUME;
- } else if (!strcmp(type, "dfps_immediate_clk_mode")) {
- dfps_caps->type = DSI_DFPS_IMMEDIATE_CLK;
- } else if (!strcmp(type, "dfps_immediate_porch_mode_hfp")) {
- dfps_caps->type = DSI_DFPS_IMMEDIATE_HFP;
- } else if (!strcmp(type, "dfps_immediate_porch_mode_vfp")) {
- dfps_caps->type = DSI_DFPS_IMMEDIATE_VFP;
- } else {
- pr_err("[%s] dfps type is not recognized\n", name);
- rc = -EINVAL;
- goto error;
- }
+ of_find_property(of_node, "qcom,dsi-supported-dfps-list",
+ &dfps_caps->dfps_list_len);
+ dfps_caps->dfps_list_len /= sizeof(u32);
+ if (dfps_caps->dfps_list_len < 1) {
+ pr_err("[%s] dfps refresh list not present\n", name);
+ rc = -EINVAL;
+ goto error;
+ }
- rc = of_property_read_u32(of_node,
- "qcom,mdss-dsi-min-refresh-rate",
- &val);
- if (rc) {
- pr_err("[%s] Min refresh rate is not defined\n", name);
- rc = -EINVAL;
- goto error;
- }
- dfps_caps->min_refresh_rate = val;
+ dfps_caps->dfps_list = kcalloc(dfps_caps->dfps_list_len, sizeof(u32),
+ GFP_KERNEL);
+ if (!dfps_caps->dfps_list) {
+ rc = -ENOMEM;
+ goto error;
+ }
- rc = of_property_read_u32(of_node,
- "qcom,mdss-dsi-max-refresh-rate",
- &val);
- if (rc) {
- pr_debug("[%s] Using default refresh rate\n", name);
- rc = of_property_read_u32(of_node,
- "qcom,mdss-dsi-panel-framerate",
- &val);
- if (rc) {
- pr_err("[%s] max refresh rate is not defined\n",
- name);
- rc = -EINVAL;
- goto error;
- }
- }
- dfps_caps->max_refresh_rate = val;
+ rc = of_property_read_u32_array(of_node, "qcom,dsi-supported-dfps-list",
+ dfps_caps->dfps_list,
+ dfps_caps->dfps_list_len);
+ if (rc) {
+ pr_err("[%s] dfps refresh rate list parse failed\n", name);
+ rc = -EINVAL;
+ goto error;
+ }
- if (dfps_caps->min_refresh_rate > dfps_caps->max_refresh_rate) {
- pr_err("[%s] min rate > max rate\n", name);
- rc = -EINVAL;
- }
+ dfps_caps->dfps_support = true;
- pr_debug("[%s] DFPS is supported %d-%d, mode %d\n", name,
- dfps_caps->min_refresh_rate,
- dfps_caps->max_refresh_rate,
- dfps_caps->type);
- dfps_caps->dfps_support = true;
- }
+ /* calculate max and min fps */
+ of_property_read_u32(of_node, "qcom,mdss-dsi-panel-framerate", &val);
+ dfps_caps->max_refresh_rate = val;
+ dfps_caps->min_refresh_rate = val;
+ for (i = 0; i < dfps_caps->dfps_list_len; i++) {
+ if (dfps_caps->dfps_list[i] < dfps_caps->min_refresh_rate)
+ dfps_caps->min_refresh_rate = dfps_caps->dfps_list[i];
+ else if (dfps_caps->dfps_list[i] > dfps_caps->max_refresh_rate)
+ dfps_caps->max_refresh_rate = dfps_caps->dfps_list[i];
+ }
error:
return rc;
}
@@ -2806,6 +2848,14 @@ struct dsi_panel *dsi_panel_get(struct device *parent,
pr_err("failed to parse dfps configuration, rc=%d\n",
rc);
+ if (panel->panel_mode == DSI_OP_VIDEO_MODE) {
+ rc = dsi_panel_parse_dyn_clk_caps(&panel->dyn_clk_caps,
+ of_node, panel->name);
+ if (rc)
+ pr_err("failed to parse dynamic clk config, rc=%d\n",
+ rc);
+ }
+
rc = dsi_panel_parse_phy_props(&panel->phy_props,
of_node, panel->name);
if (rc) {
@@ -3201,7 +3251,7 @@ int dsi_panel_get_host_cfg_for_mode(struct dsi_panel *panel,
if (mode->priv_info) {
config->video_timing.dsc_enabled = mode->priv_info->dsc_enabled;
config->video_timing.dsc = &mode->priv_info->dsc;
- config->bit_clk_rate_hz = mode->priv_info->clk_rate_hz;
+ config->bit_clk_rate_hz = mode->timing.clk_rate_hz;
}
config->esc_clk_rate_hz = 19200000;
mutex_unlock(&panel->panel_lock);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
index c0ecb7ffb684..188fa0115753 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
@@ -65,10 +65,18 @@ enum dsi_dms_mode {
};
struct dsi_dfps_capabilities {
- bool dfps_support;
enum dsi_dfps_type type;
u32 min_refresh_rate;
u32 max_refresh_rate;
+ u32 *dfps_list;
+ u32 dfps_list_len;
+ bool dfps_support;
+};
+
+struct dsi_dyn_clk_caps {
+ bool dyn_clk_support;
+ u32 *bit_clk_list;
+ u32 bit_clk_list_len;
};
struct dsi_pinctrl_info {
@@ -164,6 +172,7 @@ struct dsi_panel {
enum dsi_op_mode panel_mode;
struct dsi_dfps_capabilities dfps_caps;
+ struct dsi_dyn_clk_caps dyn_clk_caps;
struct dsi_panel_phy_props phy_props;
struct dsi_display_mode *cur_mode;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
index 27fc20c46093..35e539f91fae 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
@@ -107,6 +107,9 @@ static int dsi_phy_regmap_init(struct platform_device *pdev,
phy->hw.base = ptr;
+ ptr = msm_ioremap(pdev, "dyn_refresh_base", phy->name);
+ phy->hw.dyn_pll_base = ptr;
+
pr_debug("[%s] map dsi_phy registers to %pK\n",
phy->name, phy->hw.base);
@@ -614,11 +617,8 @@ int dsi_phy_validate_mode(struct msm_dsi_phy *dsi_phy,
return -EINVAL;
}
- mutex_lock(&dsi_phy->phy_lock);
-
pr_debug("[PHY_%d] Skipping validation\n", dsi_phy->index);
- mutex_unlock(&dsi_phy->phy_lock);
return rc;
}
@@ -848,7 +848,7 @@ int dsi_phy_enable(struct msm_dsi_phy *phy,
rc = phy->hw.ops.calculate_timing_params(&phy->hw,
&phy->mode,
&config->common_config,
- &phy->cfg.timing);
+ &phy->cfg.timing, false);
if (rc) {
pr_err("[%s] failed to set timing, rc=%d\n", phy->name, rc);
goto error;
@@ -866,6 +866,27 @@ error:
return rc;
}
+/* update dsi phy timings for dynamic clk switch use case */
+int dsi_phy_update_phy_timings(struct msm_dsi_phy *phy,
+ struct dsi_host_config *config)
+{
+ int rc = 0;
+
+ if (!phy || !config) {
+ pr_err("invalid argument\n");
+ return -EINVAL;
+ }
+
+ memcpy(&phy->mode, &config->video_timing, sizeof(phy->mode));
+ rc = phy->hw.ops.calculate_timing_params(&phy->hw, &phy->mode,
+ &config->common_config,
+ &phy->cfg.timing, true);
+ if (rc)
+ pr_err("failed to calculate phy timings %d\n", rc);
+
+ return rc;
+}
+
int dsi_phy_lane_reset(struct msm_dsi_phy *phy)
{
int ret = 0;
@@ -905,6 +926,26 @@ int dsi_phy_disable(struct msm_dsi_phy *phy)
}
/**
+ * dsi_phy_set_clamp_state() - configure clamps for DSI lanes
+ * @phy: DSI PHY handle.
+ * @enable: boolean to specify clamp enable/disable.
+ *
+ * Return: error code.
+ */
+int dsi_phy_set_clamp_state(struct msm_dsi_phy *phy, bool enable)
+{
+ if (!phy)
+ return -EINVAL;
+
+ pr_debug("[%s] enable=%d\n", phy->name, enable);
+
+ if (phy->hw.ops.clamp_ctrl)
+ phy->hw.ops.clamp_ctrl(&phy->hw, enable);
+
+ return 0;
+}
+
+/**
* dsi_phy_idle_ctrl() - enable/disable DSI PHY during idle screen
* @phy: DSI PHY handle
* @enable: boolean to specify PHY enable/disable.
@@ -1010,10 +1051,111 @@ int dsi_phy_set_timing_params(struct msm_dsi_phy *phy,
rc = phy->hw.ops.phy_timing_val(&phy->cfg.timing, timing, size);
if (!rc)
phy->cfg.is_phy_timing_present = true;
+
mutex_unlock(&phy->phy_lock);
return rc;
}
+/**
+ * dsi_phy_dynamic_refresh_trigger() - trigger dynamic refresh
+ * @phy: DSI PHY handle
+ * @is_master: Boolean to indicate if for master or slave.
+ */
+void dsi_phy_dynamic_refresh_trigger(struct msm_dsi_phy *phy, bool is_master)
+{
+ u32 off;
+
+ if (!phy)
+ return;
+
+ mutex_lock(&phy->phy_lock);
+ /*
+ * program PLL_SWI_INTF_SEL and SW_TRIGGER bit only for
+ * master and program SYNC_MODE bit only for slave.
+ */
+ if (is_master)
+ off = BIT(DYN_REFRESH_INTF_SEL) | BIT(DYN_REFRESH_SWI_CTRL) |
+ BIT(DYN_REFRESH_SW_TRIGGER);
+ else
+ off = BIT(DYN_REFRESH_SYNC_MODE) | BIT(DYN_REFRESH_SWI_CTRL);
+
+ if (phy->hw.ops.dyn_refresh_ops.dyn_refresh_helper)
+ phy->hw.ops.dyn_refresh_ops.dyn_refresh_helper(&phy->hw, off);
+
+ mutex_unlock(&phy->phy_lock);
+}
+
+/**
+ * dsi_phy_config_dynamic_refresh() - Configure dynamic refresh registers
+ * @phy: DSI PHY handle
+ * @delay: pipe delays for dynamic refresh
+ * @is_master: Boolean to indicate if for master or slave.
+ */
+void dsi_phy_config_dynamic_refresh(struct msm_dsi_phy *phy,
+ struct dsi_dyn_clk_delay *delay,
+ bool is_master)
+{
+ struct dsi_phy_cfg *cfg;
+
+ if (!phy)
+ return;
+
+ mutex_lock(&phy->phy_lock);
+
+ cfg = &phy->cfg;
+
+ if (phy->hw.ops.dyn_refresh_ops.dyn_refresh_config)
+ phy->hw.ops.dyn_refresh_ops.dyn_refresh_config(&phy->hw, cfg,
+ is_master);
+ if (phy->hw.ops.dyn_refresh_ops.dyn_refresh_pipe_delay)
+ phy->hw.ops.dyn_refresh_ops.dyn_refresh_pipe_delay(
+ &phy->hw, delay);
+
+ mutex_unlock(&phy->phy_lock);
+}
+
+/**
+ * dsi_phy_cache_phy_timings - cache the phy timings calculated as part of
+ * dynamic refresh.
+ * @phy: DSI PHY Handle.
+ * @dst: Pointer to cache location.
+ * @size: Number of phy lane settings.
+ */
+int dsi_phy_dyn_refresh_cache_phy_timings(struct msm_dsi_phy *phy, u32 *dst,
+ u32 size)
+{
+ int rc = 0;
+
+ if (!phy || !dst || !size)
+ return -EINVAL;
+
+ if (phy->hw.ops.dyn_refresh_ops.cache_phy_timings)
+ rc = phy->hw.ops.dyn_refresh_ops.cache_phy_timings(
+ &phy->cfg.timing, dst, size);
+
+ if (rc)
+ pr_err("failed to cache phy timings %d\n", rc);
+
+ return rc;
+}
+
+/**
+ * dsi_phy_dynamic_refresh_clear() - clear dynamic refresh config
+ * @phy: DSI PHY handle
+ */
+void dsi_phy_dynamic_refresh_clear(struct msm_dsi_phy *phy)
+{
+ if (!phy)
+ return;
+
+ mutex_lock(&phy->phy_lock);
+
+ if (phy->hw.ops.dyn_refresh_ops.dyn_refresh_helper)
+ phy->hw.ops.dyn_refresh_ops.dyn_refresh_helper(&phy->hw, 0);
+
+ mutex_unlock(&phy->phy_lock);
+}
+
void dsi_phy_drv_register(void)
{
platform_driver_register(&dsi_phy_platform_driver);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h
index 56d5ee3bd5d4..65c7a16aee81 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h
@@ -218,6 +218,15 @@ int dsi_phy_clk_cb_register(struct msm_dsi_phy *phy,
int dsi_phy_idle_ctrl(struct msm_dsi_phy *phy, bool enable);
/**
+ * dsi_phy_set_clamp_state() - configure clamps for DSI lanes
+ * @phy: DSI PHY handle.
+ * @enable: boolean to specify clamp enable/disable.
+ *
+ * Return: error code.
+ */
+int dsi_phy_set_clamp_state(struct msm_dsi_phy *phy, bool enable);
+
+/**
* dsi_phy_set_clk_freq() - set DSI PHY clock frequency setting
* @phy: DSI PHY handle
* @clk_freq: link clock frequency
@@ -269,4 +278,45 @@ void dsi_phy_drv_register(void);
*/
void dsi_phy_drv_unregister(void);
+/**
+ * dsi_phy_update_phy_timings() - Update dsi phy timings
+ * @phy: DSI PHY handle
+ * @config: DSI Host config parameters
+ *
+ * Return: error code.
+ */
+int dsi_phy_update_phy_timings(struct msm_dsi_phy *phy,
+ struct dsi_host_config *config);
+
+/**
+ * dsi_phy_config_dynamic_refresh() - Configure dynamic refresh registers
+ * @phy: DSI PHY handle
+ * @delay: pipe delays for dynamic refresh
+ * @is_master: Boolean to indicate if for master or slave
+ */
+void dsi_phy_config_dynamic_refresh(struct msm_dsi_phy *phy,
+ struct dsi_dyn_clk_delay *delay,
+ bool is_master);
+/**
+ * dsi_phy_dynamic_refresh_trigger() - trigger dynamic refresh
+ * @phy: DSI PHY handle
+ * @is_master: Boolean to indicate if for master or slave.
+ */
+void dsi_phy_dynamic_refresh_trigger(struct msm_dsi_phy *phy, bool is_master);
+
+/**
+ * dsi_phy_dynamic_refresh_clear() - clear dynamic refresh config
+ * @phy: DSI PHY handle
+ */
+void dsi_phy_dynamic_refresh_clear(struct msm_dsi_phy *phy);
+
+/**
+ * dsi_phy_dyn_refresh_cache_phy_timings - cache the phy timings calculated
+ * as part of dynamic refresh.
+ * @phy: DSI PHY Handle.
+ * @dst: Pointer to cache location.
+ * @size: Number of phy lane settings.
+ */
+int dsi_phy_dyn_refresh_cache_phy_timings(struct msm_dsi_phy *phy,
+ u32 *dst, u32 size);
#endif /* _DSI_PHY_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h
index e31899d4f664..67a1157935d1 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h
@@ -159,6 +159,43 @@ struct phy_ulps_config_ops {
bool (*is_lanes_in_ulps)(u32 ulps, u32 ulps_lanes);
};
+struct phy_dyn_refresh_ops {
+ /**
+ * dyn_refresh_helper - helper function to config particular registers
+ * @phy: Pointer to DSI PHY hardware instance.
+ * @offset: register offset to program.
+ */
+ void (*dyn_refresh_helper)(struct dsi_phy_hw *phy, u32 offset);
+
+ /**
+ * dyn_refresh_config - configure dynamic refresh ctrl registers
+ * @phy: Pointer to DSI PHY hardware instance.
+ * @cfg: Pointer to DSI PHY timings.
+ * @is_master: Boolean to indicate whether for master or slave.
+ */
+ void (*dyn_refresh_config)(struct dsi_phy_hw *phy,
+ struct dsi_phy_cfg *cfg, bool is_master);
+
+ /**
+ * dyn_refresh_pipe_delay - configure pipe delay registers for dynamic
+ * refresh.
+ * @phy: Pointer to DSI PHY hardware instance.
+	 * @delay: structure containing all the delays to be programmed.
+ */
+ void (*dyn_refresh_pipe_delay)(struct dsi_phy_hw *phy,
+ struct dsi_dyn_clk_delay *delay);
+
+ /**
+ * cache_phy_timings - cache the phy timings calculated as part of
+ * dynamic refresh.
+ * @timings: Pointer to calculated phy timing parameters.
+ * @dst: Pointer to cache location.
+ * @size: Number of phy lane settings.
+ */
+ int (*cache_phy_timings)(struct dsi_phy_per_lane_cfgs *timings,
+ u32 *dst, u32 size);
+};
+
/**
* struct dsi_phy_hw_ops - Operations for DSI PHY hardware.
* @regulator_enable: Enable PHY regulators.
@@ -218,11 +255,14 @@ struct dsi_phy_hw_ops {
* @mode: Mode information for which timing has to be calculated.
* @config: DSI host configuration for this mode.
* @timing: Timing parameters for each lane which will be returned.
+	 * @use_mode_bit_clk: Boolean to indicate whether to recalculate dsi
+	 *		bitclk or use the existing bitclk (for dynamic clk case).
*/
int (*calculate_timing_params)(struct dsi_phy_hw *phy,
struct dsi_mode_info *mode,
struct dsi_host_common_cfg *config,
- struct dsi_phy_per_lane_cfgs *timing);
+ struct dsi_phy_per_lane_cfgs *timing,
+ bool use_mode_bit_clk);
/**
* phy_timing_val() - Gets PHY timing values.
@@ -234,6 +274,14 @@ struct dsi_phy_hw_ops {
u32 *timing, u32 size);
/**
+ * clamp_ctrl() - configure clamps for DSI lanes
+ * @phy: DSI PHY handle.
+ * @enable: boolean to specify clamp enable/disable.
+ * Return: error code.
+ */
+ void (*clamp_ctrl)(struct dsi_phy_hw *phy, bool enable);
+
+ /**
* phy_lane_reset() - Reset dsi phy lanes in case of error.
* @phy: Pointer to DSI PHY hardware object.
* Return: error code.
@@ -249,12 +297,15 @@ struct dsi_phy_hw_ops {
void *timing_ops;
struct phy_ulps_config_ops ulps_ops;
+ struct phy_dyn_refresh_ops dyn_refresh_ops;
};
/**
* struct dsi_phy_hw - DSI phy hardware object specific to an instance
* @base: VA for the DSI PHY base address.
* @length: Length of the DSI PHY register base map.
+ * @dyn_pll_base: VA for the DSI dynamic refresh base address.
+ * @length: Length of the DSI dynamic refresh register base map.
* @index: Instance ID of the controller.
* @version: DSI PHY version.
* @feature_map: Features supported by DSI PHY.
@@ -263,6 +314,8 @@ struct dsi_phy_hw_ops {
struct dsi_phy_hw {
void __iomem *base;
u32 length;
+ void __iomem *dyn_pll_base;
+ u32 dyn_refresh_len;
u32 index;
enum dsi_phy_version version;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v3_0.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v3_0.c
index b078231e70e3..4914df2e6bc1 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v3_0.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v3_0.c
@@ -51,7 +51,6 @@
#define DSIPHY_CMN_LANE_STATUS0 0x0F4
#define DSIPHY_CMN_LANE_STATUS1 0x0F8
-
/* n = 0..3 for data lanes and n = 4 for clock lane */
#define DSIPHY_LNX_CFG0(n) (0x200 + (0x80 * (n)))
#define DSIPHY_LNX_CFG1(n) (0x204 + (0x80 * (n)))
@@ -66,6 +65,47 @@
#define DSIPHY_LNX_LPRX_CTRL(n) (0x228 + (0x80 * (n)))
#define DSIPHY_LNX_TX_DCTRL(n) (0x22C + (0x80 * (n)))
+/* dynamic refresh control registers */
+#define DSI_DYN_REFRESH_CTRL (0x000)
+#define DSI_DYN_REFRESH_PIPE_DELAY (0x004)
+#define DSI_DYN_REFRESH_PIPE_DELAY2 (0x008)
+#define DSI_DYN_REFRESH_PLL_DELAY (0x00C)
+#define DSI_DYN_REFRESH_STATUS (0x010)
+#define DSI_DYN_REFRESH_PLL_CTRL0 (0x014)
+#define DSI_DYN_REFRESH_PLL_CTRL1 (0x018)
+#define DSI_DYN_REFRESH_PLL_CTRL2 (0x01C)
+#define DSI_DYN_REFRESH_PLL_CTRL3 (0x020)
+#define DSI_DYN_REFRESH_PLL_CTRL4 (0x024)
+#define DSI_DYN_REFRESH_PLL_CTRL5 (0x028)
+#define DSI_DYN_REFRESH_PLL_CTRL6 (0x02C)
+#define DSI_DYN_REFRESH_PLL_CTRL7 (0x030)
+#define DSI_DYN_REFRESH_PLL_CTRL8 (0x034)
+#define DSI_DYN_REFRESH_PLL_CTRL9 (0x038)
+#define DSI_DYN_REFRESH_PLL_CTRL10 (0x03C)
+#define DSI_DYN_REFRESH_PLL_CTRL11 (0x040)
+#define DSI_DYN_REFRESH_PLL_CTRL12 (0x044)
+#define DSI_DYN_REFRESH_PLL_CTRL13 (0x048)
+#define DSI_DYN_REFRESH_PLL_CTRL14 (0x04C)
+#define DSI_DYN_REFRESH_PLL_CTRL15 (0x050)
+#define DSI_DYN_REFRESH_PLL_CTRL16 (0x054)
+#define DSI_DYN_REFRESH_PLL_CTRL17 (0x058)
+#define DSI_DYN_REFRESH_PLL_CTRL18 (0x05C)
+#define DSI_DYN_REFRESH_PLL_CTRL19 (0x060)
+#define DSI_DYN_REFRESH_PLL_CTRL20 (0x064)
+#define DSI_DYN_REFRESH_PLL_CTRL21 (0x068)
+#define DSI_DYN_REFRESH_PLL_CTRL22 (0x06C)
+#define DSI_DYN_REFRESH_PLL_CTRL23 (0x070)
+#define DSI_DYN_REFRESH_PLL_CTRL24 (0x074)
+#define DSI_DYN_REFRESH_PLL_CTRL25 (0x078)
+#define DSI_DYN_REFRESH_PLL_CTRL26 (0x07C)
+#define DSI_DYN_REFRESH_PLL_CTRL27 (0x080)
+#define DSI_DYN_REFRESH_PLL_CTRL28 (0x084)
+#define DSI_DYN_REFRESH_PLL_CTRL29 (0x088)
+#define DSI_DYN_REFRESH_PLL_CTRL30 (0x08C)
+#define DSI_DYN_REFRESH_PLL_CTRL31 (0x090)
+#define DSI_DYN_REFRESH_PLL_UPPER_ADDR (0x094)
+#define DSI_DYN_REFRESH_PLL_UPPER_ADDR2 (0x098)
+
static inline int dsi_conv_phy_to_logical_lane(
struct dsi_lane_map *lane_map, enum dsi_phy_data_lanes phy_lane)
{
@@ -196,10 +236,31 @@ static void dsi_phy_hw_v3_0_lane_settings(struct dsi_phy_hw *phy,
DSI_W32(phy, DSIPHY_LNX_OFFSET_BOT_CTRL(i), 0x0);
DSI_W32(phy, DSIPHY_LNX_TX_DCTRL(i), tx_dctrl[i]);
}
+}
+
+void dsi_phy_hw_v3_0_clamp_ctrl(struct dsi_phy_hw *phy, bool enable)
+{
+ u32 reg;
+
+ pr_debug("enable=%s\n", enable ? "true" : "false");
+
+ /*
+ * DSI PHY lane clamps, also referred to as PHY FreezeIO is
+	 * enabled by default as part of the initialization sequence.
+ * This would get triggered anytime the chip FreezeIO is asserted.
+ */
+ if (enable)
+ return;
- /* Toggle BIT 0 to release freeze I/0 */
- DSI_W32(phy, DSIPHY_LNX_TX_DCTRL(3), 0x05);
- DSI_W32(phy, DSIPHY_LNX_TX_DCTRL(3), 0x04);
+ /*
+	 * Toggle BIT 0 to explicitly release PHY freeze I/O to disable
+ * the clamps.
+ */
+ reg = DSI_R32(phy, DSIPHY_LNX_TX_DCTRL(3));
+ DSI_W32(phy, DSIPHY_LNX_TX_DCTRL(3), reg | BIT(0));
+ wmb(); /* Ensure that the freezeio bit is toggled */
+ DSI_W32(phy, DSIPHY_LNX_TX_DCTRL(3), reg & ~BIT(0));
+ wmb(); /* Ensure that the freezeio bit is toggled */
}
/**
@@ -478,3 +539,163 @@ int dsi_phy_hw_timing_val_v3_0(struct dsi_phy_per_lane_cfgs *timing_cfg,
timing_cfg->lane_v3[i] = timing_val[i];
return 0;
}
+
+void dsi_phy_hw_v3_0_dyn_refresh_config(struct dsi_phy_hw *phy,
+ struct dsi_phy_cfg *cfg, bool is_master)
+{
+ u32 reg;
+
+ if (is_master) {
+ DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL9,
+ DSIPHY_CMN_GLBL_CTRL, DSIPHY_CMN_VREG_CTRL,
+ 0x10, 0x59);
+ DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL10,
+ DSIPHY_CMN_TIMING_CTRL_0, DSIPHY_CMN_TIMING_CTRL_1,
+ cfg->timing.lane_v3[0], cfg->timing.lane_v3[1]);
+ DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL11,
+ DSIPHY_CMN_TIMING_CTRL_2, DSIPHY_CMN_TIMING_CTRL_3,
+ cfg->timing.lane_v3[2], cfg->timing.lane_v3[3]);
+ DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL12,
+ DSIPHY_CMN_TIMING_CTRL_4, DSIPHY_CMN_TIMING_CTRL_5,
+ cfg->timing.lane_v3[4], cfg->timing.lane_v3[5]);
+ DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL13,
+ DSIPHY_CMN_TIMING_CTRL_6, DSIPHY_CMN_TIMING_CTRL_7,
+ cfg->timing.lane_v3[6], cfg->timing.lane_v3[7]);
+ DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL14,
+ DSIPHY_CMN_TIMING_CTRL_8, DSIPHY_CMN_TIMING_CTRL_9,
+ cfg->timing.lane_v3[8], cfg->timing.lane_v3[9]);
+ DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL15,
+ DSIPHY_CMN_TIMING_CTRL_10, DSIPHY_CMN_TIMING_CTRL_11,
+ cfg->timing.lane_v3[10], cfg->timing.lane_v3[11]);
+ DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL16,
+ DSIPHY_CMN_CTRL_0, DSIPHY_CMN_LANE_CTRL0,
+ 0x7f, 0x1f);
+ } else {
+ reg = DSI_R32(phy, DSIPHY_CMN_CLK_CFG0);
+ reg &= ~BIT(5);
+ DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL0,
+ DSIPHY_CMN_CLK_CFG0, DSIPHY_CMN_PLL_CNTRL,
+ reg, 0x0);
+ DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL1,
+ DSIPHY_CMN_RBUF_CTRL, DSIPHY_CMN_GLBL_CTRL,
+ 0x0, 0x10);
+ DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL2,
+ DSIPHY_CMN_VREG_CTRL, DSIPHY_CMN_TIMING_CTRL_0,
+ 0x59, cfg->timing.lane_v3[0]);
+ DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL3,
+ DSIPHY_CMN_TIMING_CTRL_1, DSIPHY_CMN_TIMING_CTRL_2,
+ cfg->timing.lane_v3[1], cfg->timing.lane_v3[2]);
+ DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL4,
+ DSIPHY_CMN_TIMING_CTRL_3, DSIPHY_CMN_TIMING_CTRL_4,
+ cfg->timing.lane_v3[3], cfg->timing.lane_v3[4]);
+ DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL5,
+ DSIPHY_CMN_TIMING_CTRL_5, DSIPHY_CMN_TIMING_CTRL_6,
+ cfg->timing.lane_v3[5], cfg->timing.lane_v3[6]);
+ DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL6,
+ DSIPHY_CMN_TIMING_CTRL_7, DSIPHY_CMN_TIMING_CTRL_8,
+ cfg->timing.lane_v3[7], cfg->timing.lane_v3[8]);
+ DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL7,
+ DSIPHY_CMN_TIMING_CTRL_9, DSIPHY_CMN_TIMING_CTRL_10,
+ cfg->timing.lane_v3[9], cfg->timing.lane_v3[10]);
+ DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL8,
+ DSIPHY_CMN_TIMING_CTRL_11, DSIPHY_CMN_CTRL_0,
+ cfg->timing.lane_v3[11], 0x7f);
+ DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL9,
+ DSIPHY_CMN_LANE_CTRL0, DSIPHY_CMN_CTRL_2,
+ 0x1f, 0x40);
+ /*
+ * fill with dummy register writes since controller will blindly
+ * send these values to DSI PHY.
+ */
+ reg = DSI_DYN_REFRESH_PLL_CTRL11;
+ while (reg <= DSI_DYN_REFRESH_PLL_CTRL29) {
+ DSI_DYN_REF_REG_W(phy->dyn_pll_base, reg,
+ DSIPHY_CMN_LANE_CTRL0, DSIPHY_CMN_CTRL_0,
+ 0x1f, 0x7f);
+ reg += 0x4;
+ }
+
+ DSI_GEN_W32(phy->dyn_pll_base,
+ DSI_DYN_REFRESH_PLL_UPPER_ADDR, 0);
+ DSI_GEN_W32(phy->dyn_pll_base,
+ DSI_DYN_REFRESH_PLL_UPPER_ADDR2, 0);
+ }
+
+ wmb(); /* make sure all registers are updated */
+}
+
+void dsi_phy_hw_v3_0_dyn_refresh_pipe_delay(struct dsi_phy_hw *phy,
+ struct dsi_dyn_clk_delay *delay)
+{
+ if (!delay)
+ return;
+
+ DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_PIPE_DELAY,
+ delay->pipe_delay);
+ DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_PIPE_DELAY2,
+ delay->pipe_delay2);
+ DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_DELAY,
+ delay->pll_delay);
+}
+
+void dsi_phy_hw_v3_0_dyn_refresh_helper(struct dsi_phy_hw *phy, u32 offset)
+{
+ u32 reg;
+
+ /*
+ * if no offset is mentioned then this means we want to clear
+ * the dynamic refresh ctrl register which is the last step
+ * of dynamic refresh sequence.
+ */
+ if (!offset) {
+ reg = DSI_GEN_R32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL);
+ reg &= ~(BIT(0) | BIT(8));
+ DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL, reg);
+ wmb(); /* ensure dynamic fps is cleared */
+ return;
+ }
+
+ if (offset & BIT(DYN_REFRESH_INTF_SEL)) {
+ reg = DSI_GEN_R32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL);
+ reg |= BIT(13);
+ DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL, reg);
+ }
+
+ if (offset & BIT(DYN_REFRESH_SYNC_MODE)) {
+ reg = DSI_GEN_R32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL);
+ reg |= BIT(16);
+ DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL, reg);
+ }
+
+ if (offset & BIT(DYN_REFRESH_SWI_CTRL)) {
+ reg = DSI_GEN_R32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL);
+ reg |= BIT(0);
+ DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL, reg);
+ }
+
+ if (offset & BIT(DYN_REFRESH_SW_TRIGGER)) {
+ reg = DSI_GEN_R32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL);
+ reg |= BIT(8);
+ DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL, reg);
+ wmb(); /* ensure dynamic fps is triggered */
+ }
+}
+
+int dsi_phy_hw_v3_0_cache_phy_timings(struct dsi_phy_per_lane_cfgs *timings,
+ u32 *dst, u32 size)
+{
+ int i;
+
+ if (!timings || !dst || !size)
+ return -EINVAL;
+
+ if (size != DSI_PHY_TIMING_V3_SIZE) {
+ pr_err("size mis-match\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < size; i++)
+ dst[i] = timings->lane_v3[i];
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_timing_calc.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_timing_calc.c
index e52a0f2b2ba6..60c75cef42f6 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_timing_calc.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_timing_calc.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -511,11 +511,14 @@ error:
* @mode: Mode information for which timing has to be calculated.
* @config: DSI host configuration for this mode.
* @timing: Timing parameters for each lane which will be returned.
+ * @use_mode_bit_clk: Boolean to indicate whether to recalculate dsi
+ *		bit clk or use the existing bit clk (for dynamic clk case).
*/
int dsi_phy_hw_calculate_timing_params(struct dsi_phy_hw *phy,
- struct dsi_mode_info *mode,
- struct dsi_host_common_cfg *host,
- struct dsi_phy_per_lane_cfgs *timing)
+ struct dsi_mode_info *mode,
+ struct dsi_host_common_cfg *host,
+ struct dsi_phy_per_lane_cfgs *timing,
+ bool use_mode_bit_clk)
{
/* constants */
u32 const esc_clk_mhz = 192; /* TODO: esc clock is hardcoded */
@@ -541,7 +544,7 @@ int dsi_phy_hw_calculate_timing_params(struct dsi_phy_hw *phy,
struct phy_timing_ops *ops = phy->ops.timing_ops;
memset(&desc, 0x0, sizeof(desc));
- h_total = DSI_H_TOTAL(mode);
+ h_total = DSI_H_TOTAL_DSC(mode);
v_total = DSI_V_TOTAL(mode);
bpp = bits_per_pixel[host->dst_format];
@@ -558,7 +561,10 @@ int dsi_phy_hw_calculate_timing_params(struct dsi_phy_hw *phy,
num_of_lanes++;
- x = mult_frac(v_total * h_total, inter_num, num_of_lanes);
+ if (use_mode_bit_clk)
+ x = mode->clk_rate_hz;
+ else
+ x = mult_frac(v_total * h_total, inter_num, num_of_lanes);
y = rounddown(x, 1);
clk_params.bitclk_mbps = rounddown(mult_frac(y, 1, 1000000), 1);
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 95bdc3624767..5ada8010a664 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2014 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -25,6 +25,8 @@
#include "msm_fence.h"
#include "sde_trace.h"
+#define MULTIPLE_CONN_DETECTED(x) (x > 1)
+
struct msm_commit {
struct drm_device *dev;
struct drm_atomic_state *state;
@@ -111,6 +113,71 @@ static void commit_destroy(struct msm_commit *c)
kfree(c);
}
+static inline bool _msm_seamless_for_crtc(struct drm_atomic_state *state,
+ struct drm_crtc_state *crtc_state, bool enable)
+{
+ struct drm_connector *connector = NULL;
+ struct drm_connector_state *conn_state = NULL;
+ int i = 0;
+ int conn_cnt = 0;
+
+ if (msm_is_mode_seamless(&crtc_state->mode) ||
+ msm_is_mode_seamless_vrr(&crtc_state->adjusted_mode) ||
+ msm_is_mode_seamless_dyn_clk(&crtc_state->adjusted_mode))
+ return true;
+
+ if (msm_is_mode_seamless_dms(&crtc_state->adjusted_mode) && !enable)
+ return true;
+
+ if (!crtc_state->mode_changed && crtc_state->connectors_changed) {
+ for_each_connector_in_state(state, connector, conn_state, i) {
+ if ((conn_state->crtc == crtc_state->crtc) ||
+ (connector->state->crtc ==
+ crtc_state->crtc))
+ conn_cnt++;
+
+ if (MULTIPLE_CONN_DETECTED(conn_cnt))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static inline bool _msm_seamless_for_conn(struct drm_connector *connector,
+ struct drm_connector_state *old_conn_state, bool enable)
+{
+ if (!old_conn_state || !old_conn_state->crtc)
+ return false;
+
+ if (!old_conn_state->crtc->state->mode_changed &&
+ !old_conn_state->crtc->state->active_changed &&
+ old_conn_state->crtc->state->connectors_changed) {
+ if (old_conn_state->crtc == connector->state->crtc)
+ return true;
+ }
+
+ if (enable)
+ return false;
+
+ if (msm_is_mode_seamless(&connector->encoder->crtc->state->mode))
+ return true;
+
+ if (msm_is_mode_seamless_vrr(
+ &connector->encoder->crtc->state->adjusted_mode))
+ return true;
+
+ if (msm_is_mode_seamless_dyn_clk(
+ &connector->encoder->crtc->state->adjusted_mode))
+ return true;
+
+ if (msm_is_mode_seamless_dms(
+ &connector->encoder->crtc->state->adjusted_mode))
+ return true;
+
+ return false;
+}
+
static void msm_atomic_wait_for_commit_done(
struct drm_device *dev,
struct drm_atomic_state *old_state)
@@ -174,14 +241,7 @@ msm_disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
if (WARN_ON(!encoder))
continue;
- if (msm_is_mode_seamless(
- &connector->encoder->crtc->state->mode) ||
- msm_is_mode_seamless_vrr(
- &connector->encoder->crtc->state->adjusted_mode))
- continue;
-
- if (msm_is_mode_seamless_dms(
- &connector->encoder->crtc->state->adjusted_mode))
+ if (_msm_seamless_for_conn(connector, old_conn_state, false))
continue;
funcs = encoder->helper_private;
@@ -223,11 +283,7 @@ msm_disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
if (!old_crtc_state->active)
continue;
- if (msm_is_mode_seamless(&crtc->state->mode) ||
- msm_is_mode_seamless_vrr(&crtc->state->adjusted_mode))
- continue;
-
- if (msm_is_mode_seamless_dms(&crtc->state->adjusted_mode))
+ if (_msm_seamless_for_crtc(old_state, crtc->state, false))
continue;
funcs = crtc->helper_private;
@@ -286,8 +342,14 @@ msm_crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
mode = &new_crtc_state->mode;
adjusted_mode = &new_crtc_state->adjusted_mode;
- if (!new_crtc_state->mode_changed)
+ if (!new_crtc_state->mode_changed &&
+ new_crtc_state->connectors_changed) {
+ if (_msm_seamless_for_conn(connector,
+ old_conn_state, false))
+ continue;
+ } else if (!new_crtc_state->mode_changed) {
continue;
+ }
DRM_DEBUG_ATOMIC("modeset on [ENCODER:%d:%s]\n",
encoder->base.id, encoder->name);
@@ -365,8 +427,7 @@ static void msm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
if (!crtc->state->active)
continue;
- if (msm_is_mode_seamless(&crtc->state->mode) ||
- msm_is_mode_seamless_vrr(&crtc->state->adjusted_mode))
+ if (_msm_seamless_for_crtc(old_state, crtc->state, true))
continue;
funcs = crtc->helper_private;
@@ -397,6 +458,9 @@ static void msm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
connector->state->crtc->state))
continue;
+ if (_msm_seamless_for_conn(connector, old_conn_state, true))
+ continue;
+
encoder = connector->state->best_encoder;
funcs = encoder->helper_private;
@@ -444,6 +508,9 @@ static void msm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
connector->state->crtc->state))
continue;
+ if (_msm_seamless_for_conn(connector, old_conn_state, true))
+ continue;
+
encoder = connector->state->best_encoder;
DRM_DEBUG_ATOMIC("bridge enable enabling [ENCODER:%d:%s]\n",
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 2202b6911d47..4909852952c5 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -80,6 +80,9 @@ static struct page **get_pages(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ if (obj->import_attach)
+ return msm_obj->pages;
+
if (!msm_obj->pages) {
struct drm_device *dev = obj->dev;
struct page **p;
@@ -567,8 +570,13 @@ void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
struct page **pages = get_pages(obj);
if (IS_ERR(pages))
return ERR_CAST(pages);
- msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
+ if (obj->import_attach)
+ msm_obj->vaddr = dma_buf_vmap(
+ obj->import_attach->dmabuf);
+ else
+ msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+
if (msm_obj->vaddr == NULL)
return ERR_PTR(-ENOMEM);
}
@@ -654,7 +662,11 @@ void msm_gem_vunmap(struct drm_gem_object *obj)
if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
return;
- vunmap(msm_obj->vaddr);
+ if (obj->import_attach)
+ dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
+ else
+ vunmap(msm_obj->vaddr);
+
msm_obj->vaddr = NULL;
}
@@ -1006,7 +1018,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
struct msm_gem_object *msm_obj;
struct drm_gem_object *obj = NULL;
uint32_t size;
- int ret, npages;
+ int ret;
/* if we don't have IOMMU, don't bother pretending we can import: */
if (!iommu_present(&platform_bus_type)) {
@@ -1027,19 +1039,9 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
drm_gem_private_object_init(dev, obj, size);
- npages = size / PAGE_SIZE;
-
msm_obj = to_msm_bo(obj);
msm_obj->sgt = sgt;
- msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
- if (!msm_obj->pages) {
- ret = -ENOMEM;
- goto fail;
- }
-
- ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
- if (ret)
- goto fail;
+ msm_obj->pages = NULL;
return obj;
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index e99ff9c53f6f..f5f68533643d 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -38,6 +38,8 @@
#define MSM_MODE_FLAG_SEAMLESS_DMS (1<<2)
/* Request to switch the fps */
#define MSM_MODE_FLAG_SEAMLESS_VRR (1<<3)
+/* Request to switch the bit clk */
+#define MSM_MODE_FLAG_SEAMLESS_DYN_CLK (1<<4)
/* As there are different display controller blocks depending on the
* snapdragon version, the kms support is split out and the appropriate
@@ -175,6 +177,13 @@ static inline bool msm_is_mode_seamless_vrr(const struct drm_display_mode *mode)
: false;
}
+static inline bool msm_is_mode_seamless_dyn_clk(
+ const struct drm_display_mode *mode)
+{
+ return mode ? (mode->private_flags & MSM_MODE_FLAG_SEAMLESS_DYN_CLK)
+ : false;
+}
+
static inline bool msm_needs_vblank_pre_modeset(
const struct drm_display_mode *mode)
{
diff --git a/drivers/gpu/drm/msm/sde/sde_ad4.h b/drivers/gpu/drm/msm/sde/sde_ad4.h
index bf08360e9862..b254d7dc981e 100644
--- a/drivers/gpu/drm/msm/sde/sde_ad4.h
+++ b/drivers/gpu/drm/msm/sde/sde_ad4.h
@@ -52,6 +52,7 @@ enum ad_property {
AD_IPC_SUSPEND,
AD_IPC_RESUME,
AD_IPC_RESET,
+ AD_VSYNC_UPDATE,
AD_PROPMAX,
};
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.c b/drivers/gpu/drm/msm/sde/sde_color_processing.c
index 8dc31d1e0872..0faae5a551bc 100644
--- a/drivers/gpu/drm/msm/sde/sde_color_processing.c
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.c
@@ -88,6 +88,7 @@ static void sde_cp_ad_set_prop(struct sde_crtc *sde_crtc,
enum ad_property ad_prop);
static void sde_cp_notify_hist_event(struct drm_crtc *crtc_drm, void *arg);
+static void sde_cp_update_ad_vsync_prop(struct sde_crtc *sde_crtc, u32 val);
#define setup_dspp_prop_install_funcs(func) \
do { \
@@ -138,6 +139,7 @@ enum {
SDE_CP_CRTC_DSPP_AD_ASSERTIVENESS,
SDE_CP_CRTC_DSPP_AD_BACKLIGHT,
SDE_CP_CRTC_DSPP_AD_STRENGTH,
+ SDE_CP_CRTC_DSPP_AD_VSYNC_COUNT,
SDE_CP_CRTC_DSPP_MAX,
/* DSPP features end */
@@ -407,6 +409,7 @@ void sde_cp_crtc_init(struct drm_crtc *crtc)
if (IS_ERR(sde_crtc->hist_blob))
sde_crtc->hist_blob = NULL;
+ sde_crtc->ad_vsync_count = 0;
mutex_init(&sde_crtc->crtc_cp_lock);
INIT_LIST_HEAD(&sde_crtc->active_list);
INIT_LIST_HEAD(&sde_crtc->dirty_list);
@@ -789,6 +792,9 @@ static void sde_cp_crtc_setfeature(struct sde_cp_node *prop_node,
ad_cfg.prop = AD_MODE;
ad_cfg.hw_cfg = &hw_cfg;
hw_dspp->ops.setup_ad(hw_dspp, &ad_cfg);
+ sde_crtc->ad_vsync_count = 0;
+ sde_cp_update_ad_vsync_prop(sde_crtc,
+ sde_crtc->ad_vsync_count);
break;
case SDE_CP_CRTC_DSPP_AD_INIT:
if (!hw_dspp || !hw_dspp->ops.setup_ad) {
@@ -798,6 +804,9 @@ static void sde_cp_crtc_setfeature(struct sde_cp_node *prop_node,
ad_cfg.prop = AD_INIT;
ad_cfg.hw_cfg = &hw_cfg;
hw_dspp->ops.setup_ad(hw_dspp, &ad_cfg);
+ sde_crtc->ad_vsync_count = 0;
+ sde_cp_update_ad_vsync_prop(sde_crtc,
+ sde_crtc->ad_vsync_count);
break;
case SDE_CP_CRTC_DSPP_AD_CFG:
if (!hw_dspp || !hw_dspp->ops.setup_ad) {
@@ -807,6 +816,9 @@ static void sde_cp_crtc_setfeature(struct sde_cp_node *prop_node,
ad_cfg.prop = AD_CFG;
ad_cfg.hw_cfg = &hw_cfg;
hw_dspp->ops.setup_ad(hw_dspp, &ad_cfg);
+ sde_crtc->ad_vsync_count = 0;
+ sde_cp_update_ad_vsync_prop(sde_crtc,
+ sde_crtc->ad_vsync_count);
break;
case SDE_CP_CRTC_DSPP_AD_INPUT:
if (!hw_dspp || !hw_dspp->ops.setup_ad) {
@@ -816,6 +828,9 @@ static void sde_cp_crtc_setfeature(struct sde_cp_node *prop_node,
ad_cfg.prop = AD_INPUT;
ad_cfg.hw_cfg = &hw_cfg;
hw_dspp->ops.setup_ad(hw_dspp, &ad_cfg);
+ sde_crtc->ad_vsync_count = 0;
+ sde_cp_update_ad_vsync_prop(sde_crtc,
+ sde_crtc->ad_vsync_count);
break;
case SDE_CP_CRTC_DSPP_AD_ASSERTIVENESS:
if (!hw_dspp || !hw_dspp->ops.setup_ad) {
@@ -825,6 +840,9 @@ static void sde_cp_crtc_setfeature(struct sde_cp_node *prop_node,
ad_cfg.prop = AD_ASSERTIVE;
ad_cfg.hw_cfg = &hw_cfg;
hw_dspp->ops.setup_ad(hw_dspp, &ad_cfg);
+ sde_crtc->ad_vsync_count = 0;
+ sde_cp_update_ad_vsync_prop(sde_crtc,
+ sde_crtc->ad_vsync_count);
break;
case SDE_CP_CRTC_DSPP_AD_BACKLIGHT:
if (!hw_dspp || !hw_dspp->ops.setup_ad) {
@@ -834,6 +852,9 @@ static void sde_cp_crtc_setfeature(struct sde_cp_node *prop_node,
ad_cfg.prop = AD_BACKLIGHT;
ad_cfg.hw_cfg = &hw_cfg;
hw_dspp->ops.setup_ad(hw_dspp, &ad_cfg);
+ sde_crtc->ad_vsync_count = 0;
+ sde_cp_update_ad_vsync_prop(sde_crtc,
+ sde_crtc->ad_vsync_count);
break;
case SDE_CP_CRTC_DSPP_AD_STRENGTH:
if (!hw_dspp || !hw_dspp->ops.setup_ad) {
@@ -843,6 +864,9 @@ static void sde_cp_crtc_setfeature(struct sde_cp_node *prop_node,
ad_cfg.prop = AD_STRENGTH;
ad_cfg.hw_cfg = &hw_cfg;
hw_dspp->ops.setup_ad(hw_dspp, &ad_cfg);
+ sde_crtc->ad_vsync_count = 0;
+ sde_cp_update_ad_vsync_prop(sde_crtc,
+ sde_crtc->ad_vsync_count);
break;
default:
ret = -EINVAL;
@@ -924,10 +948,15 @@ void sde_cp_crtc_apply_properties(struct drm_crtc *crtc)
DRM_DEBUG_DRIVER("Dirty list is empty\n");
goto exit;
}
- sde_cp_ad_set_prop(sde_crtc, AD_IPC_RESET);
set_dspp_flush = true;
}
+ if (!list_empty(&sde_crtc->ad_active)) {
+ sde_cp_ad_set_prop(sde_crtc, AD_IPC_RESET);
+ sde_cp_ad_set_prop(sde_crtc, AD_VSYNC_UPDATE);
+ sde_cp_update_ad_vsync_prop(sde_crtc, sde_crtc->ad_vsync_count);
+ }
+
list_for_each_entry_safe(prop_node, n, &sde_crtc->dirty_list,
dirty_list) {
sde_dspp_feature = crtc_feature_map[prop_node->feature];
@@ -1444,6 +1473,9 @@ static void dspp_ad_install_property(struct drm_crtc *crtc)
"SDE_DSPP_AD_V4_BACKLIGHT",
SDE_CP_CRTC_DSPP_AD_BACKLIGHT, 0, (BIT(16) - 1),
0);
+ sde_cp_crtc_install_range_property(crtc,
+ "SDE_DSPP_AD_V4_VSYNC_COUNT",
+ SDE_CP_CRTC_DSPP_AD_VSYNC_COUNT, 0, U32_MAX, 0);
break;
default:
DRM_ERROR("version %d not supported\n", version);
@@ -1862,6 +1894,11 @@ static void sde_cp_ad_set_prop(struct sde_crtc *sde_crtc,
hw_cfg.displayh = num_mixers * hw_lm->cfg.out_width;
hw_cfg.displayv = hw_lm->cfg.out_height;
hw_cfg.mixer_info = hw_lm;
+
+ if (ad_prop == AD_VSYNC_UPDATE) {
+ hw_cfg.payload = &sde_crtc->ad_vsync_count;
+ hw_cfg.len = sizeof(sde_crtc->ad_vsync_count);
+ }
ad_cfg.prop = ad_prop;
ad_cfg.hw_cfg = &hw_cfg;
ret = hw_dspp->ops.validate_ad(hw_dspp, (u32 *)&ad_prop);
@@ -2113,3 +2150,35 @@ int sde_cp_hist_interrupt(struct drm_crtc *crtc_drm, bool en,
exit:
return ret;
}
+
+void sde_cp_update_ad_vsync_count(struct drm_crtc *crtc, u32 val)
+{
+ struct sde_crtc *sde_crtc;
+
+ if (!crtc) {
+ DRM_ERROR("invalid crtc %pK\n", crtc);
+ return;
+ }
+
+ sde_crtc = to_sde_crtc(crtc);
+ if (!sde_crtc) {
+ DRM_ERROR("invalid sde_crtc %pK\n", sde_crtc);
+ return;
+ }
+
+ sde_crtc->ad_vsync_count = val;
+ sde_cp_update_ad_vsync_prop(sde_crtc, val);
+}
+
+static void sde_cp_update_ad_vsync_prop(struct sde_crtc *sde_crtc, u32 val)
+{
+ struct sde_cp_node *prop_node = NULL;
+
+ list_for_each_entry(prop_node, &sde_crtc->feature_list, feature_list) {
+ if (prop_node->feature == SDE_CP_CRTC_DSPP_AD_VSYNC_COUNT) {
+ prop_node->prop_val = val;
+ pr_debug("AD vsync count updated to %d\n", val);
+ return;
+ }
+ }
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.h b/drivers/gpu/drm/msm/sde/sde_color_processing.h
index 7eb173852611..620db26775a9 100644
--- a/drivers/gpu/drm/msm/sde/sde_color_processing.h
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -146,4 +146,11 @@ void sde_cp_crtc_post_ipc(struct drm_crtc *crtc);
*/
int sde_cp_hist_interrupt(struct drm_crtc *crtc_drm, bool en,
struct sde_irq_callback *hist_irq);
+
+/**
+ * sde_cp_update_ad_vsync_count: API to update AD vsync count
+ * @crtc: Pointer to crtc.
+ * @val: vsync count value
+ */
+void sde_cp_update_ad_vsync_count(struct drm_crtc *crtc, u32 val);
#endif /*_SDE_COLOR_PROCESSING_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index d52101df69b1..b651d33d2c07 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -614,6 +614,7 @@ end:
void sde_connector_helper_bridge_disable(struct drm_connector *connector)
{
int rc;
+ struct sde_connector *c_conn = NULL;
if (!connector)
return;
@@ -624,6 +625,31 @@ void sde_connector_helper_bridge_disable(struct drm_connector *connector)
connector->base.id, rc);
SDE_EVT32(connector->base.id, SDE_EVTLOG_ERROR);
}
+
+ c_conn = to_sde_connector(connector);
+ if (c_conn->panel_dead) {
+ c_conn->bl_device->props.power = FB_BLANK_POWERDOWN;
+ c_conn->bl_device->props.state |= BL_CORE_FBBLANK;
+ backlight_update_status(c_conn->bl_device);
+ }
+}
+
+void sde_connector_helper_bridge_enable(struct drm_connector *connector)
+{
+ struct sde_connector *c_conn = NULL;
+
+ if (!connector)
+ return;
+
+ c_conn = to_sde_connector(connector);
+
+ /* Special handling for ESD recovery case */
+ if (c_conn->panel_dead) {
+ c_conn->bl_device->props.power = FB_BLANK_UNBLANK;
+ c_conn->bl_device->props.state &= ~BL_CORE_FBBLANK;
+ backlight_update_status(c_conn->bl_device);
+ c_conn->panel_dead = false;
+ }
}
int sde_connector_clk_ctrl(struct drm_connector *connector, bool enable)
@@ -1721,15 +1747,23 @@ sde_connector_best_encoder(struct drm_connector *connector)
static void sde_connector_report_panel_dead(struct sde_connector *conn)
{
struct drm_event event;
- bool panel_dead = true;
if (!conn)
return;
+ /* Panel dead notification can come:
+ * 1) ESD thread
+ * 2) Commit thread (if TE stops coming)
+	 * In such a case, avoid sending the failure notification twice.
+ */
+ if (conn->panel_dead)
+ return;
+
+ conn->panel_dead = true;
event.type = DRM_EVENT_PANEL_DEAD;
event.length = sizeof(bool);
msm_mode_object_event_notify(&conn->base.base,
- conn->base.dev, &event, (u8 *)&panel_dead);
+ conn->base.dev, &event, (u8 *)&conn->panel_dead);
sde_encoder_display_failure_notification(conn->encoder);
SDE_ERROR("esd check failed report PANEL_DEAD conn_id: %d enc_id: %d\n",
conn->base.base.id, conn->encoder->base.id);
@@ -1855,6 +1889,9 @@ static int sde_connector_populate_mode_info(struct drm_connector *conn,
sde_kms_info_add_keystr(info, "mode_name", mode->name);
+ sde_kms_info_add_keyint(info, "bit_clk_rate",
+ mode_info.clk_rate);
+
topology_idx = (int)sde_rm_get_topology_name(
mode_info.topology);
if (topology_idx < SDE_RM_TOPOLOGY_MAX) {
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
index 5fb7cd892c8a..51e1269ceabd 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -323,6 +323,8 @@ struct sde_connector_evt {
* @bl_device: backlight device node
* @status_work: work object to perform status checks
* @force_panel_dead: variable to trigger forced ESD recovery
+ * @panel_dead: Flag to indicate if panel has gone bad
+ * @esd_status_check: Flag to indicate if ESD thread is scheduled or not
* @bl_scale_dirty: Flag to indicate PP BL scale value(s) is changed
* @bl_scale: BL scale value for ABA feature
* @bl_scale_ad: BL scale value for AD feature
@@ -363,7 +365,7 @@ struct sde_connector {
struct backlight_device *bl_device;
struct delayed_work status_work;
u32 force_panel_dead;
-
+ bool panel_dead;
bool esd_status_check;
bool bl_scale_dirty;
@@ -767,6 +769,12 @@ void sde_connector_helper_bridge_disable(struct drm_connector *connector);
int sde_connector_esd_status(struct drm_connector *connector);
/**
+ * sde_connector_helper_bridge_enable - helper function for drm bridge enable
+ * @connector: Pointer to DRM connector object
+ */
+void sde_connector_helper_bridge_enable(struct drm_connector *connector);
+
+/**
* sde_connector_get_panel_vfp - helper to get panel vfp
* @connector: pointer to drm connector
* @h_active: panel width
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 8e48d6a4b565..65145010a861 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -676,8 +676,9 @@ static bool sde_crtc_mode_fixup(struct drm_crtc *crtc,
SDE_DEBUG("\n");
if ((msm_is_mode_seamless(adjusted_mode) ||
- msm_is_mode_seamless_vrr(adjusted_mode)) &&
- (!crtc->enabled)) {
+ (msm_is_mode_seamless_vrr(adjusted_mode) ||
+ msm_is_mode_seamless_dyn_clk(adjusted_mode))) &&
+ (!crtc->enabled)) {
SDE_ERROR("crtc state prevents seamless transition\n");
return false;
}
@@ -4286,6 +4287,7 @@ static void sde_crtc_disable(struct drm_crtc *crtc)
event.type = DRM_EVENT_CRTC_POWER;
event.length = sizeof(u32);
sde_cp_crtc_suspend(crtc);
+ sde_cp_update_ad_vsync_count(crtc, 0);
power_on = 0;
msm_mode_object_event_notify(&crtc->base, crtc->dev, &event,
(u8 *)&power_on);
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index f5a814540b53..0a3de1842674 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -199,6 +199,7 @@ struct sde_crtc_event {
* @dirty_list : list of color processing features are dirty
* @ad_dirty: list containing ad properties that are dirty
* @ad_active: list containing ad properties that are active
+ * @ad_vsync_count : count of vblank since last reset for AD
* @crtc_lock : crtc lock around create, destroy and access.
* @frame_pending : Whether or not an update is pending
* @frame_events : static allocation of in-flight frame events
@@ -266,6 +267,7 @@ struct sde_crtc {
struct list_head ad_dirty;
struct list_head ad_active;
struct list_head user_event_list;
+ u32 ad_vsync_count;
struct mutex crtc_lock;
struct mutex crtc_cp_lock;
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index 526a4dd52aef..803307d91403 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -264,6 +264,8 @@ struct sde_encoder_virt {
struct sde_rect cur_conn_roi;
struct sde_rect prv_conn_roi;
struct drm_crtc *crtc;
+
+ bool elevated_ahb_vote;
};
#define to_sde_encoder_virt(x) container_of(x, struct sde_encoder_virt, base)
@@ -1835,6 +1837,7 @@ static int _sde_encoder_resource_control_helper(struct drm_encoder *drm_enc,
return rc;
}
+ sde_enc->elevated_ahb_vote = true;
/* enable DSI clks */
rc = sde_connector_clk_ctrl(sde_enc->cur_master->connector,
true);
@@ -3156,6 +3159,8 @@ static void _sde_encoder_kickoff_phys(struct sde_encoder_virt *sde_enc)
struct sde_hw_ctl *ctl;
uint32_t i, pending_flush;
unsigned long lock_flags;
+ struct msm_drm_private *priv = NULL;
+ struct sde_kms *sde_kms = NULL;
if (!sde_enc) {
SDE_ERROR("invalid encoder\n");
@@ -3233,6 +3238,20 @@ static void _sde_encoder_kickoff_phys(struct sde_encoder_virt *sde_enc)
_sde_encoder_trigger_start(sde_enc->cur_master);
spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags);
+
+ if (sde_enc->elevated_ahb_vote) {
+ priv = sde_enc->base.dev->dev_private;
+ if (priv != NULL) {
+ sde_kms = to_sde_kms(priv->kms);
+ if (sde_kms != NULL) {
+ sde_power_scale_reg_bus(&priv->phandle,
+ sde_kms->core_client,
+ VOTE_INDEX_LOW,
+ false);
+ }
+ }
+ sde_enc->elevated_ahb_vote = false;
+ }
}
static void _sde_encoder_ppsplit_swap_intf_for_right_only_update(
@@ -4855,7 +4874,7 @@ int sde_encoder_update_caps_for_cont_splash(struct drm_encoder *encoder)
int sde_encoder_display_failure_notification(struct drm_encoder *enc)
{
- struct msm_drm_thread *disp_thread = NULL;
+ struct msm_drm_thread *event_thread = NULL;
struct msm_drm_private *priv = NULL;
struct sde_encoder_virt *sde_enc = NULL;
@@ -4867,7 +4886,7 @@ int sde_encoder_display_failure_notification(struct drm_encoder *enc)
priv = enc->dev->dev_private;
sde_enc = to_sde_encoder_virt(enc);
if (!sde_enc->crtc || (sde_enc->crtc->index
- >= ARRAY_SIZE(priv->disp_thread))) {
+ >= ARRAY_SIZE(priv->event_thread))) {
SDE_DEBUG_ENC(sde_enc,
"invalid cached CRTC: %d or crtc index: %d\n",
sde_enc->crtc == NULL,
@@ -4877,15 +4896,12 @@ int sde_encoder_display_failure_notification(struct drm_encoder *enc)
SDE_EVT32_VERBOSE(DRMID(enc));
- disp_thread = &priv->disp_thread[sde_enc->crtc->index];
- if (current->tgid == disp_thread->thread->tgid) {
- sde_encoder_resource_control(&sde_enc->base,
- SDE_ENC_RC_EVENT_KICKOFF);
- } else {
- kthread_queue_work(&disp_thread->worker,
- &sde_enc->esd_trigger_work);
- kthread_flush_work(&sde_enc->esd_trigger_work);
- }
+ event_thread = &priv->event_thread[sde_enc->crtc->index];
+
+ kthread_queue_work(&event_thread->worker,
+ &sde_enc->esd_trigger_work);
+ kthread_flush_work(&sde_enc->esd_trigger_work);
+
/**
* panel may stop generating te signal (vsync) during esd failure. rsc
* hardware may hang without vsync. Avoid rsc hang by generating the
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
index 828d771acedb..972ba22f437a 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
@@ -348,6 +348,24 @@ static void _sde_encoder_phys_cmd_setup_irq_hw_idx(
struct sde_encoder_phys *phys_enc)
{
struct sde_encoder_irq *irq;
+ struct sde_kms *sde_kms = phys_enc->sde_kms;
+ int ret = 0;
+
+ mutex_lock(&sde_kms->vblank_ctl_global_lock);
+
+ if (atomic_read(&phys_enc->vblank_refcount)) {
+ SDE_ERROR(
+ "vblank_refcount mismatch detected, try to reset %d\n",
+ atomic_read(&phys_enc->vblank_refcount));
+ ret = sde_encoder_helper_unregister_irq(phys_enc,
+ INTR_IDX_RDPTR);
+ if (ret)
+ SDE_ERROR(
+ "control vblank irq registration error %d\n",
+ ret);
+
+ }
+ atomic_set(&phys_enc->vblank_refcount, 0);
irq = &phys_enc->irq[INTR_IDX_CTL_START];
irq->hw_idx = phys_enc->hw_ctl->idx;
@@ -368,6 +386,8 @@ static void _sde_encoder_phys_cmd_setup_irq_hw_idx(
irq = &phys_enc->irq[INTR_IDX_AUTOREFRESH_DONE];
irq->hw_idx = phys_enc->hw_pp->idx;
irq->irq_idx = -EINVAL;
+
+ mutex_unlock(&sde_kms->vblank_ctl_global_lock);
}
static void sde_encoder_phys_cmd_cont_splash_mode_set(
@@ -674,12 +694,15 @@ static int sde_encoder_phys_cmd_control_vblank_irq(
to_sde_encoder_phys_cmd(phys_enc);
int ret = 0;
int refcount;
+ struct sde_kms *sde_kms;
if (!phys_enc || !phys_enc->hw_pp) {
SDE_ERROR("invalid encoder\n");
return -EINVAL;
}
+ sde_kms = phys_enc->sde_kms;
+ mutex_lock(&sde_kms->vblank_ctl_global_lock);
refcount = atomic_read(&phys_enc->vblank_refcount);
/* Slave encoders don't report vblank */
@@ -697,11 +720,17 @@ static int sde_encoder_phys_cmd_control_vblank_irq(
SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0,
enable, refcount);
- if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
+ if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1) {
ret = sde_encoder_helper_register_irq(phys_enc, INTR_IDX_RDPTR);
- else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
+ if (ret)
+ atomic_dec_return(&phys_enc->vblank_refcount);
+ } else if (!enable &&
+ atomic_dec_return(&phys_enc->vblank_refcount) == 0) {
ret = sde_encoder_helper_unregister_irq(phys_enc,
INTR_IDX_RDPTR);
+ if (ret)
+ atomic_inc_return(&phys_enc->vblank_refcount);
+ }
end:
if (ret) {
@@ -713,6 +742,7 @@ end:
enable, refcount, SDE_EVTLOG_ERROR);
}
+ mutex_unlock(&sde_kms->vblank_ctl_global_lock);
return ret;
}
diff --git a/drivers/gpu/drm/msm/sde/sde_formats.c b/drivers/gpu/drm/msm/sde/sde_formats.c
index d09054ecb552..719b8c1e8d0c 100644
--- a/drivers/gpu/drm/msm/sde/sde_formats.c
+++ b/drivers/gpu/drm/msm/sde/sde_formats.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -275,97 +275,97 @@ static const struct sde_format sde_format_map[] = {
INTERLEAVED_RGB_FMT(ARGB1555,
COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
true, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(ABGR1555,
COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
true, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGBA5551,
COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
- C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
true, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGRA5551,
COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
- C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
true, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(XRGB1555,
COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
false, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(XBGR1555,
COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
false, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGBX5551,
COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
- C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
false, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGRX5551,
COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
- C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
false, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(ARGB4444,
COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
true, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(ABGR4444,
COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
true, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGBA4444,
COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
- C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
true, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGRA4444,
COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
- C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
true, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(XRGB4444,
COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
false, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(XBGR4444,
COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
false, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGBX4444,
COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
- C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
false, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGRX4444,
COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
- C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
false, 2, 0,
SDE_FETCH_LINEAR, 1),
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ad4.c b/drivers/gpu/drm/msm/sde/sde_hw_ad4.c
index 66445da368fc..e60defd2c35b 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ad4.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ad4.c
@@ -109,6 +109,9 @@ static int ad4_ipc_reset_setup_ipcr(struct sde_hw_dspp *dspp,
static int ad4_cfg_ipc_reset(struct sde_hw_dspp *dspp,
struct sde_ad_hw_cfg *cfg);
+static int ad4_vsync_update(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg);
+
static ad4_prop_setup prop_set_func[ad4_state_max][AD_PROPMAX] = {
[ad4_state_idle][AD_MODE] = ad4_mode_setup_common,
[ad4_state_idle][AD_INIT] = ad4_init_setup_idle,
@@ -121,6 +124,7 @@ static ad4_prop_setup prop_set_func[ad4_state_max][AD_PROPMAX] = {
[ad4_state_idle][AD_IPC_SUSPEND] = ad4_no_op_setup,
[ad4_state_idle][AD_IPC_RESUME] = ad4_no_op_setup,
[ad4_state_idle][AD_IPC_RESET] = ad4_no_op_setup,
+ [ad4_state_idle][AD_VSYNC_UPDATE] = ad4_no_op_setup,
[ad4_state_startup][AD_MODE] = ad4_mode_setup_common,
[ad4_state_startup][AD_INIT] = ad4_init_setup,
@@ -133,6 +137,7 @@ static ad4_prop_setup prop_set_func[ad4_state_max][AD_PROPMAX] = {
[ad4_state_startup][AD_STRENGTH] = ad4_no_op_setup,
[ad4_state_startup][AD_IPC_RESUME] = ad4_no_op_setup,
[ad4_state_startup][AD_IPC_RESET] = ad4_ipc_reset_setup_startup,
+ [ad4_state_startup][AD_VSYNC_UPDATE] = ad4_vsync_update,
[ad4_state_run][AD_MODE] = ad4_mode_setup_common,
[ad4_state_run][AD_INIT] = ad4_init_setup_run,
@@ -145,6 +150,7 @@ static ad4_prop_setup prop_set_func[ad4_state_max][AD_PROPMAX] = {
[ad4_state_run][AD_IPC_SUSPEND] = ad4_ipc_suspend_setup_run,
[ad4_state_run][AD_IPC_RESUME] = ad4_no_op_setup,
[ad4_state_run][AD_IPC_RESET] = ad4_setup_debug,
+ [ad4_state_run][AD_VSYNC_UPDATE] = ad4_vsync_update,
[ad4_state_ipcs][AD_MODE] = ad4_no_op_setup,
[ad4_state_ipcs][AD_INIT] = ad4_no_op_setup,
@@ -157,6 +163,7 @@ static ad4_prop_setup prop_set_func[ad4_state_max][AD_PROPMAX] = {
[ad4_state_ipcs][AD_IPC_SUSPEND] = ad4_no_op_setup,
[ad4_state_ipcs][AD_IPC_RESUME] = ad4_ipc_resume_setup_ipcs,
[ad4_state_ipcs][AD_IPC_RESET] = ad4_no_op_setup,
+ [ad4_state_ipcs][AD_VSYNC_UPDATE] = ad4_no_op_setup,
[ad4_state_ipcr][AD_MODE] = ad4_mode_setup_common,
[ad4_state_ipcr][AD_INIT] = ad4_init_setup_ipcr,
@@ -169,6 +176,7 @@ static ad4_prop_setup prop_set_func[ad4_state_max][AD_PROPMAX] = {
[ad4_state_ipcr][AD_IPC_SUSPEND] = ad4_ipc_suspend_setup_ipcr,
[ad4_state_ipcr][AD_IPC_RESUME] = ad4_no_op_setup,
[ad4_state_ipcr][AD_IPC_RESET] = ad4_ipc_reset_setup_ipcr,
+ [ad4_state_ipcr][AD_VSYNC_UPDATE] = ad4_no_op_setup,
[ad4_state_manual][AD_MODE] = ad4_mode_setup_common,
[ad4_state_manual][AD_INIT] = ad4_init_setup,
@@ -181,6 +189,7 @@ static ad4_prop_setup prop_set_func[ad4_state_max][AD_PROPMAX] = {
[ad4_state_manual][AD_IPC_SUSPEND] = ad4_no_op_setup,
[ad4_state_manual][AD_IPC_RESUME] = ad4_no_op_setup,
[ad4_state_manual][AD_IPC_RESET] = ad4_setup_debug_manual,
+ [ad4_state_manual][AD_VSYNC_UPDATE] = ad4_no_op_setup,
};
struct ad4_info {
@@ -201,6 +210,7 @@ struct ad4_info {
u32 irdx_control_0;
u32 tf_ctrl;
u32 vc_control_0;
+ u32 frame_pushes;
};
static struct ad4_info info[DSPP_MAX] = {
@@ -905,6 +915,8 @@ static int ad4_cfg_setup(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg)
val = (ad_cfg->cfg_param_046 & (BIT(16) - 1));
SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+ info[dspp->idx].frame_pushes = ad_cfg->cfg_param_047;
+
return 0;
}
@@ -1546,3 +1558,25 @@ static int ad4_strength_setup_idle(struct sde_hw_dspp *dspp,
ad4_mode_setup(dspp, info[dspp->idx].mode);
return 0;
}
+
+static int ad4_vsync_update(struct sde_hw_dspp *dspp,
+ struct sde_ad_hw_cfg *cfg)
+{
+ u32 *count;
+ struct sde_hw_mixer *hw_lm;
+
+ if (cfg->hw_cfg->len != sizeof(u32) || !cfg->hw_cfg->payload) {
+ DRM_ERROR("invalid sz param exp %zd given %d cfg %pK\n",
+ sizeof(u32), cfg->hw_cfg->len, cfg->hw_cfg->payload);
+ return -EINVAL;
+ }
+
+ count = (u32 *)(cfg->hw_cfg->payload);
+ hw_lm = cfg->hw_cfg->mixer_info;
+
+ if (hw_lm && !hw_lm->cfg.right_mixer &&
+ (*count < info[dspp->idx].frame_pushes))
+ (*count)++;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index 1b7d80b182e3..1a9a28b0efd1 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -604,6 +604,12 @@ static void sde_kms_prepare_commit(struct msm_kms *kms,
return;
}
+ if (sde_kms->first_kickoff) {
+ sde_power_scale_reg_bus(&priv->phandle, sde_kms->core_client,
+ VOTE_INDEX_HIGH, false);
+ sde_kms->first_kickoff = false;
+ }
+
for_each_crtc_in_state(state, crtc, crtc_state, i) {
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
head) {
@@ -2723,8 +2729,10 @@ static void sde_kms_handle_power_event(u32 event_type, void *usr)
if (event_type == SDE_POWER_EVENT_POST_ENABLE) {
sde_irq_update(msm_kms, true);
sde_vbif_init_memtypes(sde_kms);
+ sde_kms->first_kickoff = true;
} else if (event_type == SDE_POWER_EVENT_PRE_DISABLE) {
sde_irq_update(msm_kms, false);
+ sde_kms->first_kickoff = false;
}
}
@@ -3052,6 +3060,8 @@ static int sde_kms_hw_init(struct msm_kms *kms)
dev->mode_config.max_width = sde_kms->catalog->max_mixer_width * 2;
dev->mode_config.max_height = 4096;
+ mutex_init(&sde_kms->vblank_ctl_global_lock);
+
/*
* Support format modifiers for compression etc.
*/
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h
index 48f85bf8dc76..04527efdc4a4 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.h
+++ b/drivers/gpu/drm/msm/sde/sde_kms.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -226,6 +226,9 @@ struct sde_kms {
void **dp_displays;
bool has_danger_ctrl;
+ struct mutex vblank_ctl_global_lock;
+
+ bool first_kickoff;
};
struct vsync_info {
diff --git a/drivers/gpu/drm/msm/sde_dbg.c b/drivers/gpu/drm/msm/sde_dbg.c
index 582909580be9..d8f75c05f739 100644
--- a/drivers/gpu/drm/msm/sde_dbg.c
+++ b/drivers/gpu/drm/msm/sde_dbg.c
@@ -3002,6 +3002,37 @@ static int sde_dbg_reg_base_release(struct inode *inode, struct file *file)
return 0;
}
+/**
+ * sde_dbg_reg_base_is_valid_range - verify if requested memory range is valid
+ * @off: address offset in bytes
+ * @cnt: memory size in bytes
+ * Return: true if valid; false otherwise
+ */
+static bool sde_dbg_reg_base_is_valid_range(u32 off, u32 cnt)
+{
+ static struct sde_dbg_base *dbg_base = &sde_dbg_base;
+ struct sde_dbg_reg_range *node;
+ struct sde_dbg_reg_base *base;
+
+ pr_debug("check offset=0x%x cnt=0x%x\n", off, cnt);
+
+ list_for_each_entry(base, &dbg_base->reg_base_list, reg_base_head) {
+ list_for_each_entry(node, &base->sub_range_list, head) {
+ pr_debug("%s: start=0x%x end=0x%x\n", node->range_name,
+ node->offset.start, node->offset.end);
+
+ if (node->offset.start <= off
+ && off <= node->offset.end
+ && off + cnt <= node->offset.end) {
+ pr_debug("valid range requested\n");
+ return true;
+ }
+ }
+ }
+
+ pr_err("invalid range requested\n");
+ return false;
+}
/**
* sde_dbg_reg_base_offset_write - set new offset and len to debugfs reg base
@@ -3048,6 +3079,9 @@ static ssize_t sde_dbg_reg_base_offset_write(struct file *file,
if (cnt == 0)
return -EINVAL;
+ if (!sde_dbg_reg_base_is_valid_range(off, cnt))
+ return -EINVAL;
+
mutex_lock(&sde_dbg_base.mutex);
dbg->off = off;
dbg->cnt = cnt;
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index c62df3d7ba27..26150bacb7c2 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -327,7 +327,7 @@ kgsl_mem_entry_destroy(struct kref *kref)
entry->memdesc.sgt->nents, i) {
page = sg_page(sg);
for (j = 0; j < (sg->length >> PAGE_SHIFT); j++)
- set_page_dirty(nth_page(page, j));
+ set_page_dirty_lock(nth_page(page, j));
}
}
diff --git a/drivers/i2c/busses/i2c-msm-v2.c b/drivers/i2c/busses/i2c-msm-v2.c
index 4daed7f4875f..fa55b6e8d163 100644
--- a/drivers/i2c/busses/i2c-msm-v2.c
+++ b/drivers/i2c/busses/i2c-msm-v2.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -2321,6 +2321,12 @@ i2c_msm_frmwrk_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
struct i2c_msm_ctrl *ctrl = i2c_get_adapdata(adap);
struct i2c_msm_xfer *xfer = &ctrl->xfer;
+ if (num < 1) {
+ dev_err(ctrl->dev,
+ "error on number of msgs(%d) received\n", num);
+ return -EINVAL;
+ }
+
if (IS_ERR_OR_NULL(msgs)) {
dev_err(ctrl->dev, " error on msgs Accessing invalid pointer location\n");
return PTR_ERR(msgs);
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index c7f6c6b00d1b..3050db563ab3 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -116,6 +116,7 @@ struct geni_i2c_dev {
struct msm_gpi_dma_async_tx_cb_param tx_cb;
struct msm_gpi_dma_async_tx_cb_param rx_cb;
enum i2c_se_mode se_mode;
+ bool autosuspend_disable;
};
struct geni_i2c_err_log {
@@ -600,6 +601,7 @@ static int geni_i2c_xfer(struct i2c_adapter *adap,
{
struct geni_i2c_dev *gi2c = i2c_get_adapdata(adap);
int i, ret = 0, timeout = 0;
+ int ref = 0;
gi2c->err = 0;
gi2c->cur = &msgs[0];
@@ -613,6 +615,12 @@ static int geni_i2c_xfer(struct i2c_adapter *adap,
pm_runtime_set_suspended(gi2c->dev);
return ret;
}
+ ref = atomic_read(&gi2c->dev->power.usage_count);
+ if (ref <= 0) {
+ GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
+ "resume usage count mismatch:%d\n", ref);
+ }
+
if (gi2c->se_mode == GSI_ONLY) {
ret = geni_i2c_gsi_xfer(adap, msgs, num);
goto geni_i2c_txn_ret;
@@ -716,8 +724,17 @@ geni_i2c_txn_ret:
if (ret == 0)
ret = num;
- pm_runtime_mark_last_busy(gi2c->dev);
- pm_runtime_put_autosuspend(gi2c->dev);
+ if (gi2c->autosuspend_disable) {
+ pm_runtime_put_sync(gi2c->dev);
+ ref = atomic_read(&gi2c->dev->power.usage_count);
+ if (ref < 0)
+ GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
+ "suspend usage count mismatch:%d\n", ref);
+ } else {
+ pm_runtime_mark_last_busy(gi2c->dev);
+ pm_runtime_put_autosuspend(gi2c->dev);
+ }
+
gi2c->cur = NULL;
gi2c->err = 0;
dev_dbg(gi2c->dev, "i2c txn ret:%d\n", ret);
@@ -830,6 +847,9 @@ static int geni_i2c_probe(struct platform_device *pdev)
gi2c->i2c_rsc.clk_freq_out = KHz(400);
}
+ gi2c->autosuspend_disable = of_property_read_bool(pdev->dev.of_node,
+ "qcom,disable-autosuspend");
+
gi2c->irq = platform_get_irq(pdev, 0);
if (gi2c->irq < 0) {
dev_err(gi2c->dev, "IRQ error for i2c-geni\n");
@@ -861,8 +881,11 @@ static int geni_i2c_probe(struct platform_device *pdev)
strlcpy(gi2c->adap.name, "Geni-I2C", sizeof(gi2c->adap.name));
pm_runtime_set_suspended(gi2c->dev);
- pm_runtime_set_autosuspend_delay(gi2c->dev, I2C_AUTO_SUSPEND_DELAY);
- pm_runtime_use_autosuspend(gi2c->dev);
+ if (!gi2c->autosuspend_disable) {
+ pm_runtime_set_autosuspend_delay(gi2c->dev,
+ I2C_AUTO_SUSPEND_DELAY);
+ pm_runtime_use_autosuspend(gi2c->dev);
+ }
pm_runtime_enable(gi2c->dev);
i2c_add_adapter(&gi2c->adap);
@@ -950,6 +973,9 @@ static int geni_i2c_runtime_resume(struct device *dev)
"i2c fifo/se-dma mode. fifo depth:%d\n",
gi2c_tx_depth);
}
+ if (gi2c->autosuspend_disable)
+ GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
+ "i2c in autosuspend disable mode\n");
}
if (gi2c->se_mode == FIFO_SE_DMA)
enable_irq(gi2c->irq);
diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c
index 7e6287c986ef..31482f99f37f 100644
--- a/drivers/iommu/dma-mapping-fast.c
+++ b/drivers/iommu/dma-mapping-fast.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -546,12 +546,22 @@ static void *fast_smmu_alloc(struct device *dev, size_t size,
av8l_fast_iopte *ptep;
unsigned long flags;
struct sg_mapping_iter miter;
- unsigned int count = ALIGN(size, SZ_4K) >> PAGE_SHIFT;
+ size_t count = ALIGN(size, SZ_4K) >> PAGE_SHIFT;
int prot = IOMMU_READ | IOMMU_WRITE; /* TODO: extract from attrs */
bool is_coherent = is_dma_coherent(dev, attrs);
pgprot_t remap_prot = __get_dma_pgprot(attrs, PAGE_KERNEL, is_coherent);
struct page **pages;
+ /*
+ * sg_alloc_table_from_pages accepts unsigned int value for count
+ * so check count doesn't exceed UINT_MAX.
+ */
+
+ if (count > UINT_MAX) {
+ dev_err(dev, "count: %zx exceeds UNIT_MAX\n", count);
+ return NULL;
+ }
+
prot = __get_iommu_pgprot(attrs, prot, is_coherent);
*handle = DMA_ERROR_CODE;
diff --git a/drivers/iommu/iommu-debug.c b/drivers/iommu/iommu-debug.c
index 25b85ab146a2..87a1244206ba 100644
--- a/drivers/iommu/iommu-debug.c
+++ b/drivers/iommu/iommu-debug.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1349,6 +1349,7 @@ static ssize_t iommu_debug_dma_attach_read(struct file *file, char __user *ubuf,
struct iommu_debug_device *ddev = file->private_data;
struct device *dev = ddev->dev;
char c[2];
+ size_t buflen = sizeof(c);
if (*offset)
return 0;
@@ -1359,13 +1360,14 @@ static ssize_t iommu_debug_dma_attach_read(struct file *file, char __user *ubuf,
c[0] = dev->archdata.mapping->domain ? '1' : '0';
c[1] = '\n';
- if (copy_to_user(ubuf, &c, 2)) {
+ buflen = min(count, buflen);
+ if (copy_to_user(ubuf, &c, buflen)) {
pr_err("copy_to_user failed\n");
return -EFAULT;
}
*offset = 1; /* non-zero means we're done */
- return 2;
+ return buflen;
}
static const struct file_operations iommu_debug_dma_attach_fops = {
@@ -1393,7 +1395,7 @@ static ssize_t iommu_debug_test_virt_addr_read(struct file *file,
else
snprintf(buf, buf_len, "0x%pK\n", test_virt_addr);
- buflen = strlen(buf);
+ buflen = min(count, strlen(buf));
if (copy_to_user(ubuf, buf, buflen)) {
pr_err("Couldn't copy_to_user\n");
retval = -EFAULT;
@@ -1424,19 +1426,21 @@ static ssize_t iommu_debug_attach_read(struct file *file, char __user *ubuf,
{
struct iommu_debug_device *ddev = file->private_data;
char c[2];
+ size_t buflen = sizeof(c);
if (*offset)
return 0;
c[0] = ddev->domain ? '1' : '0';
c[1] = '\n';
- if (copy_to_user(ubuf, &c, 2)) {
+ buflen = min(count, buflen);
+ if (copy_to_user(ubuf, &c, buflen)) {
pr_err("copy_to_user failed\n");
return -EFAULT;
}
*offset = 1; /* non-zero means we're done */
- return 2;
+ return buflen;
}
static const struct file_operations iommu_debug_attach_fops = {
@@ -1514,7 +1518,7 @@ static ssize_t iommu_debug_pte_read(struct file *file, char __user *ubuf,
else
snprintf(buf, sizeof(buf), "pte=%016llx\n", pte);
- buflen = strlen(buf);
+ buflen = min(count, strlen(buf));
if (copy_to_user(ubuf, buf, buflen)) {
pr_err("Couldn't copy_to_user\n");
retval = -EFAULT;
@@ -1583,7 +1587,7 @@ static ssize_t iommu_debug_atos_read(struct file *file, char __user *ubuf,
snprintf(buf, 100, "%pa\n", &phys);
}
- buflen = strlen(buf);
+ buflen = min(count, strlen(buf));
if (copy_to_user(ubuf, buf, buflen)) {
pr_err("Couldn't copy_to_user\n");
retval = -EFAULT;
@@ -1636,7 +1640,7 @@ static ssize_t iommu_debug_dma_atos_read(struct file *file, char __user *ubuf,
else
snprintf(buf, sizeof(buf), "%pa\n", &phys);
- buflen = strlen(buf);
+ buflen = min(count, strlen(buf));
if (copy_to_user(ubuf, buf, buflen)) {
pr_err("Couldn't copy_to_user\n");
retval = -EFAULT;
@@ -1869,7 +1873,7 @@ static ssize_t iommu_debug_dma_map_read(struct file *file, char __user *ubuf,
iova = ddev->iova;
snprintf(buf, sizeof(buf), "%pa\n", &iova);
- buflen = strlen(buf);
+ buflen = min(count, strlen(buf));
if (copy_to_user(ubuf, buf, buflen)) {
pr_err("Couldn't copy_to_user\n");
retval = -EFAULT;
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index bc6d12113ef8..6c9446e589b8 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -229,7 +229,7 @@ static void gic_unmask_irq(struct irq_data *d)
static void gic_eoi_irq(struct irq_data *d)
{
- writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
+ writel_relaxed_no_log(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
}
static void gic_eoimode1_eoi_irq(struct irq_data *d)
@@ -340,8 +340,8 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
gic_lock_irqsave(flags);
mask = 0xff << shift;
bit = gic_cpu_map[cpu] << shift;
- val = readl_relaxed(reg) & ~mask;
- writel_relaxed(val | bit, reg);
+ val = readl_relaxed_no_log(reg) & ~mask;
+ writel_relaxed_no_log(val | bit, reg);
gic_unlock_irqrestore(flags);
return IRQ_SET_MASK_OK_DONE;
@@ -355,19 +355,21 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
void __iomem *cpu_base = gic_data_cpu_base(gic);
do {
- irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
+ irqstat = readl_relaxed_no_log(cpu_base + GIC_CPU_INTACK);
irqnr = irqstat & GICC_IAR_INT_ID_MASK;
if (likely(irqnr > 15 && irqnr < 1020)) {
if (static_key_true(&supports_deactivate))
- writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
+ writel_relaxed_no_log(irqstat,
+ cpu_base + GIC_CPU_EOI);
handle_domain_irq(gic->domain, irqnr, regs);
continue;
}
if (irqnr < 16) {
- writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
+ writel_relaxed_no_log(irqstat, cpu_base + GIC_CPU_EOI);
if (static_key_true(&supports_deactivate))
- writel_relaxed(irqstat, cpu_base + GIC_CPU_DEACTIVATE);
+ writel_relaxed_no_log(irqstat,
+ cpu_base + GIC_CPU_DEACTIVATE);
#ifdef CONFIG_SMP
/*
* Ensure any shared data written by the CPU sending
@@ -656,16 +658,18 @@ void gic_cpu_save(struct gic_chip_data *gic)
ptr = raw_cpu_ptr(gic->saved_ppi_enable);
for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
- ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
+ ptr[i] = readl_relaxed_no_log(dist_base +
+ GIC_DIST_ENABLE_SET + i * 4);
ptr = raw_cpu_ptr(gic->saved_ppi_active);
for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
- ptr[i] = readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);
+ ptr[i] = readl_relaxed_no_log(dist_base +
+ GIC_DIST_ACTIVE_SET + i * 4);
ptr = raw_cpu_ptr(gic->saved_ppi_conf);
for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
- ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
-
+ ptr[i] = readl_relaxed_no_log(dist_base +
+ GIC_DIST_CONFIG + i * 4);
}
void gic_cpu_restore(struct gic_chip_data *gic)
@@ -686,27 +690,31 @@ void gic_cpu_restore(struct gic_chip_data *gic)
ptr = raw_cpu_ptr(gic->saved_ppi_enable);
for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
- writel_relaxed(GICD_INT_EN_CLR_X32,
+ writel_relaxed_no_log(GICD_INT_EN_CLR_X32,
dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
- writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);
+ writel_relaxed_no_log(ptr[i], dist_base +
+ GIC_DIST_ENABLE_SET + i * 4);
}
ptr = raw_cpu_ptr(gic->saved_ppi_active);
for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
- writel_relaxed(GICD_INT_EN_CLR_X32,
+ writel_relaxed_no_log(GICD_INT_EN_CLR_X32,
dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
- writel_relaxed(ptr[i], dist_base + GIC_DIST_ACTIVE_SET + i * 4);
+ writel_relaxed_no_log(ptr[i], dist_base +
+ GIC_DIST_ACTIVE_SET + i * 4);
}
ptr = raw_cpu_ptr(gic->saved_ppi_conf);
for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
- writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);
+ writel_relaxed_no_log(ptr[i], dist_base +
+ GIC_DIST_CONFIG + i * 4);
for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
- writel_relaxed(GICD_INT_DEF_PRI_X4,
+ writel_relaxed_no_log(GICD_INT_DEF_PRI_X4,
dist_base + GIC_DIST_PRI + i * 4);
- writel_relaxed(GICC_INT_PRI_THRESHOLD, cpu_base + GIC_CPU_PRIMASK);
+ writel_relaxed_no_log(GICC_INT_PRI_THRESHOLD,
+ cpu_base + GIC_CPU_PRIMASK);
gic_cpu_if_up(gic);
}
@@ -799,7 +807,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
if (unlikely(nr_cpu_ids == 1)) {
/* Only one CPU? let's do a self-IPI... */
- writel_relaxed(2 << 24 | irq,
+ writel_relaxed_no_log(2 << 24 | irq,
gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
return;
}
@@ -817,7 +825,8 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
dmb(ishst);
/* this always happens on GIC0 */
- writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
+ writel_relaxed_no_log(map << 16 | irq,
+ gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
gic_unlock_irqrestore(flags);
}
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm.h b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm.h
index 03f6e0c4d5c8..ff8be3570bc5 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm.h
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -195,11 +195,11 @@ struct cam_cdm_hw_intf_cmd_submit_bl {
struct cam_cdm_bl_request *data;
};
-/* struct cam_cdm_hw_mem - CDM hw memory.struct */
+/* struct cam_cdm_hw_mem - CDM hw memory struct */
struct cam_cdm_hw_mem {
int32_t handle;
uint32_t vaddr;
- uint64_t kmdvaddr;
+ uintptr_t kmdvaddr;
size_t size;
};
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c
index f2796be6d682..4ae2f0a3bee2 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c
@@ -624,7 +624,8 @@ static void cam_hw_cdm_work(struct work_struct *work)
}
static void cam_hw_cdm_iommu_fault_handler(struct iommu_domain *domain,
- struct device *dev, unsigned long iova, int flags, void *token)
+ struct device *dev, unsigned long iova, int flags, void *token,
+ uint32_t buf_info)
{
struct cam_hw_info *cdm_hw = NULL;
struct cam_cdm *core = NULL;
@@ -910,7 +911,7 @@ int cam_hw_cdm_probe(struct platform_device *pdev)
CAM_ERR(CAM_CDM, "cpas-cdm get iommu handle failed");
goto unlock_release_mem;
}
- cam_smmu_reg_client_page_fault_handler(cdm_core->iommu_hdl.non_secure,
+ cam_smmu_set_client_page_fault_handler(cdm_core->iommu_hdl.non_secure,
cam_hw_cdm_iommu_fault_handler, cdm_hw);
rc = cam_smmu_ops(cdm_core->iommu_hdl.non_secure, CAM_SMMU_ATTACH);
@@ -1034,7 +1035,7 @@ release_platform_resource:
flush_workqueue(cdm_core->work_queue);
destroy_workqueue(cdm_core->work_queue);
destroy_non_secure_hdl:
- cam_smmu_reg_client_page_fault_handler(cdm_core->iommu_hdl.non_secure,
+ cam_smmu_set_client_page_fault_handler(cdm_core->iommu_hdl.non_secure,
NULL, cdm_hw);
if (cam_smmu_destroy_handle(cdm_core->iommu_hdl.non_secure))
CAM_ERR(CAM_CDM, "Release iommu secure hdl failed");
@@ -1106,8 +1107,8 @@ int cam_hw_cdm_remove(struct platform_device *pdev)
if (cam_smmu_destroy_handle(cdm_core->iommu_hdl.non_secure))
CAM_ERR(CAM_CDM, "Release iommu secure hdl failed");
- cam_smmu_reg_client_page_fault_handler(cdm_core->iommu_hdl.non_secure,
- NULL, cdm_hw);
+ cam_smmu_unset_client_page_fault_handler(
+ cdm_core->iommu_hdl.non_secure, cdm_hw);
mutex_destroy(&cdm_hw->hw_mutex);
kfree(cdm_hw->soc_info.soc_private);
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf.c
index e4ec08b41504..f753b34514bd 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf.c
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -166,8 +166,7 @@ int cam_cdm_acquire(struct cam_cdm_acquire_data *data)
struct cam_hw_intf *hw;
uint32_t hw_index = 0;
- if ((!data) || (!data->identifier) || (!data->base_array) ||
- (!data->base_array_cnt))
+ if (!data || !data->base_array_cnt)
return -EINVAL;
if (get_cdm_mgr_refcount()) {
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf_api.h b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf_api.h
index 2b00a87544fa..6aa6e6d2bc4c 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf_api.h
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf_api.h
@@ -105,10 +105,10 @@ struct cam_cdm_bl_cmd {
union {
int32_t mem_handle;
uint32_t *hw_iova;
- void *kernel_iova;
+ uintptr_t kernel_iova;
} bl_addr;
- uint32_t offset;
- uint32_t len;
+ uint32_t offset;
+ uint32_t len;
};
/**
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual_core.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual_core.c
index d76f344c1efa..9021ecabb27c 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual_core.c
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual_core.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -87,7 +87,7 @@ int cam_virtual_cdm_submit_bl(struct cam_hw_info *cdm_hw,
mutex_lock(&client->lock);
for (i = 0; i < req->data->cmd_arrary_count ; i++) {
- uint64_t vaddr_ptr = 0;
+ uintptr_t vaddr_ptr = 0;
size_t len = 0;
if ((!cdm_cmd->cmd[i].len) &&
@@ -106,8 +106,7 @@ int cam_virtual_cdm_submit_bl(struct cam_hw_info *cdm_hw,
} else if (req->data->type ==
CAM_CDM_BL_CMD_TYPE_KERNEL_IOVA) {
rc = 0;
- vaddr_ptr =
- (uint64_t)cdm_cmd->cmd[i].bl_addr.kernel_iova;
+ vaddr_ptr = cdm_cmd->cmd[i].bl_addr.kernel_iova;
len = cdm_cmd->cmd[i].offset + cdm_cmd->cmd[i].len;
} else {
CAM_ERR(CAM_CDM,
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context.c b/drivers/media/platform/msm/camera/cam_core/cam_context.c
index da785271053f..8a3dbba5ec19 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -42,6 +42,7 @@ int cam_context_shutdown(struct cam_context *ctx)
int rc = 0;
int32_t ctx_hdl = ctx->dev_hdl;
+ mutex_lock(&ctx->ctx_mutex);
if (ctx->state_machine[ctx->state].ioctl_ops.stop_dev) {
rc = ctx->state_machine[ctx->state].ioctl_ops.stop_dev(
ctx, NULL);
@@ -54,6 +55,7 @@ int cam_context_shutdown(struct cam_context *ctx)
if (rc < 0)
CAM_ERR(CAM_CORE, "Error while dev release %d", rc);
}
+ mutex_unlock(&ctx->ctx_mutex);
if (!rc)
rc = cam_destroy_device_hdl(ctx_hdl);
@@ -221,6 +223,27 @@ int cam_context_handle_crm_process_evt(struct cam_context *ctx,
return rc;
}
+int cam_context_dump_pf_info(struct cam_context *ctx, unsigned long iova,
+ uint32_t buf_info)
+{
+ int rc = 0;
+
+ if (!ctx->state_machine) {
+ CAM_ERR(CAM_CORE, "Context is not ready");
+ return -EINVAL;
+ }
+
+ if (ctx->state_machine[ctx->state].pagefault_ops) {
+ rc = ctx->state_machine[ctx->state].pagefault_ops(ctx, iova,
+ buf_info);
+ } else {
+ CAM_WARN(CAM_CORE, "No dump ctx in dev %d, state %d",
+ ctx->dev_hdl, ctx->state);
+ }
+
+ return rc;
+}
+
int cam_context_handle_acquire_dev(struct cam_context *ctx,
struct cam_acquire_dev_cmd *cmd)
{
@@ -431,7 +454,7 @@ int cam_context_init(struct cam_context *ctx,
mutex_init(&ctx->sync_mutex);
spin_lock_init(&ctx->lock);
- ctx->dev_name = dev_name;
+ strlcpy(ctx->dev_name, dev_name, CAM_CTX_DEV_NAME_MAX_LENGTH);
ctx->dev_id = dev_id;
ctx->ctx_id = ctx_id;
ctx->ctx_crm_intf = NULL;
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context.h b/drivers/media/platform/msm/camera/cam_core/cam_context.h
index ffceea22ae0f..8bc4340b9f06 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context.h
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -22,6 +22,9 @@
/* Forward declarations */
struct cam_context;
+/* max device name string length*/
+#define CAM_CTX_DEV_NAME_MAX_LENGTH 20
+
/* max request number */
#define CAM_CTX_REQ_MAX 20
#define CAM_CTX_CFG_MAX 20
@@ -57,23 +60,25 @@ enum cam_context_state {
* @num_out_acked: Number of out fence acked
* @flushed: Request is flushed
* @ctx: The context to which this request belongs
+ * @pf_data page fault debug data
*
*/
struct cam_ctx_request {
- struct list_head list;
- uint32_t status;
- uint64_t request_id;
+ struct list_head list;
+ uint32_t status;
+ uint64_t request_id;
void *req_priv;
- struct cam_hw_update_entry hw_update_entries[CAM_CTX_CFG_MAX];
- uint32_t num_hw_update_entries;
- struct cam_hw_fence_map_entry in_map_entries[CAM_CTX_CFG_MAX];
- uint32_t num_in_map_entries;
- struct cam_hw_fence_map_entry out_map_entries[CAM_CTX_CFG_MAX];
- uint32_t num_out_map_entries;
- atomic_t num_in_acked;
- uint32_t num_out_acked;
- int flushed;
- struct cam_context *ctx;
+ struct cam_hw_update_entry hw_update_entries[CAM_CTX_CFG_MAX];
+ uint32_t num_hw_update_entries;
+ struct cam_hw_fence_map_entry in_map_entries[CAM_CTX_CFG_MAX];
+ uint32_t num_in_map_entries;
+ struct cam_hw_fence_map_entry out_map_entries[CAM_CTX_CFG_MAX];
+ uint32_t num_out_map_entries;
+ atomic_t num_in_acked;
+ uint32_t num_out_acked;
+ int flushed;
+ struct cam_context *ctx;
+ struct cam_hw_mgr_dump_pf_data pf_data;
};
/**
@@ -135,12 +140,14 @@ struct cam_ctx_crm_ops {
* @ioctl_ops: Ioctl funciton table
* @crm_ops: CRM to context interface function table
* @irq_ops: Hardware event handle function
+ * @pagefault_ops: Function to be called on page fault
*
*/
struct cam_ctx_ops {
struct cam_ctx_ioctl_ops ioctl_ops;
struct cam_ctx_crm_ops crm_ops;
cam_hw_event_cb_func irq_ops;
+ cam_hw_pagefault_cb_func pagefault_ops;
};
/**
@@ -175,7 +182,7 @@ struct cam_ctx_ops {
*
*/
struct cam_context {
- const char *dev_name;
+ char dev_name[CAM_CTX_DEV_NAME_MAX_LENGTH];
uint64_t dev_id;
uint32_t ctx_id;
struct list_head list;
@@ -292,6 +299,19 @@ int cam_context_handle_crm_process_evt(struct cam_context *ctx,
struct cam_req_mgr_link_evt_data *process_evt);
/**
+ * cam_context_dump_pf_info()
+ *
+ * @brief: Handle dump active request request command
+ *
+ * @ctx: Object pointer for cam_context
+ * @iova: Page fault address
+ * @buf_info: Information about closest memory handle
+ *
+ */
+int cam_context_dump_pf_info(struct cam_context *ctx, unsigned long iova,
+ uint32_t buf_info);
+
+/**
* cam_context_handle_acquire_dev()
*
* @brief: Handle acquire device command
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
index 6c2383ed8110..8021f12934a9 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
@@ -272,7 +272,7 @@ int32_t cam_context_prepare_dev_to_hw(struct cam_context *ctx,
int rc = 0;
struct cam_ctx_request *req = NULL;
struct cam_hw_prepare_update_args cfg;
- uint64_t packet_addr;
+ uintptr_t packet_addr;
struct cam_packet *packet;
size_t len = 0;
int32_t i = 0, j = 0;
@@ -315,8 +315,7 @@ int32_t cam_context_prepare_dev_to_hw(struct cam_context *ctx,
/* for config dev, only memory handle is supported */
/* map packet from the memhandle */
rc = cam_mem_get_cpu_buf((int32_t) cmd->packet_handle,
- (uint64_t *) &packet_addr,
- &len);
+ &packet_addr, &len);
if (rc != 0) {
CAM_ERR(CAM_CTXT, "[%s][%d] Can not get packet address",
ctx->dev_name, ctx->ctx_id);
@@ -324,7 +323,8 @@ int32_t cam_context_prepare_dev_to_hw(struct cam_context *ctx,
goto free_req;
}
- packet = (struct cam_packet *) (packet_addr + cmd->offset);
+ packet = (struct cam_packet *) ((uint8_t *)packet_addr +
+ (uint32_t)cmd->offset);
/* preprocess the configuration */
memset(&cfg, 0, sizeof(cfg));
@@ -337,6 +337,7 @@ int32_t cam_context_prepare_dev_to_hw(struct cam_context *ctx,
cfg.out_map_entries = req->out_map_entries;
cfg.max_in_map_entries = CAM_CTX_CFG_MAX;
cfg.in_map_entries = req->in_map_entries;
+ cfg.pf_data = &(req->pf_data);
rc = ctx->hw_mgr_intf->hw_prepare_update(
ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
@@ -905,3 +906,38 @@ int32_t cam_context_stop_dev_to_hw(struct cam_context *ctx)
end:
return rc;
}
+
+int32_t cam_context_dump_pf_info_to_hw(struct cam_context *ctx,
+ struct cam_packet *packet, unsigned long iova, uint32_t buf_info,
+ bool *mem_found)
+{
+ int rc = 0;
+ struct cam_hw_cmd_args cmd_args;
+
+ if (!ctx) {
+ CAM_ERR(CAM_CTXT, "Invalid input params %pK ", ctx);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ if (!ctx->hw_mgr_intf) {
+ CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready",
+ ctx->dev_name, ctx->ctx_id);
+ rc = -EFAULT;
+ goto end;
+ }
+
+ if (ctx->hw_mgr_intf->hw_cmd) {
+ cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
+ cmd_args.cmd_type = CAM_HW_MGR_CMD_DUMP_PF_INFO;
+ cmd_args.u.pf_args.pf_data.packet = packet;
+ cmd_args.u.pf_args.iova = iova;
+ cmd_args.u.pf_args.buf_info = buf_info;
+ cmd_args.u.pf_args.mem_found = mem_found;
+ ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
+ &cmd_args);
+ }
+
+end:
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.h b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.h
index 9b95eaddb3c7..43e69405ee3b 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.h
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -31,5 +31,8 @@ int32_t cam_context_flush_dev_to_hw(struct cam_context *ctx,
int32_t cam_context_flush_ctx_to_hw(struct cam_context *ctx);
int32_t cam_context_flush_req_to_hw(struct cam_context *ctx,
struct cam_flush_dev_cmd *cmd);
+int32_t cam_context_dump_pf_info_to_hw(struct cam_context *ctx,
+ struct cam_packet *packet, unsigned long iova, uint32_t buf_info,
+ bool *mem_found);
#endif /* _CAM_CONTEXT_UTILS_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h
index f7990b6d5d4a..54b0f4d63bf8 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h
+++ b/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h
@@ -13,6 +13,9 @@
#ifndef _CAM_HW_MGR_INTF_H_
#define _CAM_HW_MGR_INTF_H_
+#include <linux/time.h>
+#include <linux/types.h>
+
/*
* This file declares Constants, Enums, Structures and APIs to be used as
* Interface between HW Manager and Context.
@@ -29,6 +32,10 @@
typedef int (*cam_hw_event_cb_func)(void *context, uint32_t evt_id,
void *evt_data);
+/* hardware page fault callback function type */
+typedef int (*cam_hw_pagefault_cb_func)(void *context, unsigned long iova,
+ uint32_t buf_info);
+
/**
* struct cam_hw_update_entry - Entry for hardware config
*
@@ -44,7 +51,7 @@ struct cam_hw_update_entry {
uint32_t offset;
uint32_t len;
uint32_t flags;
- uint64_t addr;
+ uintptr_t addr;
};
/**
@@ -89,7 +96,7 @@ struct cam_hw_acquire_args {
void *context_data;
cam_hw_event_cb_func event_cb;
uint32_t num_acq;
- uint64_t acquire_info;
+ uintptr_t acquire_info;
void *ctxt_to_hw_map;
};
@@ -131,6 +138,16 @@ struct cam_hw_stop_args {
void *args;
};
+
+/**
+ * struct cam_hw_mgr_dump_pf_data - page fault debug data
+ *
+ * packet: pointer to packet
+ */
+struct cam_hw_mgr_dump_pf_data {
+ void *packet;
+};
+
/**
* struct cam_hw_prepare_update_args - Payload for prepare command
*
@@ -146,6 +163,7 @@ struct cam_hw_stop_args {
* @in_map_entries: Actual input fence mapping list (returned)
* @num_in_map_entries: Number of acutal input fence mapping (returned)
* @priv: Private pointer of hw update
+ * @pf_data: Debug data for page fault
*
*/
struct cam_hw_prepare_update_args {
@@ -161,6 +179,7 @@ struct cam_hw_prepare_update_args {
struct cam_hw_fence_map_entry *in_map_entries;
uint32_t num_in_map_entries;
void *priv;
+ struct cam_hw_mgr_dump_pf_data *pf_data;
};
/**
@@ -207,6 +226,48 @@ struct cam_hw_flush_args {
};
/**
+ * struct cam_hw_dump_pf_args - Payload for dump pf info command
+ *
+ * @pf_data: Debug data for page fault
+ * @iova: Page fault address
+ * @buf_info: Info about memory buffer where page
+ * fault occurred
+ * @mem_found: If fault memory found in current
+ * request
+ *
+ */
+struct cam_hw_dump_pf_args {
+ struct cam_hw_mgr_dump_pf_data pf_data;
+ unsigned long iova;
+ uint32_t buf_info;
+ bool *mem_found;
+};
+
+/* enum cam_hw_mgr_command - Hardware manager command type */
+enum cam_hw_mgr_command {
+ CAM_HW_MGR_CMD_INTERNAL,
+ CAM_HW_MGR_CMD_DUMP_PF_INFO,
+};
+
+/**
+ * struct cam_hw_cmd_args - Payload for hw manager command
+ *
+ * @ctxt_to_hw_map: HW context from the acquire
+ * @cmd_type HW command type
+ * @internal_args Arguments for internal command
+ * @pf_args Arguments for Dump PF info command
+ *
+ */
+struct cam_hw_cmd_args {
+ void *ctxt_to_hw_map;
+ uint32_t cmd_type;
+ union {
+ void *internal_args;
+ struct cam_hw_dump_pf_args pf_args;
+ } u;
+};
+
+/**
* cam_hw_mgr_intf - HW manager interface
*
* @hw_mgr_priv: HW manager object
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_node.c b/drivers/media/platform/msm/camera/cam_core/cam_node.c
index 3f24c6d41a3f..0f3f2859a420 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_node.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_node.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -34,7 +34,7 @@ static void cam_node_print_ctx_state(
spin_lock(&ctx->lock);
CAM_INFO(CAM_CORE,
"[%s][%d] : state=%d, refcount=%d, active_req_list=%d, pending_req_list=%d, wait_req_list=%d, free_req_list=%d",
- ctx->dev_name ? ctx->dev_name : "null",
+ ctx->dev_name,
i, ctx->state,
atomic_read(&(ctx->refcount.refcount)),
list_empty(&ctx->active_req_list),
@@ -154,6 +154,12 @@ static int __cam_node_handle_start_dev(struct cam_node *node,
return -EINVAL;
}
+ if (strcmp(node->name, ctx->dev_name)) {
+ CAM_ERR(CAM_CORE, "node name %s dev name:%s not matching",
+ node->name, ctx->dev_name);
+ return -EINVAL;
+ }
+
rc = cam_context_handle_start_dev(ctx, start);
if (rc)
CAM_ERR(CAM_CORE, "Start failure for node %s", node->name);
@@ -187,6 +193,12 @@ static int __cam_node_handle_stop_dev(struct cam_node *node,
return -EINVAL;
}
+ if (strcmp(node->name, ctx->dev_name)) {
+ CAM_ERR(CAM_CORE, "node name %s dev name:%s not matching",
+ node->name, ctx->dev_name);
+ return -EINVAL;
+ }
+
rc = cam_context_handle_stop_dev(ctx, stop);
if (rc)
CAM_ERR(CAM_CORE, "Stop failure for node %s", node->name);
@@ -220,6 +232,12 @@ static int __cam_node_handle_config_dev(struct cam_node *node,
return -EINVAL;
}
+ if (strcmp(node->name, ctx->dev_name)) {
+ CAM_ERR(CAM_CORE, "node name %s dev name:%s not matching",
+ node->name, ctx->dev_name);
+ return -EINVAL;
+ }
+
rc = cam_context_handle_config_dev(ctx, config);
if (rc)
CAM_ERR(CAM_CORE, "Config failure for node %s", node->name);
@@ -253,6 +271,12 @@ static int __cam_node_handle_flush_dev(struct cam_node *node,
return -EINVAL;
}
+ if (strcmp(node->name, ctx->dev_name)) {
+ CAM_ERR(CAM_CORE, "node name %s dev name:%s not matching",
+ node->name, ctx->dev_name);
+ return -EINVAL;
+ }
+
rc = cam_context_handle_flush_dev(ctx, flush);
if (rc)
CAM_ERR(CAM_CORE, "Flush failure for node %s", node->name);
@@ -286,20 +310,36 @@ static int __cam_node_handle_release_dev(struct cam_node *node,
return -EINVAL;
}
- rc = cam_context_handle_release_dev(ctx, release);
- if (rc)
- CAM_ERR(CAM_CORE, "context release failed node %s", node->name);
+ if (strcmp(node->name, ctx->dev_name)) {
+ CAM_ERR(CAM_CORE, "node name %s dev name:%s not matching",
+ node->name, ctx->dev_name);
+ return -EINVAL;
+ }
+ if (ctx->state > CAM_CTX_UNINIT && ctx->state < CAM_CTX_STATE_MAX) {
+ rc = cam_context_handle_release_dev(ctx, release);
+ if (rc)
+ CAM_ERR(CAM_CORE, "context release failed for node %s",
+ node->name);
+ } else {
+ CAM_WARN(CAM_CORE,
+ "node %s context id %u state %d invalid to release hdl",
+ node->name, ctx->ctx_id, ctx->state);
+ goto destroy_dev_hdl;
+ }
+
+ cam_context_putref(ctx);
+
+destroy_dev_hdl:
rc = cam_destroy_device_hdl(release->dev_handle);
if (rc)
- CAM_ERR(CAM_CORE, "destroy device handle is failed node %s",
+ CAM_ERR(CAM_CORE, "destroy device hdl failed for node %s",
node->name);
CAM_DBG(CAM_CORE, "[%s] Release ctx_id=%d, refcount=%d",
node->name, ctx->ctx_id,
atomic_read(&(ctx->refcount.refcount)));
- cam_context_putref(ctx);
return rc;
}
@@ -420,6 +460,9 @@ int cam_node_shutdown(struct cam_node *node)
for (i = 0; i < node->ctx_size; i++) {
if (node->ctx_list[i].dev_hdl > 0) {
+ CAM_DBG(CAM_CORE,
+ "Node [%s] invoking shutdown on context [%d]",
+ node->name, i);
rc = cam_context_shutdown(&(node->ctx_list[i]));
if (rc)
continue;
@@ -491,7 +534,7 @@ int cam_node_handle_ioctl(struct cam_node *node, struct cam_control *cmd)
case CAM_QUERY_CAP: {
struct cam_query_cap_cmd query;
- if (copy_from_user(&query, (void __user *)cmd->handle,
+ if (copy_from_user(&query, u64_to_user_ptr(cmd->handle),
sizeof(query))) {
rc = -EFAULT;
break;
@@ -504,7 +547,7 @@ int cam_node_handle_ioctl(struct cam_node *node, struct cam_control *cmd)
break;
}
- if (copy_to_user((void __user *)cmd->handle, &query,
+ if (copy_to_user(u64_to_user_ptr(cmd->handle), &query,
sizeof(query)))
rc = -EFAULT;
@@ -513,7 +556,7 @@ int cam_node_handle_ioctl(struct cam_node *node, struct cam_control *cmd)
case CAM_ACQUIRE_DEV: {
struct cam_acquire_dev_cmd acquire;
- if (copy_from_user(&acquire, (void __user *)cmd->handle,
+ if (copy_from_user(&acquire, u64_to_user_ptr(cmd->handle),
sizeof(acquire))) {
rc = -EFAULT;
break;
@@ -524,7 +567,7 @@ int cam_node_handle_ioctl(struct cam_node *node, struct cam_control *cmd)
rc);
break;
}
- if (copy_to_user((void __user *)cmd->handle, &acquire,
+ if (copy_to_user(u64_to_user_ptr(cmd->handle), &acquire,
sizeof(acquire)))
rc = -EFAULT;
break;
@@ -532,7 +575,7 @@ int cam_node_handle_ioctl(struct cam_node *node, struct cam_control *cmd)
case CAM_START_DEV: {
struct cam_start_stop_dev_cmd start;
- if (copy_from_user(&start, (void __user *)cmd->handle,
+ if (copy_from_user(&start, u64_to_user_ptr(cmd->handle),
sizeof(start)))
rc = -EFAULT;
else {
@@ -546,7 +589,7 @@ int cam_node_handle_ioctl(struct cam_node *node, struct cam_control *cmd)
case CAM_STOP_DEV: {
struct cam_start_stop_dev_cmd stop;
- if (copy_from_user(&stop, (void __user *)cmd->handle,
+ if (copy_from_user(&stop, u64_to_user_ptr(cmd->handle),
sizeof(stop)))
rc = -EFAULT;
else {
@@ -560,7 +603,7 @@ int cam_node_handle_ioctl(struct cam_node *node, struct cam_control *cmd)
case CAM_CONFIG_DEV: {
struct cam_config_dev_cmd config;
- if (copy_from_user(&config, (void __user *)cmd->handle,
+ if (copy_from_user(&config, u64_to_user_ptr(cmd->handle),
sizeof(config)))
rc = -EFAULT;
else {
@@ -574,7 +617,7 @@ int cam_node_handle_ioctl(struct cam_node *node, struct cam_control *cmd)
case CAM_RELEASE_DEV: {
struct cam_release_dev_cmd release;
- if (copy_from_user(&release, (void __user *)cmd->handle,
+ if (copy_from_user(&release, u64_to_user_ptr(cmd->handle),
sizeof(release)))
rc = -EFAULT;
else {
@@ -588,7 +631,7 @@ int cam_node_handle_ioctl(struct cam_node *node, struct cam_control *cmd)
case CAM_FLUSH_REQ: {
struct cam_flush_dev_cmd flush;
- if (copy_from_user(&flush, (void __user *)cmd->handle,
+ if (copy_from_user(&flush, u64_to_user_ptr(cmd->handle),
sizeof(flush)))
rc = -EFAULT;
else {
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_node.h b/drivers/media/platform/msm/camera/cam_core/cam_node.h
index 4303ee38dd54..e270bb4105fd 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_node.h
+++ b/drivers/media/platform/msm/camera/cam_core/cam_node.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -18,7 +18,6 @@
#include "cam_hw_mgr_intf.h"
#include "cam_req_mgr_interface.h"
-#define CAM_NODE_NAME_LENGTH_MAX 256
#define CAM_NODE_STATE_UNINIT 0
#define CAM_NODE_STATE_INIT 1
@@ -38,7 +37,7 @@
*
*/
struct cam_node {
- char name[CAM_NODE_NAME_LENGTH_MAX];
+ char name[CAM_CTX_DEV_NAME_MAX_LENGTH];
uint32_t state;
/* context pool */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
index 053447e331a8..948485fa6474 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
@@ -933,7 +933,7 @@ static int cam_cpas_hw_start(void *hw_priv, void *start_args,
}
if (sizeof(struct cam_cpas_hw_cmd_start) != arg_size) {
- CAM_ERR(CAM_CPAS, "HW_CAPS size mismatch %ld %d",
+ CAM_ERR(CAM_CPAS, "HW_CAPS size mismatch %zd %d",
sizeof(struct cam_cpas_hw_cmd_start), arg_size);
return -EINVAL;
}
@@ -1065,7 +1065,7 @@ static int cam_cpas_hw_stop(void *hw_priv, void *stop_args,
}
if (sizeof(struct cam_cpas_hw_cmd_stop) != arg_size) {
- CAM_ERR(CAM_CPAS, "HW_CAPS size mismatch %ld %d",
+ CAM_ERR(CAM_CPAS, "HW_CAPS size mismatch %zd %d",
sizeof(struct cam_cpas_hw_cmd_stop), arg_size);
return -EINVAL;
}
@@ -1168,7 +1168,7 @@ static int cam_cpas_hw_init(void *hw_priv, void *init_hw_args,
}
if (sizeof(struct cam_cpas_hw_caps) != arg_size) {
- CAM_ERR(CAM_CPAS, "INIT HW size mismatch %ld %d",
+ CAM_ERR(CAM_CPAS, "INIT HW size mismatch %zd %d",
sizeof(struct cam_cpas_hw_caps), arg_size);
return -EINVAL;
}
@@ -1325,7 +1325,7 @@ static int cam_cpas_hw_get_hw_info(void *hw_priv,
}
if (sizeof(struct cam_cpas_hw_caps) != arg_size) {
- CAM_ERR(CAM_CPAS, "HW_CAPS size mismatch %ld %d",
+ CAM_ERR(CAM_CPAS, "HW_CAPS size mismatch %zd %d",
sizeof(struct cam_cpas_hw_caps), arg_size);
return -EINVAL;
}
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
index d51b15291142..eb8b156a4fc2 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
@@ -23,7 +23,7 @@
#define CAM_CPAS_AXI_MIN_MNOC_AB_BW (2048 * 1024)
#define CAM_CPAS_AXI_MIN_MNOC_IB_BW (2048 * 1024)
#define CAM_CPAS_AXI_MIN_CAMNOC_AB_BW (2048 * 1024)
-#define CAM_CPAS_AXI_MIN_CAMNOC_IB_BW (3000000000L)
+#define CAM_CPAS_AXI_MIN_CAMNOC_IB_BW (3000000000UL)
#define CAM_CPAS_GET_CLIENT_IDX(handle) (handle)
#define CAM_CPAS_GET_CLIENT_HANDLE(indx) (indx)
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
index cdc8a3baef28..a9f1e4f8364e 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
@@ -367,7 +367,7 @@ int cam_cpas_subdev_cmd(struct cam_cpas_intf *cpas_intf,
case CAM_QUERY_CAP: {
struct cam_cpas_query_cap query;
- rc = copy_from_user(&query, (void __user *) cmd->handle,
+ rc = copy_from_user(&query, u64_to_user_ptr(cmd->handle),
sizeof(query));
if (rc) {
CAM_ERR(CAM_CPAS, "Failed in copy from user, rc=%d",
@@ -381,7 +381,7 @@ int cam_cpas_subdev_cmd(struct cam_cpas_intf *cpas_intf,
if (rc)
break;
- rc = copy_to_user((void __user *) cmd->handle, &query,
+ rc = copy_to_user(u64_to_user_ptr(cmd->handle), &query,
sizeof(query));
if (rc)
CAM_ERR(CAM_CPAS, "Failed in copy to user, rc=%d", rc);
diff --git a/drivers/media/platform/msm/camera/cam_fd/cam_fd_context.c b/drivers/media/platform/msm/camera/cam_fd/cam_fd_context.c
index 99c509c62809..70ff72c39028 100644
--- a/drivers/media/platform/msm/camera/cam_fd/cam_fd_context.c
+++ b/drivers/media/platform/msm/camera/cam_fd/cam_fd_context.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -17,7 +17,7 @@
#include "cam_fd_context.h"
#include "cam_trace.h"
-static const char fd_dev_name[] = "fd";
+static const char fd_dev_name[] = "cam-fd";
/* Functions in Available state */
static int __cam_fd_ctx_acquire_dev_in_available(struct cam_context *ctx,
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c
index 09388fe40c70..7e78f458ec2b 100644
--- a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c
@@ -417,7 +417,7 @@ static int cam_fd_packet_generic_blob_handler(void *user_data,
uint32_t *get_raw_results = (uint32_t *)blob_data;
if (sizeof(uint32_t) != blob_size) {
- CAM_ERR(CAM_FD, "Invalid blob size %lu %u",
+ CAM_ERR(CAM_FD, "Invalid blob size %zu %u",
sizeof(uint32_t), blob_size);
return -EINVAL;
}
@@ -430,7 +430,7 @@ static int cam_fd_packet_generic_blob_handler(void *user_data,
(struct cam_fd_soc_clock_bw_request *)blob_data;
if (sizeof(struct cam_fd_soc_clock_bw_request) != blob_size) {
- CAM_ERR(CAM_FD, "Invalid blob size %lu %u",
+ CAM_ERR(CAM_FD, "Invalid blob size %zu %u",
sizeof(struct cam_fd_soc_clock_bw_request),
blob_size);
return -EINVAL;
@@ -537,7 +537,7 @@ static int cam_fd_mgr_util_prepare_io_buf_info(int32_t iommu_hdl,
uint32_t i, j, plane, num_out_buf, num_in_buf;
struct cam_buf_io_cfg *io_cfg;
dma_addr_t io_addr[CAM_PACKET_MAX_PLANES];
- uint64_t cpu_addr[CAM_PACKET_MAX_PLANES];
+ uintptr_t cpu_addr[CAM_PACKET_MAX_PLANES];
size_t size;
bool need_io_map, need_cpu_map;
@@ -583,7 +583,7 @@ static int cam_fd_mgr_util_prepare_io_buf_info(int32_t iommu_hdl,
rc = cam_mem_get_io_buf(
io_cfg[i].mem_handle[plane],
iommu_hdl, &io_addr[plane], &size);
- if ((rc) || (io_addr[plane] >> 32)) {
+ if (rc) {
CAM_ERR(CAM_FD,
"Invalid io buf %d %d %d %d",
io_cfg[i].direction,
@@ -599,7 +599,8 @@ static int cam_fd_mgr_util_prepare_io_buf_info(int32_t iommu_hdl,
rc = cam_mem_get_cpu_buf(
io_cfg[i].mem_handle[plane],
&cpu_addr[plane], &size);
- if (rc) {
+ if (rc || ((io_addr[plane] & 0xFFFFFFFF)
+ != io_addr[plane])) {
CAM_ERR(CAM_FD,
"Invalid cpu buf %d %d %d %d",
io_cfg[i].direction,
@@ -1088,8 +1089,10 @@ static int cam_fd_mgr_hw_get_caps(void *hw_mgr_priv, void *hw_get_caps_args)
struct cam_fd_hw_mgr *hw_mgr = hw_mgr_priv;
struct cam_query_cap_cmd *query = hw_get_caps_args;
struct cam_fd_query_cap_cmd query_fd;
+ void __user *caps_handle =
+ u64_to_user_ptr(query->caps_handle);
- if (copy_from_user(&query_fd, (void __user *)query->caps_handle,
+ if (copy_from_user(&query_fd, caps_handle,
sizeof(struct cam_fd_query_cap_cmd))) {
CAM_ERR(CAM_FD, "Failed in copy from user, rc=%d", rc);
return -EFAULT;
@@ -1106,7 +1109,7 @@ static int cam_fd_mgr_hw_get_caps(void *hw_mgr_priv, void *hw_get_caps_args)
query_fd.hw_caps.wrapper_version.major,
query_fd.hw_caps.wrapper_version.minor);
- if (copy_to_user((void __user *)query->caps_handle, &query_fd,
+ if (copy_to_user(caps_handle, &query_fd,
sizeof(struct cam_fd_query_cap_cmd)))
rc = -EFAULT;
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c
index 87dc6949de0c..fa648c7a773e 100644
--- a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c
@@ -651,7 +651,7 @@ int cam_fd_hw_init(void *hw_priv, void *init_hw_args, uint32_t arg_size)
}
if (arg_size != sizeof(struct cam_fd_hw_init_args)) {
- CAM_ERR(CAM_FD, "Invalid arg size %u, %lu", arg_size,
+ CAM_ERR(CAM_FD, "Invalid arg size %u, %zu", arg_size,
sizeof(struct cam_fd_hw_init_args));
return -EINVAL;
}
@@ -735,7 +735,7 @@ int cam_fd_hw_deinit(void *hw_priv, void *deinit_hw_args, uint32_t arg_size)
}
if (arg_size != sizeof(struct cam_fd_hw_deinit_args)) {
- CAM_ERR(CAM_FD, "Invalid arg size %u, %lu", arg_size,
+ CAM_ERR(CAM_FD, "Invalid arg size %u, %zu", arg_size,
sizeof(struct cam_fd_hw_deinit_args));
return -EINVAL;
}
@@ -859,7 +859,7 @@ int cam_fd_hw_start(void *hw_priv, void *hw_start_args, uint32_t arg_size)
}
if (arg_size != sizeof(struct cam_fd_hw_cmd_start_args)) {
- CAM_ERR(CAM_FD, "Invalid arg size %u, %lu", arg_size,
+ CAM_ERR(CAM_FD, "Invalid arg size %u, %zu", arg_size,
sizeof(struct cam_fd_hw_cmd_start_args));
return -EINVAL;
}
@@ -1010,7 +1010,7 @@ int cam_fd_hw_reserve(void *hw_priv, void *hw_reserve_args, uint32_t arg_size)
}
if (arg_size != sizeof(struct cam_fd_hw_reserve_args)) {
- CAM_ERR(CAM_FD, "Invalid arg size %u, %lu", arg_size,
+ CAM_ERR(CAM_FD, "Invalid arg size %u, %zu", arg_size,
sizeof(struct cam_fd_hw_reserve_args));
return -EINVAL;
}
@@ -1079,7 +1079,7 @@ int cam_fd_hw_release(void *hw_priv, void *hw_release_args, uint32_t arg_size)
}
if (arg_size != sizeof(struct cam_fd_hw_release_args)) {
- CAM_ERR(CAM_FD, "Invalid arg size %u, %lu", arg_size,
+ CAM_ERR(CAM_FD, "Invalid arg size %u, %zu", arg_size,
sizeof(struct cam_fd_hw_release_args));
return -EINVAL;
}
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_intf.h b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_intf.h
index aae7648ba1e2..ef3b6c9314e2 100644
--- a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_intf.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -111,7 +111,7 @@ struct cam_fd_hw_io_buffer {
struct cam_buf_io_cfg *io_cfg;
uint32_t num_buf;
uint64_t io_addr[CAM_PACKET_MAX_PLANES];
- uint64_t cpu_addr[CAM_PACKET_MAX_PLANES];
+ uintptr_t cpu_addr[CAM_PACKET_MAX_PLANES];
};
/**
diff --git a/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c b/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c
index 502c95d4c60e..7ad562e058af 100644
--- a/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c
+++ b/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -25,8 +25,45 @@
#include "cam_mem_mgr.h"
#include "cam_trace.h"
#include "cam_debug_util.h"
+#include "cam_packet_util.h"
-static const char icp_dev_name[] = "icp";
+static const char icp_dev_name[] = "cam-icp";
+
+static int cam_icp_context_dump_active_request(void *data, unsigned long iova,
+ uint32_t buf_info)
+{
+ struct cam_context *ctx = (struct cam_context *)data;
+ struct cam_ctx_request *req = NULL;
+ struct cam_ctx_request *req_temp = NULL;
+ struct cam_hw_mgr_dump_pf_data *pf_dbg_entry = NULL;
+ int rc = 0;
+ bool b_mem_found = false;
+
+ if (!ctx) {
+ CAM_ERR(CAM_ICP, "Invalid ctx");
+ return -EINVAL;
+ }
+
+ CAM_INFO(CAM_ICP, "iommu fault for icp ctx %d state %d",
+ ctx->ctx_id, ctx->state);
+
+ list_for_each_entry_safe(req, req_temp,
+ &ctx->active_req_list, list) {
+ pf_dbg_entry = &(req->pf_data);
+ CAM_INFO(CAM_ICP, "req_id : %lld", req->request_id);
+
+ rc = cam_context_dump_pf_info_to_hw(ctx, pf_dbg_entry->packet,
+ iova, buf_info, &b_mem_found);
+ if (rc)
+ CAM_ERR(CAM_ICP, "Failed to dump pf info");
+
+ if (b_mem_found)
+ CAM_ERR(CAM_ICP, "Found page fault in req %lld %d",
+ req->request_id, rc);
+ }
+
+ return rc;
+}
static int __cam_icp_acquire_dev_in_available(struct cam_context *ctx,
struct cam_acquire_dev_cmd *cmd)
@@ -156,6 +193,7 @@ static struct cam_ctx_ops
},
.crm_ops = {},
.irq_ops = __cam_icp_handle_buf_done_in_ready,
+ .pagefault_ops = cam_icp_context_dump_active_request,
},
/* Ready */
{
@@ -167,12 +205,14 @@ static struct cam_ctx_ops
},
.crm_ops = {},
.irq_ops = __cam_icp_handle_buf_done_in_ready,
+ .pagefault_ops = cam_icp_context_dump_active_request,
},
/* Activated */
{
.ioctl_ops = {},
.crm_ops = {},
.irq_ops = NULL,
+ .pagefault_ops = cam_icp_context_dump_active_request,
},
};
diff --git a/drivers/media/platform/msm/camera/cam_icp/cam_icp_subdev.c b/drivers/media/platform/msm/camera/cam_icp/cam_icp_subdev.c
index 7df806b1b7a3..2ea7738cbdef 100644
--- a/drivers/media/platform/msm/camera/cam_icp/cam_icp_subdev.c
+++ b/drivers/media/platform/msm/camera/cam_icp/cam_icp_subdev.c
@@ -35,6 +35,7 @@
#include "cam_hw_mgr_intf.h"
#include "cam_icp_hw_mgr_intf.h"
#include "cam_debug_util.h"
+#include "cam_smmu_api.h"
#define CAM_ICP_DEV_NAME "cam-icp"
@@ -55,6 +56,25 @@ static const struct of_device_id cam_icp_dt_match[] = {
{}
};
+static void cam_icp_dev_iommu_fault_handler(
+ struct iommu_domain *domain, struct device *dev, unsigned long iova,
+ int flags, void *token, uint32_t buf_info)
+{
+ int i = 0;
+ struct cam_node *node = NULL;
+
+ if (!token) {
+ CAM_ERR(CAM_ICP, "invalid token in page handler cb");
+ return;
+ }
+
+ node = (struct cam_node *)token;
+
+ for (i = 0; i < node->ctx_size; i++)
+ cam_context_dump_pf_info(&(node->ctx_list[i]), iova,
+ buf_info);
+}
+
static int cam_icp_subdev_open(struct v4l2_subdev *sd,
struct v4l2_subdev_fh *fh)
{
@@ -96,7 +116,7 @@ static int cam_icp_subdev_close(struct v4l2_subdev *sd,
mutex_lock(&g_icp_dev.icp_lock);
if (g_icp_dev.open_cnt <= 0) {
- CAM_ERR(CAM_ICP, "ICP subdev is already closed");
+ CAM_DBG(CAM_ICP, "ICP subdev is already closed");
rc = -EINVAL;
goto end;
}
@@ -135,6 +155,7 @@ static int cam_icp_probe(struct platform_device *pdev)
int rc = 0, i = 0;
struct cam_node *node;
struct cam_hw_mgr_intf *hw_mgr_intf;
+ int iommu_hdl = -1;
if (!pdev) {
CAM_ERR(CAM_ICP, "pdev is NULL");
@@ -158,7 +179,8 @@ static int cam_icp_probe(struct platform_device *pdev)
goto hw_alloc_fail;
}
- rc = cam_icp_hw_mgr_init(pdev->dev.of_node, (uint64_t *)hw_mgr_intf);
+ rc = cam_icp_hw_mgr_init(pdev->dev.of_node, (uint64_t *)hw_mgr_intf,
+ &iommu_hdl);
if (rc) {
CAM_ERR(CAM_ICP, "ICP HW manager init failed: %d", rc);
goto hw_init_fail;
@@ -181,6 +203,9 @@ static int cam_icp_probe(struct platform_device *pdev)
goto ctx_fail;
}
+ cam_smmu_set_client_page_fault_handler(iommu_hdl,
+ cam_icp_dev_iommu_fault_handler, node);
+
g_icp_dev.open_cnt = 0;
mutex_init(&g_icp_dev.icp_lock);
diff --git a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h
index 3e636c65138d..3d0ee725dcad 100644
--- a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h
+++ b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h
@@ -23,10 +23,10 @@
* @reserved: reserved field
*/
struct hfi_mem {
- uint64_t len;
- uint64_t kva;
- uint32_t iova;
- uint32_t reserved;
+ uint64_t len;
+ uintptr_t kva;
+ uint32_t iova;
+ uint32_t reserved;
};
/**
diff --git a/drivers/media/platform/msm/camera/cam_icp/hfi.c b/drivers/media/platform/msm/camera/cam_icp/hfi.c
index 14a3e656e76d..cbe6886e5712 100644
--- a/drivers/media/platform/msm/camera/cam_icp/hfi.c
+++ b/drivers/media/platform/msm/camera/cam_icp/hfi.c
@@ -42,6 +42,9 @@
#define HFI_MAX_POLL_TRY 5
+#define HFI_MAX_PC_POLL_TRY 50
+#define HFI_POLL_TRY_SLEEP 20
+
static struct hfi_info *g_hfi;
unsigned int g_icp_mmu_hdl;
static DEFINE_MUTEX(hfi_cmd_q_mutex);
@@ -513,8 +516,8 @@ void cam_hfi_disable_cpu(void __iomem *icp_base)
uint32_t val;
uint32_t try = 0;
- while (try < HFI_MAX_POLL_TRY) {
- data = cam_io_r(icp_base + HFI_REG_A5_CSR_A5_STATUS);
+ while (try < HFI_MAX_PC_POLL_TRY) {
+ data = cam_io_r_mb(icp_base + HFI_REG_A5_CSR_A5_STATUS);
CAM_DBG(CAM_HFI, "wfi status = %x\n", (int)data);
if (data & ICP_CSR_A5_STATUS_WFI)
@@ -523,7 +526,7 @@ void cam_hfi_disable_cpu(void __iomem *icp_base)
* and Host can the proceed. No interrupt is expected from FW
* at this time.
*/
- msleep(100);
+ msleep_interruptible(HFI_POLL_TRY_SLEEP);
try++;
}
@@ -533,6 +536,11 @@ void cam_hfi_disable_cpu(void __iomem *icp_base)
val = cam_io_r(icp_base + HFI_REG_A5_CSR_NSEC_RESET);
cam_io_w_mb(val, icp_base + HFI_REG_A5_CSR_NSEC_RESET);
+
+ cam_io_w_mb((uint32_t)ICP_INIT_REQUEST_RESET,
+ icp_base + HFI_REG_HOST_ICP_INIT_REQUEST);
+ cam_io_w_mb((uint32_t)INTR_DISABLE,
+ g_hfi->csr_base + HFI_REG_A5_CSR_A2HOSTINTEN);
}
void cam_hfi_enable_cpu(void __iomem *icp_base)
@@ -883,11 +891,6 @@ void cam_hfi_deinit(void __iomem *icp_base)
g_hfi->cmd_q_state = false;
g_hfi->msg_q_state = false;
- cam_io_w_mb((uint32_t)ICP_INIT_REQUEST_RESET,
- icp_base + HFI_REG_HOST_ICP_INIT_REQUEST);
-
- cam_io_w_mb((uint32_t)INTR_DISABLE,
- g_hfi->csr_base + HFI_REG_A5_CSR_A2HOSTINTEN);
kzfree(g_hfi);
g_hfi = NULL;
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c
index 8f2c76943ebc..18bd6d8dd2c7 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c
@@ -167,7 +167,7 @@ static int32_t cam_icp_program_fw(const uint8_t *elf,
if (prg_hdr->p_filesz != 0) {
src = (u8 *)((u8 *)elf + prg_hdr->p_offset);
dest = (u8 *)(((u8 *)core_info->fw_kva_addr) +
- prg_hdr->p_vaddr);
+ prg_hdr->p_vaddr);
memcpy_toio(dest, src, prg_hdr->p_filesz);
}
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.h
index f4bc813353a8..9b02167b3c7f 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.h
@@ -69,7 +69,7 @@ struct cam_a5_device_core_info {
const struct firmware *fw_elf;
void *fw;
uint32_t fw_buf;
- uint64_t fw_kva_addr;
+ uintptr_t fw_kva_addr;
uint64_t fw_buf_len;
struct cam_icp_a5_query_cap query_cap;
struct cam_icp_a5_acquire_dev a5_acquire[8];
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c
index d01637436a51..b969c92ccbc8 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c
@@ -199,8 +199,10 @@ static int cam_bps_handle_resume(struct cam_hw_info *bps_dev)
cam_cpas_reg_read(core_info->cpas_handle,
CAM_CPAS_REG_CPASTOP, hw_info->pwr_ctrl, true, &pwr_ctrl);
if (pwr_ctrl & BPS_COLLAPSE_MASK) {
- CAM_ERR(CAM_ICP, "BPS: pwr_ctrl(%x)", pwr_ctrl);
- return -EINVAL;
+ CAM_DBG(CAM_ICP, "BPS: pwr_ctrl set(%x)", pwr_ctrl);
+ cam_cpas_reg_write(core_info->cpas_handle,
+ CAM_CPAS_REG_CPASTOP,
+ hw_info->pwr_ctrl, true, 0);
}
rc = cam_bps_transfer_gdsc_control(soc_info);
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
index 1d5e4832535e..ca8bb3c8d543 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
@@ -1455,7 +1455,8 @@ static int cam_icp_mgr_handle_frame_process(uint32_t *msg_ptr, int flag)
ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
request_id = ioconfig_ack->user_data2;
- ctx_data = (struct cam_icp_hw_ctx_data *)ioconfig_ack->user_data1;
+ ctx_data = (struct cam_icp_hw_ctx_data *)
+ U64_TO_PTR(ioconfig_ack->user_data1);
if (!ctx_data) {
CAM_ERR(CAM_ICP, "Invalid Context");
return -EINVAL;
@@ -1562,8 +1563,8 @@ static int cam_icp_mgr_process_msg_config_io(uint32_t *msg_ptr)
ipe_config_ack->rc, ioconfig_ack->err_type);
return -EIO;
}
- ctx_data =
- (struct cam_icp_hw_ctx_data *)ioconfig_ack->user_data1;
+ ctx_data = (struct cam_icp_hw_ctx_data *)
+ U64_TO_PTR(ioconfig_ack->user_data1);
if (!ctx_data) {
CAM_ERR(CAM_ICP, "wrong ctx data from IPE response");
return -EINVAL;
@@ -1577,8 +1578,8 @@ static int cam_icp_mgr_process_msg_config_io(uint32_t *msg_ptr)
bps_config_ack->rc, ioconfig_ack->opcode);
return -EIO;
}
- ctx_data =
- (struct cam_icp_hw_ctx_data *)ioconfig_ack->user_data1;
+ ctx_data = (struct cam_icp_hw_ctx_data *)
+ U64_TO_PTR(ioconfig_ack->user_data1);
if (!ctx_data) {
CAM_ERR(CAM_ICP, "wrong ctx data from BPS response");
return -EINVAL;
@@ -1601,7 +1602,9 @@ static int cam_icp_mgr_process_msg_create_handle(uint32_t *msg_ptr)
return -EINVAL;
}
- ctx_data = (struct cam_icp_hw_ctx_data *)create_handle_ack->user_data1;
+ ctx_data =
+ (struct cam_icp_hw_ctx_data *)(uintptr_t)
+ create_handle_ack->user_data1;
if (!ctx_data) {
CAM_ERR(CAM_ICP, "Invalid ctx_data");
return -EINVAL;
@@ -1632,7 +1635,8 @@ static int cam_icp_mgr_process_msg_ping_ack(uint32_t *msg_ptr)
return -EINVAL;
}
- ctx_data = (struct cam_icp_hw_ctx_data *)ping_ack->user_data;
+ ctx_data = (struct cam_icp_hw_ctx_data *)
+ U64_TO_PTR(ping_ack->user_data);
if (!ctx_data) {
CAM_ERR(CAM_ICP, "Invalid ctx_data");
return -EINVAL;
@@ -1696,8 +1700,8 @@ static int cam_icp_mgr_process_direct_ack_msg(uint32_t *msg_ptr)
case HFI_IPEBPS_CMD_OPCODE_IPE_ABORT:
case HFI_IPEBPS_CMD_OPCODE_BPS_ABORT:
ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
- ctx_data =
- (struct cam_icp_hw_ctx_data *)ioconfig_ack->user_data1;
+ ctx_data = (struct cam_icp_hw_ctx_data *)
+ U64_TO_PTR(ioconfig_ack->user_data1);
if (ctx_data->state != CAM_ICP_CTX_STATE_FREE)
complete(&ctx_data->wait_complete);
CAM_DBG(CAM_ICP, "received IPE/BPS/ ABORT: ctx_state =%d",
@@ -1706,8 +1710,8 @@ static int cam_icp_mgr_process_direct_ack_msg(uint32_t *msg_ptr)
case HFI_IPEBPS_CMD_OPCODE_IPE_DESTROY:
case HFI_IPEBPS_CMD_OPCODE_BPS_DESTROY:
ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
- ctx_data =
- (struct cam_icp_hw_ctx_data *)ioconfig_ack->user_data1;
+ ctx_data = (struct cam_icp_hw_ctx_data *)
+ U64_TO_PTR(ioconfig_ack->user_data1);
if ((ctx_data->state == CAM_ICP_CTX_STATE_RELEASE) ||
(ctx_data->state == CAM_ICP_CTX_STATE_IN_USE)) {
complete(&ctx_data->wait_complete);
@@ -2132,7 +2136,7 @@ static int cam_icp_alloc_shared_mem(struct cam_mem_mgr_memory_desc *qtbl)
static int cam_icp_allocate_fw_mem(void)
{
int rc;
- uint64_t kvaddr;
+ uintptr_t kvaddr;
size_t len;
dma_addr_t iova;
@@ -2146,7 +2150,7 @@ static int cam_icp_allocate_fw_mem(void)
icp_hw_mgr.hfi_mem.fw_buf.iova = iova;
icp_hw_mgr.hfi_mem.fw_buf.smmu_hdl = icp_hw_mgr.iommu_hdl;
- CAM_DBG(CAM_ICP, "kva: %llX, iova: %llx, len: %zu",
+ CAM_DBG(CAM_ICP, "kva: %zX, iova: %llx, len: %zu",
kvaddr, iova, len);
return rc;
@@ -2486,7 +2490,7 @@ static int cam_icp_mgr_abort_handle(
reinit_completion(&ctx_data->wait_complete);
abort_cmd->num_fw_handles = 1;
abort_cmd->fw_handles[0] = ctx_data->fw_handle;
- abort_cmd->user_data1 = (uint64_t)ctx_data;
+ abort_cmd->user_data1 = PTR_TO_U64(ctx_data);
abort_cmd->user_data2 = (uint64_t)0x0;
rc = hfi_write_cmd(abort_cmd);
@@ -2537,7 +2541,7 @@ static int cam_icp_mgr_destroy_handle(
reinit_completion(&ctx_data->wait_complete);
destroy_cmd->num_fw_handles = 1;
destroy_cmd->fw_handles[0] = ctx_data->fw_handle;
- destroy_cmd->user_data1 = (uint64_t)ctx_data;
+ destroy_cmd->user_data1 = PTR_TO_U64(ctx_data);
destroy_cmd->user_data2 = (uint64_t)0x0;
memcpy(destroy_cmd->payload.direct, &ctx_data->temp_payload,
sizeof(uint64_t));
@@ -3097,7 +3101,7 @@ static int cam_icp_mgr_send_config_io(struct cam_icp_hw_ctx_data *ctx_data,
ioconfig_cmd.num_fw_handles = 1;
ioconfig_cmd.fw_handles[0] = ctx_data->fw_handle;
ioconfig_cmd.payload.indirect = io_buf_addr;
- ioconfig_cmd.user_data1 = (uint64_t)ctx_data;
+ ioconfig_cmd.user_data1 = PTR_TO_U64(ctx_data);
ioconfig_cmd.user_data2 = (uint64_t)0x0;
task_data = (struct hfi_cmd_work_data *)task->payload;
task_data->data = (void *)&ioconfig_cmd;
@@ -3228,7 +3232,7 @@ static int cam_icp_mgr_prepare_frame_process_cmd(
hfi_cmd->num_fw_handles = 1;
hfi_cmd->fw_handles[0] = ctx_data->fw_handle;
hfi_cmd->payload.indirect = fw_cmd_buf_iova_addr;
- hfi_cmd->user_data1 = (uint64_t)ctx_data;
+ hfi_cmd->user_data1 = PTR_TO_U64(ctx_data);
hfi_cmd->user_data2 = request_id;
CAM_DBG(CAM_ICP, "ctx_data : %pK, request_id :%lld cmd_buf %x",
@@ -3277,7 +3281,7 @@ static int cam_icp_mgr_process_cmd_desc(struct cam_icp_hw_mgr *hw_mgr,
dma_addr_t addr;
size_t len;
struct cam_cmd_buf_desc *cmd_desc = NULL;
- uint64_t cpu_addr = 0;
+ uintptr_t cpu_addr = 0;
struct ipe_frame_process_data *frame_process_data = NULL;
struct bps_frame_process_data *bps_frame_process_data = NULL;
struct frame_set *ipe_set = NULL;
@@ -3318,7 +3322,7 @@ static int cam_icp_mgr_process_cmd_desc(struct cam_icp_hw_mgr *hw_mgr,
if (ctx_data->icp_dev_acquire_info->dev_type !=
CAM_ICP_RES_TYPE_BPS) {
- CAM_DBG(CAM_ICP, "cpu addr = %llx", cpu_addr);
+ CAM_DBG(CAM_ICP, "cpu addr = %zx", cpu_addr);
frame_process_data = (struct ipe_frame_process_data *)cpu_addr;
CAM_DBG(CAM_ICP, "%u %u %u", frame_process_data->max_num_cores,
frame_process_data->target_time,
@@ -3339,7 +3343,7 @@ static int cam_icp_mgr_process_cmd_desc(struct cam_icp_hw_mgr *hw_mgr,
}
}
} else {
- CAM_DBG(CAM_ICP, "cpu addr = %llx", cpu_addr);
+ CAM_DBG(CAM_ICP, "cpu addr = %zx", cpu_addr);
bps_frame_process_data =
(struct bps_frame_process_data *)cpu_addr;
CAM_DBG(CAM_ICP, "%u %u",
@@ -3388,10 +3392,11 @@ static int cam_icp_mgr_process_io_cfg(struct cam_icp_hw_mgr *hw_mgr,
prepare_args->num_out_map_entries++;
}
CAM_DBG(CAM_REQ,
- "ctx_id: %u req_id: %llu dir[%d]: %u, fence: %u resource_type = %u",
+ "ctx_id: %u req_id: %llu dir[%d]: %u, fence: %u resource_type = %u memh %x",
ctx_data->ctx_id, packet->header.request_id, i,
io_cfg_ptr[i].direction, io_cfg_ptr[i].fence,
- io_cfg_ptr[i].resource_type);
+ io_cfg_ptr[i].resource_type,
+ io_cfg_ptr[i].mem_handle[0]);
}
if (prepare_args->num_in_map_entries > 1)
@@ -3439,7 +3444,7 @@ static int cam_icp_packet_generic_blob_handler(void *user_data,
uint32_t index;
size_t io_buf_size;
int rc = 0;
- uint64_t pResource;
+ uintptr_t pResource;
if (!blob_data || (blob_size == 0)) {
CAM_ERR(CAM_ICP, "Invalid blob info %pK %d", blob_data,
@@ -3545,7 +3550,7 @@ static int cam_icp_mgr_process_cfg_io_cmd(
ioconfig_cmd->num_fw_handles = 1;
ioconfig_cmd->fw_handles[0] = ctx_data->fw_handle;
ioconfig_cmd->payload.indirect = io_config;
- ioconfig_cmd->user_data1 = (uint64_t)ctx_data;
+ ioconfig_cmd->user_data1 = PTR_TO_U64(ctx_data);
ioconfig_cmd->user_data2 = request_id;
return 0;
@@ -3593,6 +3598,77 @@ static int cam_icp_mgr_update_hfi_frame_process(
return rc;
}
+static void cam_icp_mgr_print_io_bufs(struct cam_packet *packet,
+ int32_t iommu_hdl, int32_t sec_mmu_hdl, uint32_t pf_buf_info,
+ bool *mem_found)
+{
+ uint64_t iova_addr;
+ size_t src_buf_size;
+ int i;
+ int j;
+ int rc = 0;
+ int32_t mmu_hdl;
+
+ struct cam_buf_io_cfg *io_cfg = NULL;
+
+ if (mem_found)
+ *mem_found = false;
+
+ io_cfg = (struct cam_buf_io_cfg *)((uint32_t *)&packet->payload +
+ packet->io_configs_offset / 4);
+
+ for (i = 0; i < packet->num_io_configs; i++) {
+ for (j = 0; j < CAM_PACKET_MAX_PLANES; j++) {
+ if (!io_cfg[i].mem_handle[j])
+ break;
+
+ if (GET_FD_FROM_HANDLE(io_cfg[i].mem_handle[j]) ==
+ GET_FD_FROM_HANDLE(pf_buf_info)) {
+ CAM_INFO(CAM_ICP,
+ "Found PF at port: %d mem %x fd: %x",
+ io_cfg[i].resource_type,
+ io_cfg[i].mem_handle[j],
+ pf_buf_info);
+ if (mem_found)
+ *mem_found = true;
+ }
+
+ CAM_INFO(CAM_ICP, "port: %d f: %u format: %d dir %d",
+ io_cfg[i].resource_type,
+ io_cfg[i].fence,
+ io_cfg[i].format,
+ io_cfg[i].direction);
+
+ mmu_hdl = cam_mem_is_secure_buf(
+ io_cfg[i].mem_handle[j]) ? sec_mmu_hdl :
+ iommu_hdl;
+ rc = cam_mem_get_io_buf(io_cfg[i].mem_handle[j],
+ mmu_hdl, &iova_addr, &src_buf_size);
+ if (rc < 0) {
+ CAM_ERR(CAM_UTIL, "get src buf address fail");
+ continue;
+ }
+ if (iova_addr >> 32) {
+ CAM_ERR(CAM_ICP, "Invalid mapped address");
+ rc = -EINVAL;
+ continue;
+ }
+
+ CAM_INFO(CAM_ICP,
+ "pln %d w %d h %d size %d addr 0x%x offset 0x%x memh %x",
+ j, io_cfg[i].planes[j].width,
+ io_cfg[i].planes[j].height,
+ (int32_t)src_buf_size,
+ (unsigned int)iova_addr,
+ io_cfg[i].offsets[j],
+ io_cfg[i].mem_handle[j]);
+
+ iova_addr += io_cfg[i].offsets[j];
+
+ }
+ }
+}
+
static int cam_icp_mgr_prepare_hw_update(void *hw_mgr_priv,
void *prepare_hw_update_args)
{
@@ -3635,6 +3711,8 @@ static int cam_icp_mgr_prepare_hw_update(void *hw_mgr_priv,
return rc;
}
+ prepare_args->pf_data->packet = packet;
+
CAM_DBG(CAM_REQ, "req id = %lld for ctx = %u",
packet->header.request_id, ctx_data->ctx_id);
/* Update Buffer Address from handles and patch information */
@@ -3671,7 +3749,7 @@ static int cam_icp_mgr_prepare_hw_update(void *hw_mgr_priv,
fw_cmd_buf_iova_addr);
prepare_args->num_hw_update_entries = 1;
- prepare_args->hw_update_entries[0].addr = (uint64_t)hfi_cmd;
+ prepare_args->hw_update_entries[0].addr = (uintptr_t)hfi_cmd;
prepare_args->priv = &ctx_data->hfi_frame_process.frame_info[idx];
CAM_DBG(CAM_ICP, "X: req id = %lld ctx_id = %u",
@@ -3968,7 +4046,7 @@ static int cam_icp_mgr_create_handle(uint32_t dev_type,
create_handle.size = sizeof(struct hfi_cmd_create_handle);
create_handle.pkt_type = HFI_CMD_IPEBPS_CREATE_HANDLE;
create_handle.handle_type = dev_type;
- create_handle.user_data1 = (uint64_t)ctx_data;
+ create_handle.user_data1 = PTR_TO_U64(ctx_data);
reinit_completion(&ctx_data->wait_complete);
task_data = (struct hfi_cmd_work_data *)task->payload;
task_data->data = (void *)&create_handle;
@@ -4013,7 +4091,7 @@ static int cam_icp_mgr_send_ping(struct cam_icp_hw_ctx_data *ctx_data)
ping_pkt.size = sizeof(struct hfi_cmd_ping_pkt);
ping_pkt.pkt_type = HFI_CMD_SYS_PING;
- ping_pkt.user_data = (uint64_t)ctx_data;
+ ping_pkt.user_data = PTR_TO_U64(ctx_data);
init_completion(&ctx_data->wait_complete);
task_data = (struct hfi_cmd_work_data *)task->payload;
task_data->data = (void *)&ping_pkt;
@@ -4313,7 +4391,7 @@ static int cam_icp_mgr_get_hw_caps(void *hw_mgr_priv, void *hw_caps_args)
mutex_lock(&hw_mgr->hw_mgr_mutex);
if (copy_from_user(&icp_hw_mgr.icp_caps,
- (void __user *)query_cap->caps_handle,
+ u64_to_user_ptr(query_cap->caps_handle),
sizeof(struct cam_icp_query_cap_cmd))) {
CAM_ERR(CAM_ICP, "copy_from_user failed");
rc = -EFAULT;
@@ -4327,7 +4405,7 @@ static int cam_icp_mgr_get_hw_caps(void *hw_mgr_priv, void *hw_caps_args)
icp_hw_mgr.icp_caps.dev_iommu_handle.non_secure = hw_mgr->iommu_hdl;
icp_hw_mgr.icp_caps.dev_iommu_handle.secure = hw_mgr->iommu_sec_hdl;
- if (copy_to_user((void __user *)query_cap->caps_handle,
+ if (copy_to_user(u64_to_user_ptr(query_cap->caps_handle),
&icp_hw_mgr.icp_caps, sizeof(struct cam_icp_query_cap_cmd))) {
CAM_ERR(CAM_ICP, "copy_to_user failed");
rc = -EFAULT;
@@ -4551,7 +4629,35 @@ cmd_work_failed:
return rc;
}
-int cam_icp_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl)
+static int cam_icp_mgr_cmd(void *hw_mgr_priv, void *cmd_args)
+{
+ int rc = 0;
+ struct cam_hw_cmd_args *hw_cmd_args = cmd_args;
+ struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
+
+ if (!hw_mgr_priv || !cmd_args) {
+ CAM_ERR(CAM_ICP, "Invalid arguments");
+ return -EINVAL;
+ }
+
+ switch (hw_cmd_args->cmd_type) {
+ case CAM_HW_MGR_CMD_DUMP_PF_INFO:
+ cam_icp_mgr_print_io_bufs(
+ hw_cmd_args->u.pf_args.pf_data.packet,
+ hw_mgr->iommu_hdl,
+ hw_mgr->iommu_sec_hdl,
+ hw_cmd_args->u.pf_args.buf_info,
+ hw_cmd_args->u.pf_args.mem_found);
+ break;
+ default:
+ CAM_ERR(CAM_ICP, "Invalid cmd");
+ }
+
+ return rc;
+}
+
+int cam_icp_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl,
+ int *iommu_hdl)
{
int i, rc = 0;
struct cam_hw_mgr_intf *hw_mgr_intf;
@@ -4574,6 +4680,7 @@ int cam_icp_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl)
hw_mgr_intf->hw_open = cam_icp_mgr_hw_open_u;
hw_mgr_intf->hw_close = cam_icp_mgr_hw_close_u;
hw_mgr_intf->hw_flush = cam_icp_mgr_hw_flush;
+ hw_mgr_intf->hw_cmd = cam_icp_mgr_cmd;
icp_hw_mgr.secure_mode = CAM_SECURE_MODE_NON_SECURE;
mutex_init(&icp_hw_mgr.hw_mgr_mutex);
@@ -4617,6 +4724,9 @@ int cam_icp_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl)
if (rc)
goto icp_wq_create_failed;
+ if (iommu_hdl)
+ *iommu_hdl = icp_hw_mgr.iommu_hdl;
+
init_completion(&icp_hw_mgr.a5_complete);
return rc;
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h
index 771c4ed7c55c..7bb9b9ed18a2 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h
@@ -28,7 +28,7 @@
#define CPAS_IPE1_BIT 0x2000
int cam_icp_hw_mgr_init(struct device_node *of_node,
- uint64_t *hw_mgr_hdl);
+ uint64_t *hw_mgr_hdl, int *iommu_hdl);
/**
* struct cam_icp_cpas_vote
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c
index 620a4bd4943b..142fcdc6017d 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c
@@ -195,9 +195,12 @@ static int cam_ipe_handle_resume(struct cam_hw_info *ipe_dev)
CAM_CPAS_REG_CPASTOP, hw_info->pwr_ctrl,
true, &pwr_ctrl);
if (pwr_ctrl & IPE_COLLAPSE_MASK) {
- CAM_ERR(CAM_ICP, "IPE: resume failed : %d", pwr_ctrl);
- return -EINVAL;
+ CAM_DBG(CAM_ICP, "IPE pwr_ctrl set(%x)", pwr_ctrl);
+ cam_cpas_reg_write(core_info->cpas_handle,
+ CAM_CPAS_REG_CPASTOP,
+ hw_info->pwr_ctrl, true, 0);
}
+
rc = cam_ipe_transfer_gdsc_control(soc_info);
cam_cpas_reg_read(core_info->cpas_handle,
CAM_CPAS_REG_CPASTOP, hw_info->pwr_ctrl, true, &pwr_ctrl);
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
index 5760371bbde3..316bb8efcf23 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -23,8 +23,15 @@
#include "cam_req_mgr_dev.h"
#include "cam_trace.h"
#include "cam_debug_util.h"
+#include "cam_packet_util.h"
+#include "cam_context_utils.h"
+#include "cam_common_util.h"
+
+static const char isp_dev_name[] = "cam-isp";
+
+static int cam_isp_context_dump_active_request(void *data, unsigned long iova,
+ uint32_t buf_info);
-static const char isp_dev_name[] = "isp";
static void __cam_isp_ctx_update_state_monitor_array(
struct cam_isp_context *ctx_isp,
enum cam_isp_state_change_trigger trigger_type,
@@ -375,7 +382,7 @@ static int __cam_isp_ctx_handle_buf_done_in_activated_state(
continue;
}
- if (!bubble_state) {
+ if (!req_isp->bubble_detected) {
CAM_DBG(CAM_ISP,
"Sync with success: req %lld res 0x%x fd 0x%x",
req->request_id,
@@ -402,15 +409,14 @@ static int __cam_isp_ctx_handle_buf_done_in_activated_state(
} else {
/*
* Ignore the buffer done if bubble detect is on
- * In most case, active list should be empty when
- * bubble detects. But for safety, we just move the
- * current active request to the pending list here.
+ * Increment the ack number here, and queue the
+ * request back to pending list whenever all the
+ * buffers are done.
*/
+ req_isp->num_acked++;
CAM_DBG(CAM_ISP,
"buf done with bubble state %d recovery %d",
bubble_state, req_isp->bubble_report);
- list_del_init(&req->list);
- list_add(&req->list, &ctx->pending_req_list);
continue;
}
@@ -431,10 +437,25 @@ static int __cam_isp_ctx_handle_buf_done_in_activated_state(
req_isp->num_fence_map_out);
WARN_ON(req_isp->num_acked > req_isp->num_fence_map_out);
}
- if (req_isp->num_acked == req_isp->num_fence_map_out) {
+
+ if (req_isp->num_acked != req_isp->num_fence_map_out)
+ return rc;
+
+ ctx_isp->active_req_cnt--;
+
+ if (req_isp->bubble_detected && req_isp->bubble_report) {
+ req_isp->num_acked = 0;
+ req_isp->bubble_detected = false;
+ list_del_init(&req->list);
+ list_add(&req->list, &ctx->pending_req_list);
+
+ CAM_DBG(CAM_REQ,
+ "Move active request %lld to pending list(cnt = %d) [bubble recovery]",
+ req->request_id, ctx_isp->active_req_cnt);
+ } else {
list_del_init(&req->list);
list_add_tail(&req->list, &ctx->free_req_list);
- ctx_isp->active_req_cnt--;
+
CAM_DBG(CAM_REQ,
"Move active request %lld to free list(cnt = %d) [all fences done]",
req->request_id, ctx_isp->active_req_cnt);
@@ -732,15 +753,13 @@ static int __cam_isp_ctx_epoch_in_applied(struct cam_isp_context *ctx_isp,
req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request,
list);
req_isp = (struct cam_isp_ctx_req *)req->req_priv;
+ req_isp->bubble_detected = true;
CAM_DBG(CAM_ISP, "Report Bubble flag %d", req_isp->bubble_report);
if (req_isp->bubble_report && ctx->ctx_crm_intf &&
ctx->ctx_crm_intf->notify_err) {
struct cam_req_mgr_error_notify notify;
- list_del_init(&req->list);
- list_add(&req->list, &ctx->pending_req_list);
-
notify.link_hdl = ctx->link_hdl;
notify.dev_hdl = ctx->dev_hdl;
notify.req_id = req->request_id;
@@ -749,18 +768,19 @@ static int __cam_isp_ctx_epoch_in_applied(struct cam_isp_context *ctx_isp,
CAM_DBG(CAM_ISP, "Notify CRM about Bubble frame %lld",
ctx_isp->frame_id);
} else {
- /*
- * Since can not bubble report, always move the request to
- * active list.
- */
- list_del_init(&req->list);
- list_add_tail(&req->list, &ctx->active_req_list);
- ctx_isp->active_req_cnt++;
- CAM_DBG(CAM_REQ, "move request %lld to active list(cnt = %d)",
- req->request_id, ctx_isp->active_req_cnt);
req_isp->bubble_report = 0;
}
+ /*
+ * Always move the request to active list. Let buf done
+ * function handles the rest.
+ */
+ CAM_DBG(CAM_REQ, "move request %lld to active list(cnt = %d)",
+ req->request_id, ctx_isp->active_req_cnt);
+ ctx_isp->active_req_cnt++;
+ list_del_init(&req->list);
+ list_add_tail(&req->list, &ctx->active_req_list);
+
if (req->request_id > ctx_isp->reported_req_id) {
request_id = req->request_id;
ctx_isp->reported_req_id = request_id;
@@ -882,13 +902,12 @@ static int __cam_isp_ctx_epoch_in_bubble_applied(
req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request,
list);
req_isp = (struct cam_isp_ctx_req *)req->req_priv;
- list_del_init(&req->list);
+ req_isp->bubble_detected = true;
if (req_isp->bubble_report && ctx->ctx_crm_intf &&
ctx->ctx_crm_intf->notify_err) {
struct cam_req_mgr_error_notify notify;
- list_add(&req->list, &ctx->pending_req_list);
notify.link_hdl = ctx->link_hdl;
notify.dev_hdl = ctx->dev_hdl;
notify.req_id = req->request_id;
@@ -898,17 +917,19 @@ static int __cam_isp_ctx_epoch_in_bubble_applied(
"Notify CRM about Bubble req_id %llu frame %lld",
req->request_id, ctx_isp->frame_id);
} else {
- /*
- * If we can not report bubble, then treat it as if no bubble
- * report. Just move the req to active list.
- */
- list_add_tail(&req->list, &ctx->active_req_list);
- ctx_isp->active_req_cnt++;
- CAM_DBG(CAM_ISP, "move request %lld to active list(cnt = %d)",
- req->request_id, ctx_isp->active_req_cnt);
req_isp->bubble_report = 0;
}
+ /*
+ * Always move the request to active list. Let buf done
+ * function handles the rest.
+ */
+ CAM_DBG(CAM_ISP, "move request %lld to active list(cnt = %d)",
+ req->request_id, ctx_isp->active_req_cnt);
+ ctx_isp->active_req_cnt++;
+ list_del_init(&req->list);
+ list_add_tail(&req->list, &ctx->active_req_list);
+
if (!req_isp->bubble_report) {
if (req->request_id > ctx_isp->reported_req_id) {
request_id = req->request_id;
@@ -1411,6 +1432,7 @@ static int __cam_isp_ctx_flush_req_in_top_state(
CAM_DBG(CAM_ISP, "try to flush active list");
rc = __cam_isp_ctx_flush_req(ctx, &ctx->active_req_list,
flush_req);
+ ctx_isp->active_req_cnt = 0;
spin_unlock_bh(&ctx->lock);
/* Start hw */
@@ -1609,12 +1631,12 @@ static int __cam_isp_ctx_rdi_only_sof_in_bubble_applied(
CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
- if (list_empty(&ctx->pending_req_list)) {
+ if (list_empty(&ctx->wait_req_list)) {
/*
* If no pending req in epoch, this is an error case.
* The recovery is to go back to sof state
*/
- CAM_ERR(CAM_ISP, "No pending request");
+ CAM_ERR(CAM_ISP, "No wait request");
ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
/* Send SOF event as empty frame*/
@@ -1624,9 +1646,10 @@ static int __cam_isp_ctx_rdi_only_sof_in_bubble_applied(
goto end;
}
- req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
+ req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request,
list);
req_isp = (struct cam_isp_ctx_req *)req->req_priv;
+ req_isp->bubble_detected = true;
CAM_DBG(CAM_ISP, "Report Bubble flag %d", req_isp->bubble_report);
if (req_isp->bubble_report && ctx->ctx_crm_intf &&
@@ -1641,18 +1664,19 @@ static int __cam_isp_ctx_rdi_only_sof_in_bubble_applied(
CAM_DBG(CAM_ISP, "Notify CRM about Bubble frame %lld",
ctx_isp->frame_id);
} else {
- /*
- * Since can not bubble report, always move the request to
- * active list.
- */
- list_del_init(&req->list);
- list_add_tail(&req->list, &ctx->active_req_list);
- ctx_isp->active_req_cnt++;
- CAM_DBG(CAM_ISP, "move request %lld to active list(cnt = %d)",
- req->request_id, ctx_isp->active_req_cnt);
req_isp->bubble_report = 0;
}
+ /*
+ * Always move the request to active list. Let buf done
+ * function handles the rest.
+ */
+ ctx_isp->active_req_cnt++;
+ list_del_init(&req->list);
+ list_add_tail(&req->list, &ctx->active_req_list);
+ CAM_DBG(CAM_ISP, "move request %lld to active list(cnt = %d)",
+ req->request_id, ctx_isp->active_req_cnt);
+
if (!req_isp->bubble_report) {
if (req->request_id > ctx_isp->reported_req_id) {
request_id = req->request_id;
@@ -2008,7 +2032,7 @@ static int __cam_isp_ctx_config_dev_in_top_state(
int rc = 0, i;
struct cam_ctx_request *req = NULL;
struct cam_isp_ctx_req *req_isp;
- uint64_t packet_addr;
+ uintptr_t packet_addr;
struct cam_packet *packet;
size_t len = 0;
struct cam_hw_prepare_update_args cfg;
@@ -2038,16 +2062,16 @@ static int __cam_isp_ctx_config_dev_in_top_state(
/* for config dev, only memory handle is supported */
/* map packet from the memhandle */
rc = cam_mem_get_cpu_buf((int32_t) cmd->packet_handle,
- (uint64_t *) &packet_addr, &len);
+ &packet_addr, &len);
if (rc != 0) {
CAM_ERR(CAM_ISP, "Can not get packet address");
rc = -EINVAL;
goto free_req;
}
- packet = (struct cam_packet *) (packet_addr + cmd->offset);
+ packet = (struct cam_packet *)(packet_addr + (uint32_t)cmd->offset);
CAM_DBG(CAM_ISP, "pack_handle %llx", cmd->packet_handle);
- CAM_DBG(CAM_ISP, "packet address is 0x%llx", packet_addr);
+ CAM_DBG(CAM_ISP, "packet address is 0x%zx", packet_addr);
CAM_DBG(CAM_ISP, "packet with length %zu, offset 0x%llx",
len, cmd->offset);
CAM_DBG(CAM_ISP, "Packet request id %lld",
@@ -2066,6 +2090,7 @@ static int __cam_isp_ctx_config_dev_in_top_state(
cfg.out_map_entries = req_isp->fence_map_out;
cfg.in_map_entries = req_isp->fence_map_in;
cfg.priv = &req_isp->hw_update_data;
+ cfg.pf_data = &(req->pf_data);
CAM_DBG(CAM_ISP, "try to prepare config packet......");
@@ -2080,6 +2105,7 @@ static int __cam_isp_ctx_config_dev_in_top_state(
req_isp->num_fence_map_out = cfg.num_out_map_entries;
req_isp->num_fence_map_in = cfg.num_in_map_entries;
req_isp->num_acked = 0;
+ req_isp->bubble_detected = false;
for (i = 0; i < req_isp->num_fence_map_out; i++) {
rc = cam_sync_get_obj_ref(req_isp->fence_map_out[i].sync_id);
@@ -2164,7 +2190,8 @@ static int __cam_isp_ctx_acquire_dev_in_available(struct cam_context *ctx,
struct cam_hw_release_args release;
struct cam_isp_context *ctx_isp =
(struct cam_isp_context *) ctx->ctx_priv;
- struct cam_isp_hw_cmd_args hw_cmd_args;
+ struct cam_hw_cmd_args hw_cmd_args;
+ struct cam_isp_hw_cmd_args isp_hw_cmd_args;
if (!ctx->hw_mgr_intf) {
CAM_ERR(CAM_ISP, "HW interface is not ready");
@@ -2200,7 +2227,7 @@ static int __cam_isp_ctx_acquire_dev_in_available(struct cam_context *ctx,
CAM_DBG(CAM_ISP, "start copy %d resources from user",
cmd->num_resources);
- if (copy_from_user(isp_res, (void __user *)cmd->resource_hdl,
+ if (copy_from_user(isp_res, u64_to_user_ptr(cmd->resource_hdl),
sizeof(*isp_res)*cmd->num_resources)) {
rc = -EFAULT;
goto free_res;
@@ -2209,7 +2236,7 @@ static int __cam_isp_ctx_acquire_dev_in_available(struct cam_context *ctx,
param.context_data = ctx;
param.event_cb = ctx->irq_cb_intf;
param.num_acq = cmd->num_resources;
- param.acquire_info = (uint64_t) isp_res;
+ param.acquire_info = (uintptr_t) isp_res;
/* call HW manager to reserve the resource */
rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
@@ -2221,7 +2248,9 @@ static int __cam_isp_ctx_acquire_dev_in_available(struct cam_context *ctx,
/* Query the context has rdi only resource */
hw_cmd_args.ctxt_to_hw_map = param.ctxt_to_hw_map;
- hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_IS_RDI_ONLY_CONTEXT;
+ hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
+ isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_IS_RDI_ONLY_CONTEXT;
+ hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
&hw_cmd_args);
if (rc) {
@@ -2229,7 +2258,7 @@ static int __cam_isp_ctx_acquire_dev_in_available(struct cam_context *ctx,
goto free_hw;
}
- if (hw_cmd_args.u.is_rdi_only_context) {
+ if (isp_hw_cmd_args.u.is_rdi_only_context) {
/*
* this context has rdi only resource assign rdi only
* state machine
@@ -2248,8 +2277,9 @@ static int __cam_isp_ctx_acquire_dev_in_available(struct cam_context *ctx,
cam_isp_ctx_activated_state_machine;
}
- ctx_isp->rdi_only_context = hw_cmd_args.u.is_rdi_only_context;
+ ctx_isp->rdi_only_context = isp_hw_cmd_args.u.is_rdi_only_context;
ctx_isp->hw_ctx = param.ctxt_to_hw_map;
+ ctx->ctxt_to_hw_map = param.ctxt_to_hw_map;
req_hdl_param.session_hdl = cmd->session_handle;
/* bridge is not ready for these flags. so false for now */
@@ -2276,7 +2306,7 @@ static int __cam_isp_ctx_acquire_dev_in_available(struct cam_context *ctx,
CAM_DBG(CAM_ISP,
"Acquire success on session_hdl 0x%x num_rsrces %d RDI only %d ctx %u",
cmd->session_handle, cmd->num_resources,
- (hw_cmd_args.u.is_rdi_only_context ? 1 : 0), ctx->ctx_id);
+ (isp_hw_cmd_args.u.is_rdi_only_context ? 1 : 0), ctx->ctx_id);
kfree(isp_res);
return rc;
@@ -2567,12 +2597,15 @@ static int __cam_isp_ctx_release_dev_in_activated(struct cam_context *ctx,
static int __cam_isp_ctx_link_pause(struct cam_context *ctx)
{
int rc = 0;
- struct cam_isp_hw_cmd_args hw_cmd_args;
+ struct cam_hw_cmd_args hw_cmd_args;
+ struct cam_isp_hw_cmd_args isp_hw_cmd_args;
struct cam_isp_context *ctx_isp =
(struct cam_isp_context *) ctx->ctx_priv;
hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
- hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_PAUSE_HW;
+ hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
+ isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_PAUSE_HW;
+ hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
&hw_cmd_args);
@@ -2582,12 +2615,15 @@ static int __cam_isp_ctx_link_pause(struct cam_context *ctx)
static int __cam_isp_ctx_link_resume(struct cam_context *ctx)
{
int rc = 0;
- struct cam_isp_hw_cmd_args hw_cmd_args;
+ struct cam_hw_cmd_args hw_cmd_args;
+ struct cam_isp_hw_cmd_args isp_hw_cmd_args;
struct cam_isp_context *ctx_isp =
(struct cam_isp_context *) ctx->ctx_priv;
hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
- hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_RESUME_HW;
+ hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
+ isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_RESUME_HW;
+ hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
&hw_cmd_args);
@@ -2598,13 +2634,16 @@ static int __cam_isp_ctx_handle_sof_freeze_evt(
struct cam_context *ctx)
{
int rc = 0;
- struct cam_isp_hw_cmd_args hw_cmd_args;
+ struct cam_hw_cmd_args hw_cmd_args;
+ struct cam_isp_hw_cmd_args isp_hw_cmd_args;
struct cam_isp_context *ctx_isp =
(struct cam_isp_context *) ctx->ctx_priv;
hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
- hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_SOF_DEBUG;
- hw_cmd_args.u.sof_irq_enable = 1;
+ hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
+ isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_SOF_DEBUG;
+ isp_hw_cmd_args.u.sof_irq_enable = 1;
+ hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
&hw_cmd_args);
@@ -2746,6 +2785,7 @@ static struct cam_ctx_ops
.flush_req = __cam_isp_ctx_flush_req_in_top_state,
},
.irq_ops = NULL,
+ .pagefault_ops = cam_isp_context_dump_active_request,
},
/* Ready */
{
@@ -2759,6 +2799,7 @@ static struct cam_ctx_ops
.flush_req = __cam_isp_ctx_flush_req_in_ready,
},
.irq_ops = NULL,
+ .pagefault_ops = cam_isp_context_dump_active_request,
},
/* Activated */
{
@@ -2774,10 +2815,55 @@ static struct cam_ctx_ops
.process_evt = __cam_isp_ctx_process_evt,
},
.irq_ops = __cam_isp_ctx_handle_irq_in_activated,
+ .pagefault_ops = cam_isp_context_dump_active_request,
},
};
+static int cam_isp_context_dump_active_request(void *data, unsigned long iova,
+ uint32_t buf_info)
+{
+
+ struct cam_context *ctx = (struct cam_context *)data;
+ struct cam_ctx_request *req = NULL;
+ struct cam_ctx_request *req_temp = NULL;
+ struct cam_isp_ctx_req *req_isp = NULL;
+ struct cam_isp_prepare_hw_update_data *hw_update_data = NULL;
+ struct cam_hw_mgr_dump_pf_data *pf_dbg_entry = NULL;
+ bool mem_found = false;
+ int rc = 0;
+
+ struct cam_isp_context *isp_ctx =
+ (struct cam_isp_context *)ctx->ctx_priv;
+
+ if (!isp_ctx) {
+ CAM_ERR(CAM_ISP, "Invalid isp ctx");
+ return -EINVAL;
+ }
+
+ CAM_INFO(CAM_ISP, "iommu fault handler for isp ctx %d state %d",
+ ctx->ctx_id, ctx->state);
+
+ list_for_each_entry_safe(req, req_temp,
+ &ctx->active_req_list, list) {
+ req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+ hw_update_data = &req_isp->hw_update_data;
+ pf_dbg_entry = &(req->pf_data);
+ CAM_INFO(CAM_ISP, "req_id : %lld ", req->request_id);
+
+ rc = cam_context_dump_pf_info_to_hw(ctx, pf_dbg_entry->packet,
+ iova, buf_info, &mem_found);
+ if (rc)
+ CAM_ERR(CAM_ISP, "Failed to dump pf info");
+
+ if (mem_found)
+ CAM_ERR(CAM_ISP, "Found page fault in req %lld %d",
+ req->request_id, rc);
+ }
+
+ return rc;
+}
+
int cam_isp_context_init(struct cam_isp_context *ctx,
struct cam_context *ctx_base,
struct cam_req_mgr_kmd_ops *crm_node_intf,
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
index 4592e42932d8..6f89841d22bc 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
@@ -115,6 +115,7 @@ struct cam_isp_ctx_req {
uint32_t num_acked;
int32_t bubble_report;
struct cam_isp_prepare_hw_update_data hw_update_data;
+ bool bubble_detected;
};
/**
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.c
index a067915bed7d..c7e5d3836fda 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.c
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.c
@@ -26,9 +26,29 @@
#include "cam_isp_hw_mgr_intf.h"
#include "cam_node.h"
#include "cam_debug_util.h"
+#include "cam_smmu_api.h"
static struct cam_isp_dev g_isp_dev;
+static void cam_isp_dev_iommu_fault_handler(
+ struct iommu_domain *domain, struct device *dev, unsigned long iova,
+ int flags, void *token, uint32_t buf_info)
+{
+ int i = 0;
+ struct cam_node *node = NULL;
+
+ if (!token) {
+ CAM_ERR(CAM_ISP, "invalid token in page handler cb");
+ return;
+ }
+
+ node = (struct cam_node *)token;
+
+ for (i = 0; i < node->ctx_size; i++)
+ cam_context_dump_pf_info(&(node->ctx_list[i]), iova,
+ buf_info);
+}
+
static const struct of_device_id cam_isp_dt_match[] = {
{
.compatible = "qcom,cam-isp"
@@ -36,23 +56,47 @@ static const struct of_device_id cam_isp_dt_match[] = {
{}
};
+static int cam_isp_subdev_open(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh)
+{
+ mutex_lock(&g_isp_dev.isp_mutex);
+ g_isp_dev.open_cnt++;
+ mutex_unlock(&g_isp_dev.isp_mutex);
+
+ return 0;
+}
+
static int cam_isp_subdev_close(struct v4l2_subdev *sd,
struct v4l2_subdev_fh *fh)
{
+ int rc = 0;
struct cam_node *node = v4l2_get_subdevdata(sd);
+ mutex_lock(&g_isp_dev.isp_mutex);
+ if (g_isp_dev.open_cnt <= 0) {
+ CAM_DBG(CAM_ISP, "ISP subdev is already closed");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ g_isp_dev.open_cnt--;
if (!node) {
CAM_ERR(CAM_ISP, "Node ptr is NULL");
- return -EINVAL;
+ rc = -EINVAL;
+ goto end;
}
- cam_node_shutdown(node);
+ if (g_isp_dev.open_cnt == 0)
+ cam_node_shutdown(node);
- return 0;
+end:
+ mutex_unlock(&g_isp_dev.isp_mutex);
+ return rc;
}
static const struct v4l2_subdev_internal_ops cam_isp_subdev_internal_ops = {
.close = cam_isp_subdev_close,
+ .open = cam_isp_subdev_open,
};
static int cam_isp_dev_remove(struct platform_device *pdev)
@@ -82,6 +126,7 @@ static int cam_isp_dev_probe(struct platform_device *pdev)
int i;
struct cam_hw_mgr_intf hw_mgr_intf;
struct cam_node *node;
+ int iommu_hdl = -1;
g_isp_dev.sd.internal_ops = &cam_isp_subdev_internal_ops;
/* Initialze the v4l2 subdevice first. (create cam_node) */
@@ -94,7 +139,7 @@ static int cam_isp_dev_probe(struct platform_device *pdev)
node = (struct cam_node *) g_isp_dev.sd.token;
memset(&hw_mgr_intf, 0, sizeof(hw_mgr_intf));
- rc = cam_isp_hw_mgr_init(pdev->dev.of_node, &hw_mgr_intf);
+ rc = cam_isp_hw_mgr_init(pdev->dev.of_node, &hw_mgr_intf, &iommu_hdl);
if (rc != 0) {
CAM_ERR(CAM_ISP, "Can not initialized ISP HW manager!");
goto unregister;
@@ -119,6 +164,11 @@ static int cam_isp_dev_probe(struct platform_device *pdev)
goto unregister;
}
+ cam_smmu_set_client_page_fault_handler(iommu_hdl,
+ cam_isp_dev_iommu_fault_handler, node);
+
+ mutex_init(&g_isp_dev.isp_mutex);
+
CAM_INFO(CAM_ISP, "Camera ISP probe complete");
return 0;
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.h b/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.h
index 95463ca37a13..a88ed5533907 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.h
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -24,12 +24,15 @@
* @sd: Commone camera subdevice node
* @ctx: Isp base context storage
* @ctx_isp: Isp private context storage
- *
+ * @isp_mutex: ISP dev mutex
+ * @open_cnt: Open device count
*/
struct cam_isp_dev {
struct cam_subdev sd;
struct cam_context ctx[CAM_CTX_MAX];
struct cam_isp_context ctx_isp[CAM_CTX_MAX];
+ struct mutex isp_mutex;
+ int32_t open_cnt;
};
#endif /* __CAM_ISP_DEV_H__ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
index aab323eac67b..e8e9aa1890b3 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -28,6 +28,8 @@
#include "cam_packet_util.h"
#include "cam_debug_util.h"
#include "cam_cpas_api.h"
+#include "cam_mem_mgr_api.h"
+#include "cam_common_util.h"
#define CAM_IFE_HW_ENTRIES_MAX 20
@@ -94,7 +96,8 @@ static int cam_ife_mgr_get_hw_caps(void *hw_mgr_priv,
CAM_DBG(CAM_ISP, "enter");
- if (copy_from_user(&query_isp, (void __user *)query->caps_handle,
+ if (copy_from_user(&query_isp,
+ u64_to_user_ptr(query->caps_handle),
sizeof(struct cam_isp_query_cap_cmd))) {
rc = -EFAULT;
return rc;
@@ -113,8 +116,8 @@ static int cam_ife_mgr_get_hw_caps(void *hw_mgr_priv,
query_isp.dev_caps[i].hw_version.reserved = 0;
}
- if (copy_to_user((void __user *)query->caps_handle, &query_isp,
- sizeof(struct cam_isp_query_cap_cmd)))
+ if (copy_to_user(u64_to_user_ptr(query->caps_handle),
+ &query_isp, sizeof(struct cam_isp_query_cap_cmd)))
rc = -EFAULT;
CAM_DBG(CAM_ISP, "exit rc :%d", rc);
@@ -712,7 +715,8 @@ static int cam_ife_hw_mgr_acquire_res_ife_out_rdi(
ife_out_res->hw_res[0] = vfe_acquire.vfe_out.rsrc_node;
ife_out_res->is_dual_vfe = 0;
ife_out_res->res_id = vfe_out_res_id;
- ife_out_res->res_type = CAM_ISP_RESOURCE_VFE_OUT;
+ ife_out_res->res_type = (enum cam_ife_hw_mgr_res_type)
+ CAM_ISP_RESOURCE_VFE_OUT;
ife_src_res->child[ife_src_res->num_children++] = ife_out_res;
return 0;
@@ -762,6 +766,8 @@ static int cam_ife_hw_mgr_acquire_res_ife_out_pixel(
if (!ife_src_res->hw_res[j])
continue;
+ hw_intf = ife_src_res->hw_res[j]->hw_intf;
+
if (j == CAM_ISP_HW_SPLIT_LEFT) {
vfe_acquire.vfe_out.split_id =
CAM_ISP_HW_SPLIT_LEFT;
@@ -769,7 +775,7 @@ static int cam_ife_hw_mgr_acquire_res_ife_out_pixel(
/*TBD */
vfe_acquire.vfe_out.is_master = 1;
vfe_acquire.vfe_out.dual_slave_core =
- 1;
+ (hw_intf->hw_idx == 0) ? 1 : 0;
} else {
vfe_acquire.vfe_out.is_master = 0;
vfe_acquire.vfe_out.dual_slave_core =
@@ -779,10 +785,10 @@ static int cam_ife_hw_mgr_acquire_res_ife_out_pixel(
vfe_acquire.vfe_out.split_id =
CAM_ISP_HW_SPLIT_RIGHT;
vfe_acquire.vfe_out.is_master = 0;
- vfe_acquire.vfe_out.dual_slave_core = 0;
+ vfe_acquire.vfe_out.dual_slave_core =
+ (hw_intf->hw_idx == 0) ? 1 : 0;
}
- hw_intf = ife_src_res->hw_res[j]->hw_intf;
rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
&vfe_acquire,
sizeof(struct cam_vfe_acquire_args));
@@ -800,7 +806,8 @@ static int cam_ife_hw_mgr_acquire_res_ife_out_pixel(
ife_out_res->hw_res[j]->res_id);
}
- ife_out_res->res_type = CAM_ISP_RESOURCE_VFE_OUT;
+ ife_out_res->res_type = (enum cam_ife_hw_mgr_res_type)
+ CAM_ISP_RESOURCE_VFE_OUT;
ife_out_res->res_id = out_port->res_type;
ife_out_res->parent = ife_src_res;
ife_src_res->child[ife_src_res->num_children++] = ife_out_res;
@@ -913,7 +920,8 @@ static int cam_ife_hw_mgr_acquire_res_ife_src(
CAM_ERR(CAM_ISP, "Wrong IFE CSID Resource Node");
goto err;
}
- ife_src_res->res_type = vfe_acquire.rsrc_type;
+ ife_src_res->res_type = (enum cam_ife_hw_mgr_res_type)
+ vfe_acquire.rsrc_type;
ife_src_res->res_id = vfe_acquire.vfe_in.res_id;
ife_src_res->is_dual_vfe = csid_res->is_dual_vfe;
@@ -1038,7 +1046,7 @@ static int cam_ife_mgr_acquire_cid_res(
}
/* Acquire Left if not already acquired */
- for (i = 0; i < CAM_IFE_CSID_HW_NUM_MAX; i++) {
+ for (i = CAM_IFE_CSID_HW_NUM_MAX - 1; i >= 0; i--) {
if (!ife_hw_mgr->csid_devices[i])
continue;
@@ -1054,7 +1062,7 @@ static int cam_ife_mgr_acquire_cid_res(
}
}
- if (i == CAM_IFE_CSID_HW_NUM_MAX || !csid_acquire.node_res) {
+ if (i == -1 || !csid_acquire.node_res) {
CAM_ERR(CAM_ISP, "Can not acquire ife cid resource for path %d",
csid_path);
goto put_res;
@@ -1145,7 +1153,8 @@ static int cam_ife_hw_mgr_acquire_res_ife_csid_ipp(
goto end;
}
- csid_res->res_type = CAM_ISP_RESOURCE_PIX_PATH;
+ csid_res->res_type = (enum cam_ife_hw_mgr_res_type)
+ CAM_ISP_RESOURCE_PIX_PATH;
csid_res->res_id = CAM_IFE_PIX_PATH_RES_IPP;
if (in_port->usage_type)
@@ -1559,9 +1568,18 @@ static int cam_ife_mgr_acquire_hw(void *hw_mgr_priv,
goto free_res;
}
- in_port = memdup_user((void __user *)isp_resource[i].res_hdl,
+ in_port = memdup_user(
+ u64_to_user_ptr(isp_resource[i].res_hdl),
isp_resource[i].length);
if (!IS_ERR(in_port)) {
+ if (in_port->num_out_res > CAM_IFE_HW_OUT_RES_MAX) {
+ CAM_ERR(CAM_ISP, "too many output res %d",
+ in_port->num_out_res);
+ rc = -EINVAL;
+ kfree(in_port);
+ goto free_res;
+ }
+
in_port_length = sizeof(struct cam_isp_in_port_info) +
(in_port->num_out_res - 1) *
sizeof(struct cam_isp_out_port_info);
@@ -2159,7 +2177,8 @@ static int cam_ife_mgr_start_hw(void *hw_mgr_priv, void *start_hw_args)
struct cam_isp_stop_args stop_isp;
struct cam_ife_hw_mgr_ctx *ctx;
struct cam_ife_hw_mgr_res *hw_mgr_res;
- uint32_t i;
+ struct cam_isp_resource_node *rsrc_node = NULL;
+ uint32_t i, camif_debug;
if (!hw_mgr_priv || !start_isp) {
CAM_ERR(CAM_ISP, "Invalid arguments");
@@ -2193,6 +2212,24 @@ static int cam_ife_mgr_start_hw(void *hw_mgr_priv, void *start_hw_args)
sizeof(g_ife_hw_mgr.debug_cfg.csid_debug));
}
+ camif_debug = g_ife_hw_mgr.debug_cfg.camif_debug;
+ list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
+ for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+ if (!hw_mgr_res->hw_res[i])
+ continue;
+
+ rsrc_node = hw_mgr_res->hw_res[i];
+ if (rsrc_node->process_cmd && (rsrc_node->res_id ==
+ CAM_ISP_HW_VFE_IN_CAMIF)) {
+ rc = hw_mgr_res->hw_res[i]->process_cmd(
+ hw_mgr_res->hw_res[i],
+ CAM_ISP_HW_CMD_SET_CAMIF_DEBUG,
+ &camif_debug,
+ sizeof(camif_debug));
+ }
+ }
+ }
+
rc = cam_ife_hw_mgr_init_hw(ctx);
if (rc) {
CAM_ERR(CAM_ISP, "Init failed");
@@ -2804,45 +2841,133 @@ static int cam_ife_mgr_sof_irq_debug(
return rc;
}
+static void cam_ife_mgr_print_io_bufs(struct cam_packet *packet,
+ int32_t iommu_hdl, int32_t sec_mmu_hdl, uint32_t pf_buf_info,
+ bool *mem_found)
+{
+ uint64_t iova_addr;
+ size_t src_buf_size;
+ int i;
+ int j;
+ int rc = 0;
+ int32_t mmu_hdl;
+
+ struct cam_buf_io_cfg *io_cfg = NULL;
+
+ if (mem_found)
+ *mem_found = false;
+
+ io_cfg = (struct cam_buf_io_cfg *)((uint32_t *)&packet->payload +
+ packet->io_configs_offset / 4);
+
+ for (i = 0; i < packet->num_io_configs; i++) {
+ for (j = 0; j < CAM_PACKET_MAX_PLANES; j++) {
+ if (!io_cfg[i].mem_handle[j])
+ break;
+
+ if (GET_FD_FROM_HANDLE(io_cfg[i].mem_handle[j]) ==
+ GET_FD_FROM_HANDLE(pf_buf_info)) {
+ CAM_INFO(CAM_ISP,
+ "Found PF at port: %d mem %x fd: %x",
+ io_cfg[i].resource_type,
+ io_cfg[i].mem_handle[j],
+ pf_buf_info);
+ if (mem_found)
+ *mem_found = true;
+ }
+
+ CAM_INFO(CAM_ISP, "port: %d f: %u format: %d dir %d",
+ io_cfg[i].resource_type,
+ io_cfg[i].fence,
+ io_cfg[i].format,
+ io_cfg[i].direction);
+
+ mmu_hdl = cam_mem_is_secure_buf(
+ io_cfg[i].mem_handle[j]) ? sec_mmu_hdl :
+ iommu_hdl;
+ rc = cam_mem_get_io_buf(io_cfg[i].mem_handle[j],
+ mmu_hdl, &iova_addr, &src_buf_size);
+ if (rc < 0) {
+ CAM_ERR(CAM_ISP, "get src buf address fail");
+ continue;
+ }
+ if (iova_addr >> 32) {
+ CAM_ERR(CAM_ISP, "Invalid mapped address");
+ rc = -EINVAL;
+ continue;
+ }
+
+ CAM_INFO(CAM_ISP,
+ "pln %d w %d h %d size %d addr 0x%x offset 0x%x memh %x",
+ j, io_cfg[i].planes[j].width,
+ io_cfg[i].planes[j].height,
+ (int32_t)src_buf_size,
+ (unsigned int)iova_addr,
+ io_cfg[i].offsets[j],
+ io_cfg[i].mem_handle[j]);
+ }
+ }
+}
+
static int cam_ife_mgr_cmd(void *hw_mgr_priv, void *cmd_args)
{
int rc = 0;
- struct cam_isp_hw_cmd_args *hw_cmd_args = cmd_args;
- struct cam_ife_hw_mgr_ctx *ctx;
+ struct cam_hw_cmd_args *hw_cmd_args = cmd_args;
+ struct cam_ife_hw_mgr *hw_mgr = hw_mgr_priv;
+ struct cam_ife_hw_mgr_ctx *ctx = (struct cam_ife_hw_mgr_ctx *)
+ hw_cmd_args->ctxt_to_hw_map;
if (!hw_mgr_priv || !cmd_args) {
CAM_ERR(CAM_ISP, "Invalid arguments");
return -EINVAL;
}
- ctx = (struct cam_ife_hw_mgr_ctx *)hw_cmd_args->ctxt_to_hw_map;
if (!ctx || !ctx->ctx_in_use) {
CAM_ERR(CAM_ISP, "Fatal: Invalid context is used");
return -EPERM;
}
switch (hw_cmd_args->cmd_type) {
- case CAM_ISP_HW_MGR_CMD_IS_RDI_ONLY_CONTEXT:
- if (ctx->is_rdi_only_context)
- hw_cmd_args->u.is_rdi_only_context = 1;
- else
- hw_cmd_args->u.is_rdi_only_context = 0;
-
- break;
- case CAM_ISP_HW_MGR_CMD_PAUSE_HW:
- cam_ife_mgr_pause_hw(ctx);
- break;
- case CAM_ISP_HW_MGR_CMD_RESUME_HW:
- cam_ife_mgr_resume_hw(ctx);
+ case CAM_HW_MGR_CMD_INTERNAL: {
+ struct cam_isp_hw_cmd_args *isp_hw_cmd_args =
+ (struct cam_isp_hw_cmd_args *)hw_cmd_args->
+ u.internal_args;
+
+ switch (isp_hw_cmd_args->cmd_type) {
+ case CAM_ISP_HW_MGR_CMD_IS_RDI_ONLY_CONTEXT:
+ if (ctx->is_rdi_only_context)
+ isp_hw_cmd_args->u.is_rdi_only_context = 1;
+ else
+ isp_hw_cmd_args->u.is_rdi_only_context = 0;
+ break;
+ case CAM_ISP_HW_MGR_CMD_PAUSE_HW:
+ cam_ife_mgr_pause_hw(ctx);
+ break;
+ case CAM_ISP_HW_MGR_CMD_RESUME_HW:
+ cam_ife_mgr_resume_hw(ctx);
+ break;
+ case CAM_ISP_HW_MGR_CMD_SOF_DEBUG:
+ cam_ife_mgr_sof_irq_debug(ctx,
+ isp_hw_cmd_args->u.sof_irq_enable);
+ break;
+ default:
+ CAM_ERR(CAM_ISP, "Invalid HW mgr command:0x%x",
+ hw_cmd_args->cmd_type);
+ rc = -EINVAL;
+ break;
+ }
break;
- case CAM_ISP_HW_MGR_CMD_SOF_DEBUG:
- cam_ife_mgr_sof_irq_debug(ctx, hw_cmd_args->u.sof_irq_enable);
+ }
+ case CAM_HW_MGR_CMD_DUMP_PF_INFO:
+ cam_ife_mgr_print_io_bufs(
+ hw_cmd_args->u.pf_args.pf_data.packet,
+ hw_mgr->mgr_common.img_iommu_hdl,
+ hw_mgr->mgr_common.img_iommu_hdl_secure,
+ hw_cmd_args->u.pf_args.buf_info,
+ hw_cmd_args->u.pf_args.mem_found);
break;
default:
- CAM_ERR(CAM_ISP, "Invalid HW mgr command:0x%x",
- hw_cmd_args->cmd_type);
- rc = -EINVAL;
- break;
+ CAM_ERR(CAM_ISP, "Invalid cmd");
}
return rc;
@@ -4038,8 +4163,8 @@ int cam_ife_mgr_do_tasklet_buf_done(void *handler_priv,
evt_payload = evt_payload_priv;
ife_hwr_mgr_ctx = (struct cam_ife_hw_mgr_ctx *)evt_payload->ctx;
- CAM_DBG(CAM_ISP, "addr of evt_payload = %llx core index:0x%x",
- (uint64_t)evt_payload, evt_payload->core_index);
+ CAM_DBG(CAM_ISP, "addr of evt_payload = %pK core index:0x%x",
+ evt_payload, evt_payload->core_index);
CAM_DBG(CAM_ISP, "bus_irq_status_0: = %x", evt_payload->irq_reg_val[0]);
CAM_DBG(CAM_ISP, "bus_irq_status_1: = %x", evt_payload->irq_reg_val[1]);
CAM_DBG(CAM_ISP, "bus_irq_status_2: = %x", evt_payload->irq_reg_val[2]);
@@ -4174,6 +4299,28 @@ DEFINE_SIMPLE_ATTRIBUTE(cam_ife_csid_debug,
cam_ife_get_csid_debug,
cam_ife_set_csid_debug, "%16llu");
+static int cam_ife_set_camif_debug(void *data, u64 val)
+{
+ g_ife_hw_mgr.debug_cfg.camif_debug = val;
+ CAM_DBG(CAM_ISP,
+ "Set camif enable_diag_sensor_status value :%lld", val);
+ return 0;
+}
+
+static int cam_ife_get_camif_debug(void *data, u64 *val)
+{
+ *val = g_ife_hw_mgr.debug_cfg.camif_debug;
+ CAM_DBG(CAM_ISP,
+ "Set camif enable_diag_sensor_status value :%lld",
+ g_ife_hw_mgr.debug_cfg.csid_debug);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(cam_ife_camif_debug,
+ cam_ife_get_camif_debug,
+ cam_ife_set_camif_debug, "%16llu");
+
static int cam_ife_hw_mgr_debug_register(void)
{
g_ife_hw_mgr.debug_cfg.dentry = debugfs_create_dir("camera_ife",
@@ -4199,6 +4346,14 @@ static int cam_ife_hw_mgr_debug_register(void)
CAM_ERR(CAM_ISP, "failed to create enable_recovery");
goto err;
}
+
+ if (!debugfs_create_file("ife_camif_debug",
+ 0644,
+ g_ife_hw_mgr.debug_cfg.dentry, NULL,
+ &cam_ife_camif_debug)) {
+ CAM_ERR(CAM_ISP, "failed to create cam_ife_camif_debug");
+ goto err;
+ }
g_ife_hw_mgr.debug_cfg.enable_recovery = 0;
return 0;
@@ -4208,7 +4363,7 @@ err:
return -ENOMEM;
}
-int cam_ife_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf)
+int cam_ife_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf, int *iommu_hdl)
{
int rc = -EFAULT;
int i, j;
@@ -4380,6 +4535,9 @@ int cam_ife_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf)
hw_mgr_intf->hw_config = cam_ife_mgr_config_hw;
hw_mgr_intf->hw_cmd = cam_ife_mgr_cmd;
+ if (iommu_hdl)
+ *iommu_hdl = g_ife_hw_mgr.mgr_common.img_iommu_hdl;
+
cam_ife_hw_mgr_debug_register();
CAM_DBG(CAM_ISP, "Exit");
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
index 0198f3d62e9c..9bfa34fe91ab 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
@@ -81,15 +81,17 @@ struct ctx_base_info {
/**
* struct cam_ife_hw_mgr_debug - contain the debug information
*
- * @dentry: Debugfs entry
- * @csid_debug: csid debug information
- * @enable_recovery enable recovery
+ * @dentry: Debugfs entry
+ * @csid_debug: csid debug information
+ * @enable_recovery: enable recovery
+ * @enable_diag_sensor_status: enable sensor diagnosis status
*
*/
struct cam_ife_hw_mgr_debug {
struct dentry *dentry;
uint64_t csid_debug;
uint32_t enable_recovery;
+ uint32_t camif_debug;
};
/**
@@ -203,9 +205,10 @@ struct cam_ife_hw_mgr {
* etnry functinon for the IFE HW manager.
*
* @hw_mgr_intf: IFE hardware manager object returned
+ * @iommu_hdl: Iommu handle to be returned
*
*/
-int cam_ife_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf);
+int cam_ife_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf, int *iommu_hdl);
/**
* cam_ife_mgr_do_tasklet_buf_done()
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.c
index 2f18895c2402..8b9c555cae17 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -16,7 +16,7 @@
int cam_isp_hw_mgr_init(struct device_node *of_node,
- struct cam_hw_mgr_intf *hw_mgr)
+ struct cam_hw_mgr_intf *hw_mgr, int *iommu_hdl)
{
int rc = 0;
const char *compat_str = NULL;
@@ -25,7 +25,7 @@ int cam_isp_hw_mgr_init(struct device_node *of_node,
(const char **)&compat_str);
if (strnstr(compat_str, "ife", strlen(compat_str)))
- rc = cam_ife_hw_mgr_init(hw_mgr);
+ rc = cam_ife_hw_mgr_init(hw_mgr, iommu_hdl);
else {
CAM_ERR(CAM_ISP, "Invalid ISP hw type");
rc = -EINVAL;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
index eaa7325fbca8..f652256d3dc5 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
@@ -457,6 +457,7 @@ int cam_isp_add_io_buffers(
num_out_buf = 0;
num_in_buf = 0;
io_cfg_used_bytes = 0;
+ prepare->pf_data->packet = prepare->packet;
/* Max one hw entries required for each base */
if (prepare->num_hw_update_entries + 1 >=
@@ -595,13 +596,6 @@ int cam_isp_add_io_buffers(
return rc;
}
- if (io_addr[plane_id] >> 32) {
- CAM_ERR(CAM_ISP,
- "Invalid mapped address");
- rc = -EINVAL;
- return rc;
- }
-
/* need to update with offset */
io_addr[plane_id] +=
io_cfg[i].offsets[plane_id];
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h
index fd71c37c8fa1..1586216f2073 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h
@@ -203,13 +203,11 @@ enum cam_isp_hw_mgr_command {
/**
* struct cam_isp_hw_cmd_args - Payload for hw manager command
*
- * @ctxt_to_hw_map: HW context from the acquire
* @cmd_type HW command type
* @get_context Get context type information
*/
struct cam_isp_hw_cmd_args {
- void *ctxt_to_hw_map;
- uint32_t cmd_type;
+ uint32_t cmd_type;
union {
uint32_t is_rdi_only_context;
uint32_t sof_irq_enable;
@@ -225,9 +223,9 @@ struct cam_isp_hw_cmd_args {
* @of_node: Device node input
* @hw_mgr: Input/output structure for the ISP hardware manager
* initialization
- *
+ * @iommu_hdl: Iommu handle to be returned
*/
int cam_isp_hw_mgr_init(struct device_node *of_node,
- struct cam_hw_mgr_intf *hw_mgr);
+ struct cam_hw_mgr_intf *hw_mgr, int *iommu_hdl);
#endif /* __CAM_ISP_HW_MGR_INTF_H__ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.h
index c68ddf7343fc..f90356aa2e5c 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -52,6 +52,7 @@ static struct cam_ife_csid_ipp_reg_offset cam_ife_csid_170_ipp_reg_offset = {
.csid_ipp_timestamp_perv1_eof_addr = 0x2ac,
/* configurations */
.pix_store_en_shift_val = 7,
+ .early_eof_en_shift_val = 29,
};
static struct cam_ife_csid_rdi_reg_offset cam_ife_csid_170_rdi_0_reg_offset = {
@@ -286,6 +287,8 @@ static struct cam_ife_csid_common_reg_offset
.crop_shift = 16,
.ipp_irq_mask_all = 0x7FFF,
.rdi_irq_mask_all = 0x7FFF,
+ .measure_en_hbi_vbi_cnt_mask = 0xC,
+ .format_measure_en_val = 1,
};
struct cam_ife_csid_reg_offset cam_ife_csid_170_reg_offset = {
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
index c3431cae7bec..a4c0cb10863e 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -45,6 +45,9 @@
/* Max number of sof irq's triggered in case of SOF freeze */
#define CAM_CSID_IRQ_SOF_DEBUG_CNT_MAX 6
+/* Max CSI Rx irq error count threshold value */
+#define CAM_IFE_CSID_MAX_IRQ_ERROR_COUNT 100
+
static int cam_ife_csid_is_ipp_format_supported(
uint32_t in_format)
{
@@ -423,6 +426,7 @@ static int cam_ife_csid_global_reset(struct cam_ife_csid_hw *csid_hw)
if (val != 0)
CAM_ERR(CAM_ISP, "CSID:%d IRQ value after reset rc = %d",
csid_hw->hw_intf->hw_idx, val);
+ csid_hw->error_irq_count = 0;
return rc;
}
@@ -1046,6 +1050,7 @@ static int cam_ife_csid_disable_hw(struct cam_ife_csid_hw *csid_hw)
csid_hw->hw_intf->hw_idx);
csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_DOWN;
+ csid_hw->error_irq_count = 0;
return rc;
}
@@ -1390,8 +1395,12 @@ static int cam_ife_csid_init_config_ipp_path(
cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
csid_reg->ipp_reg->csid_ipp_cfg0_addr);
+ val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ csid_reg->ipp_reg->csid_ipp_cfg1_addr);
+
/* select the post irq sub sample strobe for time stamp capture */
- cam_io_w_mb(CSID_TIMESTAMP_STB_POST_IRQ, soc_info->reg_map[0].mem_base +
+ val |= CSID_TIMESTAMP_STB_POST_IRQ;
+ cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
csid_reg->ipp_reg->csid_ipp_cfg1_addr);
if (path_data->crop_enable) {
@@ -1410,6 +1419,16 @@ static int cam_ife_csid_init_config_ipp_path(
csid_reg->ipp_reg->csid_ipp_vcrop_addr);
CAM_DBG(CAM_ISP, "CSID:%d Vertical Crop config val: 0x%x",
csid_hw->hw_intf->hw_idx, val);
+
+ /* Enable generating early eof strobe based on crop config */
+ if (!(csid_hw->csid_debug & CSID_DEBUG_DISABLE_EARLY_EOF)) {
+ val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ csid_reg->ipp_reg->csid_ipp_cfg0_addr);
+ val |= (1 <<
+ csid_reg->ipp_reg->early_eof_en_shift_val);
+ cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+ csid_reg->ipp_reg->csid_ipp_cfg0_addr);
+ }
}
/* set frame drop pattern to 0 and period to 1 */
@@ -1438,9 +1457,23 @@ static int cam_ife_csid_init_config_ipp_path(
val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
csid_reg->ipp_reg->csid_ipp_cfg0_addr);
val |= (1 << csid_reg->cmn_reg->path_en_shift_val);
+
+ if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_HBI_VBI_INFO)
+ val |= csid_reg->cmn_reg->format_measure_en_val;
+
cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
csid_reg->ipp_reg->csid_ipp_cfg0_addr);
+ /* Enable the HBI/VBI counter */
+ if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_HBI_VBI_INFO) {
+ val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ csid_reg->ipp_reg->csid_ipp_format_measure_cfg0_addr);
+ val |= csid_reg->cmn_reg->measure_en_hbi_vbi_cnt_mask;
+ cam_io_w_mb(val,
+ soc_info->reg_map[0].mem_base +
+ csid_reg->ipp_reg->csid_ipp_format_measure_cfg0_addr);
+ }
+
/* configure the rx packet capture based on csid debug set */
val = 0;
if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_SHORT_PKT_CAPTURE)
@@ -1479,8 +1512,10 @@ static int cam_ife_csid_deinit_ipp_path(
struct cam_isp_resource_node *res)
{
int rc = 0;
+ uint32_t val = 0;
struct cam_ife_csid_reg_offset *csid_reg;
struct cam_hw_soc_info *soc_info;
+ struct cam_ife_csid_ipp_reg_offset *ipp_reg;
csid_reg = csid_hw->csid_info->csid_reg;
soc_info = &csid_hw->hw_info->soc_info;
@@ -1498,8 +1533,26 @@ static int cam_ife_csid_deinit_ipp_path(
csid_hw->hw_intf->hw_idx,
res->res_id);
rc = -EINVAL;
+ goto end;
+ }
+
+ ipp_reg = csid_reg->ipp_reg;
+ val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ ipp_reg->csid_ipp_cfg0_addr);
+ if (val & csid_reg->cmn_reg->format_measure_en_val) {
+ val &= ~csid_reg->cmn_reg->format_measure_en_val;
+ cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+ ipp_reg->csid_ipp_cfg0_addr);
+
+ /* Disable the HBI/VBI counter */
+ val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ ipp_reg->csid_ipp_format_measure_cfg0_addr);
+ val &= ~csid_reg->cmn_reg->measure_en_hbi_vbi_cnt_mask;
+ cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+ ipp_reg->csid_ipp_format_measure_cfg0_addr);
}
+end:
res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
return rc;
}
@@ -1644,6 +1697,7 @@ static int cam_ife_csid_init_config_rdi_path(
struct cam_ife_csid_reg_offset *csid_reg;
struct cam_hw_soc_info *soc_info;
uint32_t path_format = 0, plain_fmt = 0, val = 0, id;
+ uint32_t format_measure_addr;
path_data = (struct cam_ife_csid_path_cfg *) res->res_priv;
csid_reg = csid_hw->csid_info->csid_reg;
@@ -1737,9 +1791,24 @@ static int cam_ife_csid_init_config_rdi_path(
csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
val |= (1 << csid_reg->cmn_reg->path_en_shift_val);
+ if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_HBI_VBI_INFO)
+ val |= csid_reg->cmn_reg->format_measure_en_val;
+
cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
+ format_measure_addr =
+ csid_reg->rdi_reg[id]->csid_rdi_format_measure_cfg0_addr;
+
+ /* Enable the HBI/VBI counter */
+ if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_HBI_VBI_INFO) {
+ val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ format_measure_addr);
+ val |= csid_reg->cmn_reg->measure_en_hbi_vbi_cnt_mask;
+ cam_io_w_mb(val,
+ soc_info->reg_map[0].mem_base + format_measure_addr);
+ }
+
/* configure the rx packet capture based on csid debug set */
if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_SHORT_PKT_CAPTURE)
val = ((1 <<
@@ -1775,7 +1844,7 @@ static int cam_ife_csid_deinit_rdi_path(
struct cam_isp_resource_node *res)
{
int rc = 0;
- uint32_t id;
+ uint32_t id, val, format_measure_addr;
struct cam_ife_csid_reg_offset *csid_reg;
struct cam_hw_soc_info *soc_info;
@@ -1792,6 +1861,24 @@ static int cam_ife_csid_deinit_rdi_path(
return -EINVAL;
}
+ format_measure_addr =
+ csid_reg->rdi_reg[id]->csid_rdi_format_measure_cfg0_addr;
+
+ if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_HBI_VBI_INFO) {
+ val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
+ val &= ~csid_reg->cmn_reg->format_measure_en_val;
+ cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+ csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
+
+ /* Disable the HBI/VBI counter */
+ val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ format_measure_addr);
+ val &= ~csid_reg->cmn_reg->measure_en_hbi_vbi_cnt_mask;
+ cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+ format_measure_addr);
+ }
+
res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
return rc;
}
@@ -1894,6 +1981,55 @@ static int cam_ife_csid_disable_rdi_path(
return rc;
}
+static int cam_ife_csid_get_hbi_vbi(
+ struct cam_ife_csid_hw *csid_hw,
+ struct cam_isp_resource_node *res)
+{
+ uint32_t hbi, vbi;
+ const struct cam_ife_csid_reg_offset *csid_reg;
+ const struct cam_ife_csid_rdi_reg_offset *rdi_reg;
+ struct cam_hw_soc_info *soc_info;
+
+ csid_reg = csid_hw->csid_info->csid_reg;
+ soc_info = &csid_hw->hw_info->soc_info;
+
+ if (res->res_type != CAM_ISP_RESOURCE_PIX_PATH ||
+ res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid res_type:%d res id%d",
+ csid_hw->hw_intf->hw_idx, res->res_type,
+ res->res_id);
+ return -EINVAL;
+ }
+
+ if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
+ CAM_ERR(CAM_ISP, "CSID:%d Invalid dev state :%d",
+ csid_hw->hw_intf->hw_idx,
+ csid_hw->hw_info->hw_state);
+ return -EINVAL;
+ }
+
+ if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP) {
+ hbi = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ csid_reg->ipp_reg->csid_ipp_format_measure1_addr);
+ vbi = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ csid_reg->ipp_reg->csid_ipp_format_measure2_addr);
+ } else {
+ rdi_reg = csid_reg->rdi_reg[res->res_id];
+ hbi = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ rdi_reg->csid_rdi_format_measure1_addr);
+ vbi = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ rdi_reg->csid_rdi_format_measure2_addr);
+ }
+
+ CAM_INFO_RATE_LIMIT(CAM_ISP, "Resource %u HBI: 0x%x", res->res_id,
+ hbi);
+ CAM_INFO_RATE_LIMIT(CAM_ISP, "Resource %u VBI: 0x%x", res->res_id,
+ vbi);
+
+ return 0;
+}
+
+
static int cam_ife_csid_get_time_stamp(
struct cam_ife_csid_hw *csid_hw, void *cmd_args)
{
@@ -2498,18 +2634,20 @@ static int cam_ife_csid_sof_irq_debug(
if (*((uint32_t *)cmd_args) == 1)
sof_irq_enable = true;
- val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
- csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
+ if (csid_reg->ipp_reg) {
+ val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
- if (val) {
- if (sof_irq_enable)
- val |= CSID_PATH_INFO_INPUT_SOF;
- else
- val &= ~CSID_PATH_INFO_INPUT_SOF;
+ if (val) {
+ if (sof_irq_enable)
+ val |= CSID_PATH_INFO_INPUT_SOF;
+ else
+ val &= ~CSID_PATH_INFO_INPUT_SOF;
- cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
- csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
- val = 0;
+ cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+ csid_reg->ipp_reg->csid_ipp_irq_mask_addr);
+ val = 0;
+ }
}
for (i = 0; i < csid_reg->cmn_reg->no_rdis; i++) {
@@ -2547,6 +2685,7 @@ static int cam_ife_csid_process_cmd(void *hw_priv,
int rc = 0;
struct cam_ife_csid_hw *csid_hw;
struct cam_hw_info *csid_hw_info;
+ struct cam_isp_resource_node *res = NULL;
if (!hw_priv || !cmd_args) {
CAM_ERR(CAM_ISP, "CSID: Invalid arguments");
@@ -2559,6 +2698,11 @@ static int cam_ife_csid_process_cmd(void *hw_priv,
switch (cmd_type) {
case CAM_IFE_CSID_CMD_GET_TIME_STAMP:
rc = cam_ife_csid_get_time_stamp(csid_hw, cmd_args);
+ if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_HBI_VBI_INFO) {
+ res = ((struct cam_csid_get_time_stamp_args *)
+ cmd_args)->node_res;
+ cam_ife_csid_get_hbi_vbi(csid_hw, res);
+ }
break;
case CAM_IFE_CSID_SET_CSID_DEBUG:
rc = cam_ife_csid_set_csid_debug(csid_hw, cmd_args);
@@ -2645,18 +2789,22 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE0_FIFO_OVERFLOW) {
CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d lane 0 over flow",
csid_hw->hw_intf->hw_idx);
+ csid_hw->error_irq_count++;
}
if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE1_FIFO_OVERFLOW) {
CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d lane 1 over flow",
csid_hw->hw_intf->hw_idx);
+ csid_hw->error_irq_count++;
}
if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE2_FIFO_OVERFLOW) {
CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d lane 2 over flow",
csid_hw->hw_intf->hw_idx);
+ csid_hw->error_irq_count++;
}
if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE3_FIFO_OVERFLOW) {
CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d lane 3 over flow",
csid_hw->hw_intf->hw_idx);
+ csid_hw->error_irq_count++;
}
if (irq_status_rx & CSID_CSI2_RX_ERROR_TG_FIFO_OVERFLOW) {
CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d TG OVER FLOW",
@@ -2677,6 +2825,7 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
if (irq_status_rx & CSID_CSI2_RX_ERROR_CRC) {
CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d ERROR_CRC",
csid_hw->hw_intf->hw_idx);
+ csid_hw->error_irq_count++;
}
if (irq_status_rx & CSID_CSI2_RX_ERROR_ECC) {
CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d ERROR_ECC",
@@ -2689,10 +2838,12 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
if (irq_status_rx & CSID_CSI2_RX_ERROR_STREAM_UNDERFLOW) {
CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d ERROR_STREAM_UNDERFLOW",
csid_hw->hw_intf->hw_idx);
+ csid_hw->error_irq_count++;
}
if (irq_status_rx & CSID_CSI2_RX_ERROR_UNBOUNDED_FRAME) {
CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d UNBOUNDED_FRAME",
csid_hw->hw_intf->hw_idx);
+ csid_hw->error_irq_count++;
}
if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOT_IRQ) {
@@ -2858,6 +3009,26 @@ irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
csid_hw->irq_debug_cnt = 0;
}
+ if (csid_hw->error_irq_count >
+ CAM_IFE_CSID_MAX_IRQ_ERROR_COUNT) {
+ /* Mask line overflow, underflow, unbound interrupts */
+ val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+ csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);
+
+ val &= ~(CSID_CSI2_RX_ERROR_LANE0_FIFO_OVERFLOW |
+ CSID_CSI2_RX_ERROR_LANE1_FIFO_OVERFLOW |
+ CSID_CSI2_RX_ERROR_LANE2_FIFO_OVERFLOW |
+ CSID_CSI2_RX_ERROR_LANE3_FIFO_OVERFLOW |
+ CSID_CSI2_RX_ERROR_CRC |
+ CSID_CSI2_RX_ERROR_STREAM_UNDERFLOW |
+ CSID_CSI2_RX_ERROR_UNBOUNDED_FRAME);
+
+ cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+ csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);
+ CAM_WARN(CAM_ISP, "Masked csi rx error interrupts");
+ csid_hw->error_irq_count = 0;
+ }
+
CAM_DBG(CAM_ISP, "IRQ Handling exit");
return IRQ_HANDLED;
}
@@ -2975,6 +3146,7 @@ int cam_ife_csid_hw_probe_init(struct cam_hw_intf *csid_hw_intf,
}
ife_csid_hw->csid_debug = 0;
+ ife_csid_hw->error_irq_count = 0;
return 0;
err:
if (rc) {
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
index ad993ebc277e..c547f4cb5286 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
@@ -76,6 +76,8 @@
#define CSID_DEBUG_ENABLE_SHORT_PKT_CAPTURE BIT(4)
#define CSID_DEBUG_ENABLE_LONG_PKT_CAPTURE BIT(5)
#define CSID_DEBUG_ENABLE_CPHY_PKT_CAPTURE BIT(6)
+#define CSID_DEBUG_ENABLE_HBI_VBI_INFO BIT(7)
+#define CSID_DEBUG_DISABLE_EARLY_EOF BIT(8)
/* enum cam_csid_path_halt_mode select the path halt mode control */
enum cam_csid_path_halt_mode {
@@ -135,6 +137,7 @@ struct cam_ife_csid_ipp_reg_offset {
/* configuration */
uint32_t pix_store_en_shift_val;
+ uint32_t early_eof_en_shift_val;
};
struct cam_ife_csid_rdi_reg_offset {
@@ -285,6 +288,8 @@ struct cam_ife_csid_common_reg_offset {
uint32_t crop_shift;
uint32_t ipp_irq_mask_all;
uint32_t rdi_irq_mask_all;
+ uint32_t measure_en_hbi_vbi_cnt_mask;
+ uint32_t format_measure_en_val;
};
/**
@@ -438,6 +443,8 @@ struct cam_ife_csid_path_cfg {
* @sof_irq_triggered: Flag is set on receiving event to enable sof irq
* incase of SOF freeze.
* @irq_debug_cnt: Counter to track sof irq's when above flag is set.
+ * @error_irq_count Error IRQ count, if continuous error irq comes
+ * need to stop the CSID and mask interrupts.
*
*/
struct cam_ife_csid_hw {
@@ -461,6 +468,7 @@ struct cam_ife_csid_hw {
uint64_t clk_rate;
bool sof_irq_triggered;
uint32_t irq_debug_cnt;
+ uint32_t error_irq_count;
};
int cam_ife_csid_hw_probe_init(struct cam_hw_intf *csid_hw_intf,
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
index 28cfcc8bea74..54aa4c23b4d1 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
@@ -96,6 +96,7 @@ enum cam_isp_hw_cmd_type {
CAM_ISP_HW_CMD_STOP_BUS_ERR_IRQ,
CAM_ISP_HW_CMD_GET_REG_DUMP,
CAM_ISP_HW_CMD_SOF_IRQ_DEBUG,
+ CAM_ISP_HW_CMD_SET_CAMIF_DEBUG,
CAM_ISP_HW_CMD_MAX,
};
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h
index c7d3aa2fd487..d1284d9f23d2 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe170/cam_vfe170.h
@@ -50,6 +50,8 @@ static struct cam_vfe_camif_ver2_reg vfe170_camif_reg = {
.raw_crop_width_cfg = 0x00000CE4,
.raw_crop_height_cfg = 0x00000CE8,
.reg_update_cmd = 0x000004AC,
+ .vfe_diag_config = 0x00000C48,
+ .vfe_diag_sensor_status = 0x00000C4C,
};
static struct cam_vfe_camif_reg_data vfe_170_camif_reg_data = {
@@ -79,6 +81,7 @@ static struct cam_vfe_camif_reg_data vfe_170_camif_reg_data = {
.eof_irq_mask = 0x00000002,
.error_irq_mask0 = 0x0003FC00,
.error_irq_mask1 = 0x0FFF7E80,
+ .enable_diagnostic_hw = 0x1,
};
struct cam_vfe_top_ver2_reg_offset_module_ctrl lens_170_reg = {
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
index 0bb13741f2b1..54ec2823ae11 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
@@ -2438,7 +2438,7 @@ static int cam_vfe_bus_update_wm(void *priv, void *cmd_args,
for (i = 0, j = 0; i < vfe_out_data->num_wm; i++) {
if (j >= (MAX_REG_VAL_PAIR_SIZE - MAX_BUF_UPDATE_REG_NUM * 2)) {
CAM_ERR(CAM_ISP,
- "reg_val_pair %d exceeds the array limit %lu",
+ "reg_val_pair %d exceeds the array limit %zu",
j, MAX_REG_VAL_PAIR_SIZE);
return -ENOMEM;
}
@@ -2711,7 +2711,7 @@ static int cam_vfe_bus_update_hfr(void *priv, void *cmd_args,
for (i = 0, j = 0; i < vfe_out_data->num_wm; i++) {
if (j >= (MAX_REG_VAL_PAIR_SIZE - MAX_BUF_UPDATE_REG_NUM * 2)) {
CAM_ERR(CAM_ISP,
- "reg_val_pair %d exceeds the array limit %lu",
+ "reg_val_pair %d exceeds the array limit %zu",
j, MAX_REG_VAL_PAIR_SIZE);
return -ENOMEM;
}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
index 73b4ee79e758..fc257ecaa604 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
@@ -43,6 +43,7 @@ struct cam_vfe_mux_camif_data {
uint32_t last_line;
bool enable_sof_irq_debug;
uint32_t irq_debug_cnt;
+ uint32_t camif_debug;
};
static int cam_vfe_camif_validate_pix_pattern(uint32_t pattern)
@@ -211,6 +212,8 @@ static int cam_vfe_camif_resource_start(
uint32_t epoch0_irq_mask;
uint32_t epoch1_irq_mask;
uint32_t computed_epoch_line_cfg;
+ uint32_t camera_hw_version = 0;
+ int rc = 0;
if (!camif_res) {
CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
@@ -250,16 +253,50 @@ static int cam_vfe_camif_resource_start(
rsrc_data->common_reg->module_ctrl[
CAM_VFE_TOP_VER2_MODULE_STATS]->cgc_ovd);
+ /* get the HW version */
+ rc = cam_cpas_get_cpas_hw_version(&camera_hw_version);
+
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Couldn't find HW version. rc: %d", rc);
+ return rc;
+ }
+
/* epoch config */
- epoch0_irq_mask = ((rsrc_data->last_line - rsrc_data->first_line) / 2) +
- rsrc_data->first_line;
- epoch1_irq_mask = rsrc_data->reg_data->epoch_line_cfg & 0xFFFF;
- computed_epoch_line_cfg = (epoch0_irq_mask << 16) | epoch1_irq_mask;
- cam_io_w_mb(computed_epoch_line_cfg,
- rsrc_data->mem_base + rsrc_data->camif_reg->epoch_irq);
- CAM_DBG(CAM_ISP, "first_line:%u last_line:%u epoch_line_cfg: 0x%x",
- rsrc_data->first_line, rsrc_data->last_line,
- computed_epoch_line_cfg);
+ switch (camera_hw_version) {
+ case CAM_CPAS_TITAN_175_V101:
+ case CAM_CPAS_TITAN_175_V100:
+ epoch0_irq_mask = ((rsrc_data->last_line -
+ rsrc_data->first_line) / 2) +
+ rsrc_data->first_line;
+ epoch1_irq_mask = rsrc_data->reg_data->epoch_line_cfg &
+ 0xFFFF;
+ computed_epoch_line_cfg = (epoch0_irq_mask << 16) |
+ epoch1_irq_mask;
+ cam_io_w_mb(computed_epoch_line_cfg,
+ rsrc_data->mem_base +
+ rsrc_data->camif_reg->epoch_irq);
+ CAM_DBG(CAM_ISP, "first_line: %u\n"
+ "last_line: %u\n"
+ "epoch_line_cfg: 0x%x",
+ rsrc_data->first_line,
+ rsrc_data->last_line,
+ computed_epoch_line_cfg);
+ break;
+ case CAM_CPAS_TITAN_170_V100:
+ case CAM_CPAS_TITAN_170_V110:
+ case CAM_CPAS_TITAN_170_V120:
+ cam_io_w_mb(rsrc_data->reg_data->epoch_line_cfg,
+ rsrc_data->mem_base +
+ rsrc_data->camif_reg->epoch_irq);
+ break;
+ default:
+ cam_io_w_mb(rsrc_data->reg_data->epoch_line_cfg,
+ rsrc_data->mem_base +
+ rsrc_data->camif_reg->epoch_irq);
+ CAM_WARN(CAM_ISP, "Hardware version not proper: 0x%x",
+ camera_hw_version);
+ break;
+ }
camif_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
@@ -273,6 +310,15 @@ static int cam_vfe_camif_resource_start(
rsrc_data->enable_sof_irq_debug = false;
rsrc_data->irq_debug_cnt = 0;
+ if (rsrc_data->camif_debug &
+ CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS) {
+ val = cam_io_r_mb(rsrc_data->mem_base +
+ rsrc_data->camif_reg->vfe_diag_config);
+ val |= rsrc_data->reg_data->enable_diagnostic_hw;
+ cam_io_w_mb(val, rsrc_data->mem_base +
+ rsrc_data->camif_reg->vfe_diag_config);
+ }
+
CAM_DBG(CAM_ISP, "Start Camif IFE %d Done", camif_res->hw_intf->hw_idx);
return 0;
}
@@ -364,6 +410,14 @@ static int cam_vfe_camif_resource_stop(
if (camif_res->res_state == CAM_ISP_RESOURCE_STATE_STREAMING)
camif_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+ val = cam_io_r_mb(camif_priv->mem_base +
+ camif_priv->camif_reg->vfe_diag_config);
+ if (val & camif_priv->reg_data->enable_diagnostic_hw) {
+ val &= ~camif_priv->reg_data->enable_diagnostic_hw;
+ cam_io_w_mb(val, camif_priv->mem_base +
+ camif_priv->camif_reg->vfe_diag_config);
+ }
+
return rc;
}
@@ -388,6 +442,7 @@ static int cam_vfe_camif_process_cmd(struct cam_isp_resource_node *rsrc_node,
uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
{
int rc = -EINVAL;
+ struct cam_vfe_mux_camif_data *camif_priv = NULL;
if (!rsrc_node || !cmd_args) {
CAM_ERR(CAM_ISP, "Invalid input arguments");
@@ -405,6 +460,11 @@ static int cam_vfe_camif_process_cmd(struct cam_isp_resource_node *rsrc_node,
case CAM_ISP_HW_CMD_SOF_IRQ_DEBUG:
rc = cam_vfe_camif_sof_irq_debug(rsrc_node, cmd_args);
break;
+ case CAM_ISP_HW_CMD_SET_CAMIF_DEBUG:
+ camif_priv =
+ (struct cam_vfe_mux_camif_data *)rsrc_node->res_priv;
+ camif_priv->camif_debug = *((uint32_t *)cmd_args);
+ break;
default:
CAM_ERR(CAM_ISP,
"unsupported process command:%d", cmd_type);
@@ -429,6 +489,7 @@ static int cam_vfe_camif_handle_irq_bottom_half(void *handler_priv,
struct cam_vfe_top_irq_evt_payload *payload;
uint32_t irq_status0;
uint32_t irq_status1;
+ uint32_t val;
if (!handler_priv || !evt_payload_priv) {
CAM_ERR(CAM_ISP, "Invalid params");
@@ -491,6 +552,14 @@ static int cam_vfe_camif_handle_irq_bottom_half(void *handler_priv,
} else {
ret = CAM_ISP_HW_ERROR_NONE;
}
+
+ if (camif_priv->camif_debug &
+ CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS) {
+ val = cam_io_r(camif_priv->mem_base +
+ camif_priv->camif_reg->vfe_diag_sensor_status);
+ CAM_DBG(CAM_ISP, "VFE_DIAG_SENSOR_STATUS: 0x%x",
+ camif_priv->mem_base, val);
+ }
break;
default:
break;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h
index 4a73bd74c097..7a6958930caa 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -16,6 +16,11 @@
#include "cam_isp_hw.h"
#include "cam_vfe_top.h"
+/*
+ * Debug values for camif module
+ */
+#define CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS BIT(0)
+
struct cam_vfe_camif_ver2_reg {
uint32_t camif_cmd;
uint32_t camif_config;
@@ -27,6 +32,8 @@ struct cam_vfe_camif_ver2_reg {
uint32_t raw_crop_width_cfg;
uint32_t raw_crop_height_cfg;
uint32_t reg_update_cmd;
+ uint32_t vfe_diag_config;
+ uint32_t vfe_diag_sensor_status;
};
struct cam_vfe_camif_reg_data {
@@ -63,6 +70,8 @@ struct cam_vfe_camif_reg_data {
uint32_t eof_irq_mask;
uint32_t error_irq_mask0;
uint32_t error_irq_mask1;
+
+ uint32_t enable_diagnostic_hw;
};
struct cam_vfe_camif_ver2_hw_info {
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c
index 02334a4e8195..1c910621b655 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c
+++ b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -20,8 +20,48 @@
#include "cam_jpeg_context.h"
#include "cam_context_utils.h"
#include "cam_debug_util.h"
+#include "cam_packet_util.h"
-static const char jpeg_dev_name[] = "jpeg";
+static const char jpeg_dev_name[] = "cam-jpeg";
+
+static int cam_jpeg_context_dump_active_request(void *data, unsigned long iova,
+ uint32_t buf_info)
+{
+
+ struct cam_context *ctx = (struct cam_context *)data;
+ struct cam_ctx_request *req = NULL;
+ struct cam_ctx_request *req_temp = NULL;
+ struct cam_hw_mgr_dump_pf_data *pf_dbg_entry = NULL;
+ int rc = 0;
+ int closest_port;
+ bool b_mem_found = false;
+
+
+ if (!ctx) {
+ CAM_ERR(CAM_JPEG, "Invalid ctx");
+ return -EINVAL;
+ }
+
+ CAM_INFO(CAM_JPEG, "iommu fault for jpeg ctx %d state %d",
+ ctx->ctx_id, ctx->state);
+
+ list_for_each_entry_safe(req, req_temp,
+ &ctx->active_req_list, list) {
+ pf_dbg_entry = &(req->pf_data);
+ closest_port = -1;
+ CAM_INFO(CAM_JPEG, "req_id : %lld ", req->request_id);
+
+ rc = cam_context_dump_pf_info_to_hw(ctx, pf_dbg_entry->packet,
+ iova, buf_info, &b_mem_found);
+ if (rc)
+ CAM_ERR(CAM_JPEG, "Failed to dump pf info");
+
+ if (b_mem_found)
+ CAM_ERR(CAM_JPEG, "Found page fault in req %lld %d",
+ req->request_id, rc);
+ }
+ return rc;
+}
static int __cam_jpeg_ctx_acquire_dev_in_available(struct cam_context *ctx,
struct cam_acquire_dev_cmd *cmd)
@@ -116,6 +156,7 @@ static struct cam_ctx_ops
},
.crm_ops = { },
.irq_ops = __cam_jpeg_ctx_handle_buf_done_in_acquired,
+ .pagefault_ops = cam_jpeg_context_dump_active_request,
},
};
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.c b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.c
index 46cc08f7ea5f..14892224e412 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.c
+++ b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.c
@@ -22,11 +22,31 @@
#include "cam_jpeg_hw_mgr_intf.h"
#include "cam_jpeg_dev.h"
#include "cam_debug_util.h"
+#include "cam_smmu_api.h"
#define CAM_JPEG_DEV_NAME "cam-jpeg"
static struct cam_jpeg_dev g_jpeg_dev;
+static void cam_jpeg_dev_iommu_fault_handler(
+ struct iommu_domain *domain, struct device *dev, unsigned long iova,
+ int flags, void *token, uint32_t buf_info)
+{
+ int i = 0;
+ struct cam_node *node = NULL;
+
+ if (!token) {
+ CAM_ERR(CAM_JPEG, "invalid token in page handler cb");
+ return;
+ }
+
+ node = (struct cam_node *)token;
+
+ for (i = 0; i < node->ctx_size; i++)
+ cam_context_dump_pf_info(&(node->ctx_list[i]), iova,
+ buf_info);
+}
+
static const struct of_device_id cam_jpeg_dt_match[] = {
{
.compatible = "qcom,cam-jpeg"
@@ -34,23 +54,50 @@ static const struct of_device_id cam_jpeg_dt_match[] = {
{ }
};
+static int cam_jpeg_subdev_open(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh)
+{
+
+ mutex_lock(&g_jpeg_dev.jpeg_mutex);
+ g_jpeg_dev.open_cnt++;
+ mutex_unlock(&g_jpeg_dev.jpeg_mutex);
+
+ return 0;
+}
+
static int cam_jpeg_subdev_close(struct v4l2_subdev *sd,
struct v4l2_subdev_fh *fh)
{
+ int rc = 0;
struct cam_node *node = v4l2_get_subdevdata(sd);
+
+ mutex_lock(&g_jpeg_dev.jpeg_mutex);
+ if (g_jpeg_dev.open_cnt <= 0) {
+ CAM_DBG(CAM_JPEG, "JPEG subdev is already closed");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ g_jpeg_dev.open_cnt--;
+
if (!node) {
CAM_ERR(CAM_JPEG, "Node ptr is NULL");
- return -EINVAL;
+ rc = -EINVAL;
+ goto end;
}
- cam_node_shutdown(node);
+ if (g_jpeg_dev.open_cnt == 0)
+ cam_node_shutdown(node);
- return 0;
+end:
+ mutex_unlock(&g_jpeg_dev.jpeg_mutex);
+ return rc;
}
static const struct v4l2_subdev_internal_ops cam_jpeg_subdev_internal_ops = {
.close = cam_jpeg_subdev_close,
+ .open = cam_jpeg_subdev_open,
};
static int cam_jpeg_dev_remove(struct platform_device *pdev)
@@ -78,6 +125,7 @@ static int cam_jpeg_dev_probe(struct platform_device *pdev)
int i;
struct cam_hw_mgr_intf hw_mgr_intf;
struct cam_node *node;
+ int iommu_hdl = -1;
g_jpeg_dev.sd.internal_ops = &cam_jpeg_subdev_internal_ops;
rc = cam_subdev_probe(&g_jpeg_dev.sd, pdev, CAM_JPEG_DEV_NAME,
@@ -89,7 +137,7 @@ static int cam_jpeg_dev_probe(struct platform_device *pdev)
node = (struct cam_node *)g_jpeg_dev.sd.token;
rc = cam_jpeg_hw_mgr_init(pdev->dev.of_node,
- (uint64_t *)&hw_mgr_intf);
+ (uint64_t *)&hw_mgr_intf, &iommu_hdl);
if (rc) {
CAM_ERR(CAM_JPEG, "Can not initialize JPEG HWmanager %d", rc);
goto unregister;
@@ -114,6 +162,9 @@ static int cam_jpeg_dev_probe(struct platform_device *pdev)
goto ctx_init_fail;
}
+ cam_smmu_set_client_page_fault_handler(iommu_hdl,
+ cam_jpeg_dev_iommu_fault_handler, node);
+
mutex_init(&g_jpeg_dev.jpeg_mutex);
CAM_INFO(CAM_JPEG, "Camera JPEG probe complete");
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.h b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.h
index deab2d5c0d02..0d15ced16e80 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.h
+++ b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -26,6 +26,7 @@
* @ctx: JPEG base context storage
* @ctx_jpeg: JPEG private context storage
* @jpeg_mutex: Jpeg dev mutex
+ * @open_cnt: Open device count
*/
struct cam_jpeg_dev {
struct cam_subdev sd;
@@ -33,5 +34,6 @@ struct cam_jpeg_dev {
struct cam_context ctx[CAM_CTX_MAX];
struct cam_jpeg_context ctx_jpeg[CAM_CTX_MAX];
struct mutex jpeg_mutex;
+ int32_t open_cnt;
};
#endif /* __CAM_JPEG_DEV_H__ */
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
index f0913b2df834..7f0199f6e119 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
@@ -34,6 +34,7 @@
#include "cam_mem_mgr.h"
#include "cam_cdm_intf_api.h"
#include "cam_debug_util.h"
+#include "cam_common_util.h"
#define CAM_JPEG_HW_ENTRIES_MAX 20
#define CAM_JPEG_CHBASE 0
@@ -55,8 +56,8 @@ static int cam_jpeg_mgr_process_irq(void *priv, void *data)
struct cam_jpeg_hw_ctx_data *ctx_data = NULL;
struct cam_hw_done_event_data buf_data;
struct cam_jpeg_set_irq_cb irq_cb;
- uint32_t dev_type = 0;
- uint64_t kaddr;
+ uintptr_t dev_type = 0;
+ uintptr_t kaddr;
uint32_t *cmd_buf_kaddr;
size_t cmd_buf_len;
struct cam_jpeg_config_inout_param_info *p_params;
@@ -113,7 +114,7 @@ static int cam_jpeg_mgr_process_irq(void *priv, void *data)
rc = hw_mgr->devices[dev_type][0]->hw_ops.deinit(
hw_mgr->devices[dev_type][0]->hw_priv, NULL, 0);
if (rc)
- CAM_ERR(CAM_JPEG, "Failed to Deinit %d HW", dev_type);
+ CAM_ERR(CAM_JPEG, "Failed to Deinit %lu HW", dev_type);
}
hw_mgr->device_in_use[dev_type][0] = false;
@@ -133,7 +134,7 @@ static int cam_jpeg_mgr_process_irq(void *priv, void *data)
CAM_ERR(CAM_JPEG, "task_data is NULL");
return -EINVAL;
}
- wq_task_data->data = (void *)(uint64_t)dev_type;
+ wq_task_data->data = (void *)dev_type;
wq_task_data->request_id = 0;
wq_task_data->type = CAM_JPEG_WORKQ_TASK_CMD_TYPE;
task->process_cb = cam_jpeg_mgr_process_cmd;
@@ -146,7 +147,7 @@ static int cam_jpeg_mgr_process_irq(void *priv, void *data)
rc = cam_mem_get_cpu_buf(
p_cfg_req->hw_cfg_args.hw_update_entries[CAM_JPEG_PARAM].handle,
- (uint64_t *)&kaddr, &cmd_buf_len);
+ &kaddr, &cmd_buf_len);
if (rc) {
CAM_ERR(CAM_JPEG, "unable to get info for cmd buf: %x %d",
hw_mgr->iommu_hdl, rc);
@@ -172,7 +173,7 @@ static int cam_jpeg_mgr_process_irq(void *priv, void *data)
p_cfg_req->hw_cfg_args.out_map_entries[i].resource_handle;
}
buf_data.request_id =
- (uint64_t)p_cfg_req->hw_cfg_args.priv;
+ PTR_TO_U64(p_cfg_req->hw_cfg_args.priv);
ctx_data->ctxt_event_cb(ctx_data->context_priv, 0, &buf_data);
list_add_tail(&p_cfg_req->list, &hw_mgr->free_req_list);
@@ -262,7 +263,7 @@ static int cam_jpeg_insert_cdm_change_base(
struct cam_cdm_bl_request *cdm_cmd;
uint32_t size;
uint32_t mem_cam_base;
- uint64_t iova_addr;
+ uintptr_t iova_addr;
uint32_t *ch_base_iova_addr;
size_t ch_base_len;
@@ -314,7 +315,7 @@ static int cam_jpeg_mgr_process_cmd(void *priv, void *data)
struct cam_cdm_bl_request *cdm_cmd;
struct cam_hw_config_args *config_args = NULL;
struct cam_jpeg_hw_ctx_data *ctx_data = NULL;
- uint64_t request_id = 0;
+ uintptr_t request_id = 0;
struct cam_jpeg_process_frame_work_data_t *task_data =
(struct cam_jpeg_process_frame_work_data_t *)data;
uint32_t dev_type;
@@ -357,9 +358,9 @@ static int cam_jpeg_mgr_process_cmd(void *priv, void *data)
config_args = (struct cam_hw_config_args *)&p_cfg_req->hw_cfg_args;
request_id = task_data->request_id;
- if (request_id != (uint64_t)config_args->priv) {
- CAM_DBG(CAM_JPEG, "not a recent req %lld %lld",
- request_id, (uint64_t)config_args->priv);
+ if (request_id != (uintptr_t)config_args->priv) {
+ CAM_DBG(CAM_JPEG, "not a recent req %zd %zd",
+ request_id, (uintptr_t)config_args->priv);
}
if (!config_args->num_hw_update_entries) {
@@ -489,7 +490,8 @@ end_callcb:
buf_data.resource_handle[i] =
hw_cfg_args->out_map_entries[i].resource_handle;
}
- buf_data.request_id = (uint64_t)p_cfg_req->hw_cfg_args.priv;
+ buf_data.request_id =
+ (uintptr_t)p_cfg_req->hw_cfg_args.priv;
ctx_data->ctxt_event_cb(ctx_data->context_priv, 0, &buf_data);
}
@@ -509,7 +511,7 @@ static int cam_jpeg_mgr_config_hw(void *hw_mgr_priv, void *config_hw_args)
struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
struct cam_hw_config_args *config_args = config_hw_args;
struct cam_jpeg_hw_ctx_data *ctx_data = NULL;
- uint64_t request_id = 0;
+ uintptr_t request_id = 0;
struct cam_hw_update_entry *hw_update_entries;
struct crm_workq_task *task;
struct cam_jpeg_process_frame_work_data_t *task_data;
@@ -549,11 +551,11 @@ static int cam_jpeg_mgr_config_hw(void *hw_mgr_priv, void *config_hw_args)
p_cfg_req->hw_cfg_args = *config_args;
p_cfg_req->dev_type = ctx_data->jpeg_dev_acquire_info.dev_type;
- request_id = (uint64_t)config_args->priv;
+ request_id = (uintptr_t)config_args->priv;
p_cfg_req->req_id = request_id;
hw_update_entries = config_args->hw_update_entries;
- CAM_DBG(CAM_JPEG, "ctx_data = %pK req_id = %lld %lld",
- ctx_data, request_id, (uint64_t)config_args->priv);
+ CAM_DBG(CAM_JPEG, "ctx_data = %pK req_id = %lld %zd",
+ ctx_data, request_id, (uintptr_t)config_args->priv);
task = cam_req_mgr_workq_get_task(g_jpeg_hw_mgr.work_process_frame);
if (!task) {
CAM_ERR(CAM_JPEG, "no empty task");
@@ -578,7 +580,7 @@ static int cam_jpeg_mgr_config_hw(void *hw_mgr_priv, void *config_hw_args)
list_add_tail(&p_cfg_req->list, &hw_mgr->hw_config_req_list);
mutex_unlock(&hw_mgr->hw_mgr_mutex);
- task_data->data = (void *)(int64_t)p_cfg_req->dev_type;
+ task_data->data = (void *)(uintptr_t)p_cfg_req->dev_type;
task_data->request_id = request_id;
task_data->type = CAM_JPEG_WORKQ_TASK_CMD_TYPE;
task->process_cb = cam_jpeg_mgr_process_cmd;
@@ -600,6 +602,74 @@ err_after_dq_free_list:
return rc;
}
+static void cam_jpeg_mgr_print_io_bufs(struct cam_packet *packet,
+ int32_t iommu_hdl, int32_t sec_mmu_hdl, uint32_t pf_buf_info,
+ bool *mem_found)
+{
+ uint64_t iova_addr;
+ size_t src_buf_size;
+ int i;
+ int j;
+ int rc = 0;
+ int32_t mmu_hdl;
+ struct cam_buf_io_cfg *io_cfg = NULL;
+
+ if (mem_found)
+ *mem_found = false;
+
+ io_cfg = (struct cam_buf_io_cfg *)((uint32_t *)&packet->payload +
+ packet->io_configs_offset / 4);
+
+ for (i = 0; i < packet->num_io_configs; i++) {
+ for (j = 0; j < CAM_PACKET_MAX_PLANES; j++) {
+ if (!io_cfg[i].mem_handle[j])
+ break;
+
+ if (GET_FD_FROM_HANDLE(io_cfg[i].mem_handle[j]) ==
+ GET_FD_FROM_HANDLE(pf_buf_info)) {
+ CAM_INFO(CAM_JPEG,
+ "Found PF at port: %d mem %x fd: %x",
+ io_cfg[i].resource_type,
+ io_cfg[i].mem_handle[j],
+ pf_buf_info);
+ if (mem_found)
+ *mem_found = true;
+ }
+
+ CAM_INFO(CAM_JPEG, "port: %d f: %u format: %d dir %d",
+ io_cfg[i].resource_type,
+ io_cfg[i].fence,
+ io_cfg[i].format,
+ io_cfg[i].direction);
+
+ mmu_hdl = cam_mem_is_secure_buf(
+ io_cfg[i].mem_handle[j]) ? sec_mmu_hdl :
+ iommu_hdl;
+ rc = cam_mem_get_io_buf(io_cfg[i].mem_handle[j],
+ mmu_hdl, &iova_addr, &src_buf_size);
+ if (rc < 0) {
+ CAM_ERR(CAM_UTIL, "get src buf address fail");
+ continue;
+ }
+ if (iova_addr >> 32) {
+ CAM_ERR(CAM_JPEG, "Invalid mapped address");
+ rc = -EINVAL;
+ continue;
+ }
+
+ CAM_INFO(CAM_JPEG,
+ "pln %d w %d h %d size %d addr 0x%x offset 0x%x memh %x",
+ j, io_cfg[i].planes[j].width,
+ io_cfg[i].planes[j].height,
+ (int32_t)src_buf_size,
+ (unsigned int)iova_addr,
+ io_cfg[i].offsets[j],
+ io_cfg[i].mem_handle[j]);
+
+ iova_addr += io_cfg[i].offsets[j];
+ }
+ }
+}
static int cam_jpeg_mgr_prepare_hw_update(void *hw_mgr_priv,
void *prepare_hw_update_args)
@@ -675,6 +745,7 @@ static int cam_jpeg_mgr_prepare_hw_update(void *hw_mgr_priv,
CAM_DBG(CAM_JPEG, "packet = %pK io_cfg_ptr = %pK size = %lu",
(void *)packet, (void *)io_cfg_ptr,
sizeof(struct cam_buf_io_cfg));
+ prepare_args->pf_data->packet = packet;
prepare_args->num_out_map_entries = 0;
@@ -721,7 +792,7 @@ static int cam_jpeg_mgr_prepare_hw_update(void *hw_mgr_priv,
(uint32_t)cmd_desc[i].offset;
}
prepare_args->num_hw_update_entries = j;
- prepare_args->priv = (void *)packet->header.request_id;
+ prepare_args->priv = (void *)(uintptr_t)packet->header.request_id;
CAM_DBG(CAM_JPEG, "will wait on input sync sync_id %d",
prepare_args->in_map_entries[0].sync_id);
@@ -827,7 +898,7 @@ static int cam_jpeg_mgr_flush_req(void *hw_mgr_priv,
struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
struct cam_jpeg_hw_cfg_req *cfg_req = NULL;
struct cam_jpeg_hw_cfg_req *req_temp = NULL;
- int64_t request_id = 0;
+ long request_id = 0;
uint32_t dev_type;
struct cam_jpeg_hw_cfg_req *p_cfg_req = NULL;
bool b_req_found = false;
@@ -842,13 +913,13 @@ static int cam_jpeg_mgr_flush_req(void *hw_mgr_priv,
if (flush_args->num_req_pending)
return 0;
- request_id = (int64_t)flush_args->flush_req_active[0];
+ request_id = (uintptr_t)flush_args->flush_req_active[0];
if (!flush_args->num_req_active)
return 0;
if (request_id <= 0) {
- CAM_ERR(CAM_JPEG, "Invalid red id %lld", request_id);
+ CAM_ERR(CAM_JPEG, "Invalid red id %ld", request_id);
return -EINVAL;
}
@@ -885,7 +956,7 @@ static int cam_jpeg_mgr_flush_req(void *hw_mgr_priv,
}
if (!b_req_found) {
- CAM_ERR(CAM_JPEG, "req not found %lld", request_id);
+ CAM_ERR(CAM_JPEG, "req not found %ld", request_id);
return -EINVAL;
}
@@ -1189,7 +1260,7 @@ static int cam_jpeg_mgr_get_hw_caps(void *hw_mgr_priv, void *hw_caps_args)
mutex_lock(&hw_mgr->hw_mgr_mutex);
- if (copy_to_user((void __user *)query_cap->caps_handle,
+ if (copy_to_user(u64_to_user_ptr(query_cap->caps_handle),
&g_jpeg_hw_mgr.jpeg_caps,
sizeof(struct cam_jpeg_query_cap_cmd))) {
CAM_ERR(CAM_JPEG, "copy_to_user failed");
@@ -1410,7 +1481,35 @@ num_dev_failed:
return rc;
}
-int cam_jpeg_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl)
+static int cam_jpeg_mgr_cmd(void *hw_mgr_priv, void *cmd_args)
+{
+ int rc = 0;
+ struct cam_hw_cmd_args *hw_cmd_args = cmd_args;
+ struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
+
+ if (!hw_mgr_priv || !cmd_args) {
+ CAM_ERR(CAM_JPEG, "Invalid arguments");
+ return -EINVAL;
+ }
+
+ switch (hw_cmd_args->cmd_type) {
+ case CAM_HW_MGR_CMD_DUMP_PF_INFO:
+ cam_jpeg_mgr_print_io_bufs(
+ hw_cmd_args->u.pf_args.pf_data.packet,
+ hw_mgr->iommu_hdl,
+ hw_mgr->iommu_sec_hdl,
+ hw_cmd_args->u.pf_args.buf_info,
+ hw_cmd_args->u.pf_args.mem_found);
+ break;
+ default:
+ CAM_ERR(CAM_JPEG, "Invalid cmd");
+ }
+
+ return rc;
+}
+
+int cam_jpeg_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl,
+ int *iommu_hdl)
{
int i, rc;
uint32_t num_dev;
@@ -1434,6 +1533,7 @@ int cam_jpeg_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl)
hw_mgr_intf->hw_config = cam_jpeg_mgr_config_hw;
hw_mgr_intf->hw_flush = cam_jpeg_mgr_hw_flush;
hw_mgr_intf->hw_stop = cam_jpeg_mgr_hw_stop;
+ hw_mgr_intf->hw_cmd = cam_jpeg_mgr_cmd;
mutex_init(&g_jpeg_hw_mgr.hw_mgr_mutex);
spin_lock_init(&g_jpeg_hw_mgr.hw_mgr_lock);
@@ -1495,6 +1595,9 @@ int cam_jpeg_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl)
goto cdm_iommu_failed;
}
+ if (iommu_hdl)
+ *iommu_hdl = g_jpeg_hw_mgr.iommu_hdl;
+
return rc;
cdm_iommu_failed:
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h
index 5e1016725095..82022ec24a77 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h
@@ -38,7 +38,7 @@
struct cam_jpeg_process_frame_work_data_t {
uint32_t type;
void *data;
- uint64_t request_id;
+ uintptr_t request_id;
};
/**
@@ -81,7 +81,7 @@ struct cam_jpeg_hw_cfg_req {
struct list_head list;
struct cam_hw_config_args hw_cfg_args;
uint32_t dev_type;
- int64_t req_id;
+ uintptr_t req_id;
};
/**
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_mgr_intf.h
index 5fb4e3ad3399..5705890cd109 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_mgr_intf.h
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_mgr_intf.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -17,8 +17,7 @@
#include <uapi/media/cam_defs.h>
#include <linux/of.h>
-
int cam_jpeg_hw_mgr_init(struct device_node *of_node,
- uint64_t *hw_mgr_hdl);
+ uint64_t *hw_mgr_hdl, int *iommu_hdl);
#endif /* CAM_JPEG_HW_MGR_INTF_H */
diff --git a/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.c b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.c
index 3d0266d6fb13..26bdc31250d1 100644
--- a/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.c
+++ b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -16,16 +16,16 @@
#include "cam_debug_util.h"
#include "cam_lrme_context.h"
-static const char lrme_dev_name[] = "lrme";
+static const char lrme_dev_name[] = "cam-lrme";
static int __cam_lrme_ctx_acquire_dev_in_available(struct cam_context *ctx,
struct cam_acquire_dev_cmd *cmd)
{
int rc = 0;
- uint64_t ctxt_to_hw_map = (uint64_t)ctx->ctxt_to_hw_map;
+ uintptr_t ctxt_to_hw_map = (uintptr_t)ctx->ctxt_to_hw_map;
struct cam_lrme_context *lrme_ctx = ctx->ctx_priv;
- CAM_DBG(CAM_LRME, "Enter");
+ CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id);
rc = cam_context_acquire_dev_to_hw(ctx, cmd);
if (rc) {
@@ -46,7 +46,7 @@ static int __cam_lrme_ctx_release_dev_in_acquired(struct cam_context *ctx,
{
int rc = 0;
- CAM_DBG(CAM_LRME, "Enter");
+ CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id);
rc = cam_context_release_dev_to_hw(ctx, cmd);
if (rc) {
@@ -64,7 +64,7 @@ static int __cam_lrme_ctx_start_dev_in_acquired(struct cam_context *ctx,
{
int rc = 0;
- CAM_DBG(CAM_LRME, "Enter");
+ CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id);
rc = cam_context_start_dev_to_hw(ctx, cmd);
if (rc) {
@@ -82,7 +82,7 @@ static int __cam_lrme_ctx_config_dev_in_activated(struct cam_context *ctx,
{
int rc;
- CAM_DBG(CAM_LRME, "Enter");
+ CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id);
rc = cam_context_prepare_dev_to_hw(ctx, cmd);
if (rc) {
@@ -98,6 +98,8 @@ static int __cam_lrme_ctx_flush_dev_in_activated(struct cam_context *ctx,
{
int rc;
+ CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id);
+
rc = cam_context_flush_dev_to_hw(ctx, cmd);
if (rc)
CAM_ERR(CAM_LRME, "Failed to flush device");
@@ -109,7 +111,7 @@ static int __cam_lrme_ctx_stop_dev_in_activated(struct cam_context *ctx,
{
int rc = 0;
- CAM_DBG(CAM_LRME, "Enter");
+ CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id);
rc = cam_context_stop_dev_to_hw(ctx);
if (rc) {
@@ -127,7 +129,7 @@ static int __cam_lrme_ctx_release_dev_in_activated(struct cam_context *ctx,
{
int rc = 0;
- CAM_DBG(CAM_LRME, "Enter");
+ CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id);
rc = __cam_lrme_ctx_stop_dev_in_activated(ctx, NULL);
if (rc) {
@@ -182,6 +184,7 @@ static struct cam_ctx_ops
/* Acquired */
{
.ioctl_ops = {
+ .config_dev = __cam_lrme_ctx_config_dev_in_activated,
.release_dev = __cam_lrme_ctx_release_dev_in_acquired,
.start_dev = __cam_lrme_ctx_start_dev_in_acquired,
},
diff --git a/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.h b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.h
index 4c705c139405..dc1c8f4c10aa 100644
--- a/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.h
+++ b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.h
@@ -19,7 +19,7 @@
#include "cam_req_mgr_interface.h"
#include "cam_sync_api.h"
-#define CAM_LRME_CTX_INDEX_SHIFT 32
+#define CAM_LRME_CTX_INDEX_SHIFT 16
/**
* struct cam_lrme_context
diff --git a/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_dev.c b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_dev.c
index a4ee1040e4c8..6b1250aea714 100644
--- a/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_dev.c
+++ b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_dev.c
@@ -81,6 +81,7 @@ static int cam_lrme_dev_open(struct v4l2_subdev *sd,
static int cam_lrme_dev_close(struct v4l2_subdev *sd,
struct v4l2_subdev_fh *fh)
{
+ int rc = 0;
struct cam_lrme_dev *lrme_dev = g_lrme_dev;
struct cam_node *node = v4l2_get_subdevdata(sd);
@@ -90,18 +91,25 @@ static int cam_lrme_dev_close(struct v4l2_subdev *sd,
}
mutex_lock(&lrme_dev->lock);
- lrme_dev->open_cnt--;
- mutex_unlock(&lrme_dev->lock);
+ if (lrme_dev->open_cnt <= 0) {
+ CAM_DBG(CAM_LRME, "LRME subdev is already closed");
+ rc = -EINVAL;
+ goto end;
+ }
+ lrme_dev->open_cnt--;
if (!node) {
CAM_ERR(CAM_LRME, "Node is NULL");
- return -EINVAL;
+ rc = -EINVAL;
+ goto end;
}
if (lrme_dev->open_cnt == 0)
cam_node_shutdown(node);
- return 0;
+end:
+ mutex_unlock(&lrme_dev->lock);
+ return rc;
}
static const struct v4l2_subdev_internal_ops cam_lrme_subdev_internal_ops = {
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c
index fa8984ca2300..9a716d36a1b1 100644
--- a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -166,13 +166,6 @@ static int cam_lrme_mgr_util_prepare_io_buffer(int32_t iommu_hdl,
io_cfg[i].resource_type,
io_cfg[i].fence, io_cfg[i].format);
- if ((num_in_buf > io_buf_size) ||
- (num_out_buf > io_buf_size)) {
- CAM_ERR(CAM_LRME, "Invalid number of buffers %d %d %d",
- num_in_buf, num_out_buf, io_buf_size);
- return -EINVAL;
- }
-
memset(io_addr, 0, sizeof(io_addr));
for (plane = 0; plane < CAM_PACKET_MAX_PLANES; plane++) {
if (!io_cfg[i].mem_handle[plane])
@@ -188,18 +181,18 @@ static int cam_lrme_mgr_util_prepare_io_buffer(int32_t iommu_hdl,
io_addr[plane] += io_cfg[i].offsets[plane];
- if (io_addr[plane] >> 32) {
- CAM_ERR(CAM_LRME, "Invalid io addr for %d %d",
- plane, rc);
- return -ENOMEM;
- }
-
CAM_DBG(CAM_LRME, "IO Address[%d][%d] : %llu",
io_cfg[i].direction, plane, io_addr[plane]);
}
switch (io_cfg[i].direction) {
case CAM_BUF_INPUT: {
+ if (num_in_buf >= io_buf_size) {
+ CAM_ERR(CAM_LRME,
+ "Invalid number of buffers %d %d %d",
+ num_in_buf, num_out_buf, io_buf_size);
+ return -EINVAL;
+ }
prepare->in_map_entries[num_in_buf].resource_handle =
io_cfg[i].resource_type;
prepare->in_map_entries[num_in_buf].sync_id =
@@ -215,6 +208,12 @@ static int cam_lrme_mgr_util_prepare_io_buffer(int32_t iommu_hdl,
break;
}
case CAM_BUF_OUTPUT: {
+ if (num_out_buf >= io_buf_size) {
+ CAM_ERR(CAM_LRME,
+ "Invalid number of buffers %d %d %d",
+ num_in_buf, num_out_buf, io_buf_size);
+ return -EINVAL;
+ }
prepare->out_map_entries[num_out_buf].resource_handle =
io_cfg[i].resource_type;
prepare->out_map_entries[num_out_buf].sync_id =
@@ -571,12 +570,13 @@ static int cam_lrme_mgr_get_caps(void *hw_mgr_priv, void *hw_get_caps_args)
if (sizeof(struct cam_lrme_query_cap_cmd) != args->size) {
CAM_ERR(CAM_LRME,
- "sizeof(struct cam_query_cap_cmd) = %lu, args->size = %d",
+ "sizeof(struct cam_query_cap_cmd) = %zu, args->size = %d",
sizeof(struct cam_query_cap_cmd), args->size);
return -EFAULT;
}
- if (copy_to_user((void __user *)args->caps_handle, &(hw_mgr->lrme_caps),
+ if (copy_to_user(u64_to_user_ptr(args->caps_handle),
+ &(hw_mgr->lrme_caps),
sizeof(struct cam_lrme_query_cap_cmd))) {
CAM_ERR(CAM_LRME, "copy to user failed");
return -EFAULT;
@@ -591,7 +591,7 @@ static int cam_lrme_mgr_hw_acquire(void *hw_mgr_priv, void *hw_acquire_args)
struct cam_hw_acquire_args *args =
(struct cam_hw_acquire_args *)hw_acquire_args;
struct cam_lrme_acquire_args lrme_acquire_args;
- uint64_t device_index;
+ uintptr_t device_index;
if (!hw_mgr_priv || !args) {
CAM_ERR(CAM_LRME,
@@ -612,7 +612,7 @@ static int cam_lrme_mgr_hw_acquire(void *hw_mgr_priv, void *hw_acquire_args)
CAM_DBG(CAM_LRME, "Get device id %llu", device_index);
if (device_index >= hw_mgr->device_count) {
- CAM_ERR(CAM_LRME, "Get wrong device id %llu", device_index);
+ CAM_ERR(CAM_LRME, "Get wrong device id %lu", device_index);
return -EINVAL;
}
@@ -667,7 +667,7 @@ static int cam_lrme_mgr_hw_flush(void *hw_mgr_priv, void *hw_flush_args)
}
args = (struct cam_hw_flush_args *)hw_flush_args;
- device_index = ((uint64_t)args->ctxt_to_hw_map & 0xF);
+ device_index = ((uintptr_t)args->ctxt_to_hw_map & 0xF);
if (device_index >= hw_mgr->device_count) {
CAM_ERR(CAM_LRME, "Invalid device index %d", device_index);
return -EPERM;
@@ -765,6 +765,12 @@ static int cam_lrme_mgr_hw_start(void *hw_mgr_priv, void *hw_start_args)
return -EINVAL;
}
+ rc = hw_device->hw_intf.hw_ops.process_cmd(
+ hw_device->hw_intf.hw_priv,
+ CAM_LRME_HW_CMD_DUMP_REGISTER,
+ &g_lrme_hw_mgr.debugfs_entry.dump_register,
+ sizeof(bool));
+
return rc;
}
@@ -963,6 +969,35 @@ static int cam_lrme_mgr_hw_config(void *hw_mgr_priv,
return rc;
}
+static int cam_lrme_mgr_create_debugfs_entry(void)
+{
+ int rc = 0;
+
+ g_lrme_hw_mgr.debugfs_entry.dentry =
+ debugfs_create_dir("camera_lrme", NULL);
+ if (!g_lrme_hw_mgr.debugfs_entry.dentry) {
+ CAM_ERR(CAM_LRME, "failed to create dentry");
+ return -ENOMEM;
+ }
+
+ if (!debugfs_create_bool("dump_register",
+ 0644,
+ g_lrme_hw_mgr.debugfs_entry.dentry,
+ &g_lrme_hw_mgr.debugfs_entry.dump_register)) {
+ CAM_ERR(CAM_LRME, "failed to create dump register entry");
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ return rc;
+
+err:
+ debugfs_remove_recursive(g_lrme_hw_mgr.debugfs_entry.dentry);
+ g_lrme_hw_mgr.debugfs_entry.dentry = NULL;
+ return rc;
+}
+
+
int cam_lrme_mgr_register_device(
struct cam_hw_intf *lrme_hw_intf,
struct cam_iommu_handle *device_iommu,
@@ -1113,6 +1148,8 @@ int cam_lrme_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf,
g_lrme_hw_mgr.event_cb = cam_lrme_dev_buf_done_cb;
+ cam_lrme_mgr_create_debugfs_entry();
+
CAM_DBG(CAM_LRME, "Hw mgr init done");
return rc;
}
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.h b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.h
index f7ce4d23cb63..87419cf59e00 100644
--- a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -30,13 +30,13 @@
#define CAM_LRME_WORKQ_NUM_TASK 10
#define CAM_LRME_DECODE_DEVICE_INDEX(ctxt_to_hw_map) \
- ((uint64_t)ctxt_to_hw_map & 0xF)
+ ((uintptr_t)ctxt_to_hw_map & 0xF)
#define CAM_LRME_DECODE_PRIORITY(ctxt_to_hw_map) \
- (((uint64_t)ctxt_to_hw_map & 0xF0) >> 4)
+ (((uintptr_t)ctxt_to_hw_map & 0xF0) >> 4)
#define CAM_LRME_DECODE_CTX_INDEX(ctxt_to_hw_map) \
- ((uint64_t)ctxt_to_hw_map >> CAM_LRME_CTX_INDEX_SHIFT)
+ ((uint64_t)(uintptr_t)ctxt_to_hw_map >> CAM_LRME_CTX_INDEX_SHIFT)
/**
* enum cam_lrme_hw_mgr_ctx_priority
@@ -52,13 +52,24 @@ enum cam_lrme_hw_mgr_ctx_priority {
/**
* struct cam_lrme_mgr_work_data : HW Mgr work data
*
- * hw_device : Pointer to the hw device
+ * @hw_device : Pointer to the hw device
*/
struct cam_lrme_mgr_work_data {
struct cam_lrme_device *hw_device;
};
/**
+ * struct cam_lrme_debugfs_entry : debugfs entry struct
+ *
+ * @dentry : entry of debugfs
+ * @dump_register : flag to dump registers
+ */
+struct cam_lrme_debugfs_entry {
+ struct dentry *dentry;
+ bool dump_register;
+};
+
+/**
* struct cam_lrme_device : LRME HW device
*
* @hw_caps : HW device's capabilities
@@ -98,6 +109,7 @@ struct cam_lrme_device {
* @frame_req : List of frame request to use
* @lrme_caps : LRME capabilities
* @event_cb : IRQ callback function
+ * @debugfs_entry : debugfs entry to set debug prop
*/
struct cam_lrme_hw_mgr {
uint32_t device_count;
@@ -110,6 +122,7 @@ struct cam_lrme_hw_mgr {
struct cam_lrme_frame_request frame_req[CAM_CTX_REQ_MAX * CAM_CTX_MAX];
struct cam_lrme_query_cap_cmd lrme_caps;
cam_hw_event_cb_func event_cb;
+ struct cam_lrme_debugfs_entry debugfs_entry;
};
int cam_lrme_mgr_register_device(struct cam_hw_intf *lrme_hw_intf,
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c
index 595bb8182c8f..a5f9ff17ad22 100644
--- a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c
@@ -14,6 +14,20 @@
#include "cam_lrme_hw_soc.h"
#include "cam_smmu_api.h"
+static void cam_lrme_dump_registers(void __iomem *base)
+{
+ /* dump the clc registers */
+ cam_io_dump(base, 0x60, (0xc0 - 0x60) / 0x4);
+ /* dump the fe and we registers */
+ cam_io_dump(base, 0x200, (0x29c - 0x200) / 0x4);
+ cam_io_dump(base, 0x2f0, (0x330 - 0x2f0) / 0x4);
+ cam_io_dump(base, 0x500, (0x5b4 - 0x500) / 0x4);
+ cam_io_dump(base, 0x700, (0x778 - 0x700) / 0x4);
+ cam_io_dump(base, 0x800, (0x878 - 0x800) / 0x4);
+ /* dump lrme sw registers, interrupts */
+ cam_io_dump(base, 0x900, (0x928 - 0x900) / 0x4);
+}
+
static void cam_lrme_cdm_write_reg_val_pair(uint32_t *buffer,
uint32_t *index, uint32_t reg_offset, uint32_t reg_value)
{
@@ -64,7 +78,8 @@ static void cam_lrme_hw_util_fill_fe_reg(struct cam_lrme_hw_io_buffer *io_buf,
cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
hw_info->bus_rd_reg.bus_client_reg[index].unpack_cfg_0,
0x0);
- else if (io_buf->io_cfg->format == CAM_FORMAT_Y_ONLY)
+ else if (io_buf->io_cfg->format == CAM_FORMAT_Y_ONLY ||
+ io_buf->io_cfg->format == CAM_FORMAT_PLAIN8)
cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
hw_info->bus_rd_reg.bus_client_reg[index].unpack_cfg_0,
0x1);
@@ -567,6 +582,8 @@ static int cam_lrme_hw_util_process_err(struct cam_hw_info *lrme_hw)
lrme_core->state);
}
+ cam_lrme_dump_registers(lrme_hw->soc_info.reg_map[0].mem_base);
+
CAM_ERR_RATE_LIMIT(CAM_LRME, "Start recovery");
lrme_core->state = CAM_LRME_CORE_STATE_RECOVERY;
rc = cam_lrme_hw_util_reset(lrme_hw, CAM_LRME_HW_RESET_TYPE_HW_RESET);
@@ -610,6 +627,9 @@ static int cam_lrme_hw_util_process_reg_update(
lrme_core->req_proc = lrme_core->req_submit;
lrme_core->req_submit = NULL;
+ if (lrme_core->dump_flag)
+ cam_lrme_dump_registers(lrme_hw->soc_info.reg_map[0].mem_base);
+
return 0;
}
@@ -654,13 +674,13 @@ void cam_lrme_set_irq(struct cam_hw_info *lrme_hw,
cam_io_w_mb(0xFFFF,
soc_info->reg_map[0].mem_base +
hw_info->titan_reg.top_irq_mask);
- cam_io_w_mb(0xFFFF,
+ cam_io_w_mb(0xFFFFF,
soc_info->reg_map[0].mem_base +
hw_info->bus_wr_reg.common_reg.irq_mask_0);
- cam_io_w_mb(0xFFFF,
+ cam_io_w_mb(0xFFFFF,
soc_info->reg_map[0].mem_base +
hw_info->bus_wr_reg.common_reg.irq_mask_1);
- cam_io_w_mb(0xFFFF,
+ cam_io_w_mb(0xFFFFF,
soc_info->reg_map[0].mem_base +
hw_info->bus_rd_reg.common_reg.irq_mask);
break;
@@ -900,7 +920,7 @@ int cam_lrme_hw_submit_req(void *hw_priv, void *hw_submit_args,
if (sizeof(struct cam_lrme_hw_submit_args) != arg_size) {
CAM_ERR(CAM_LRME,
- "size of args %lu, arg_size %d",
+ "size of args %zu, arg_size %d",
sizeof(struct cam_lrme_hw_submit_args), arg_size);
return -EINVAL;
}
@@ -952,6 +972,7 @@ int cam_lrme_hw_submit_req(void *hw_priv, void *hw_submit_args,
}
lrme_core->req_submit = frame_req;
+
mutex_unlock(&lrme_hw->hw_mutex);
CAM_DBG(CAM_LRME, "Release lock, submit done for req %llu",
frame_req->req_id);
@@ -1235,6 +1256,14 @@ int cam_lrme_hw_process_cmd(void *hw_priv, uint32_t cmd_type,
break;
}
+ case CAM_LRME_HW_CMD_DUMP_REGISTER: {
+ struct cam_lrme_core *lrme_core =
+ (struct cam_lrme_core *)lrme_hw->core_info;
+ lrme_core->dump_flag = *(bool *)cmd_args;
+ CAM_DBG(CAM_LRME, "dump_flag %d", lrme_core->dump_flag);
+ break;
+ }
+
default:
break;
}
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.h b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.h
index bf2f37084cd1..cf8e7408ba3a 100644
--- a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.h
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.h
@@ -137,6 +137,7 @@ struct cam_lrme_core {
struct cam_lrme_frame_request *req_submit;
struct cam_lrme_cdm_info *hw_cdm_info;
uint32_t hw_idx;
+ bool dump_flag;
};
/**
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_intf.h b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_intf.h
index d16b174767cc..26b5608aedd7 100644
--- a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_intf.h
@@ -65,11 +65,13 @@ enum cam_lrme_cb_type {
* @CAM_LRME_HW_CMD_prepare_hw_update : Prepare HW update
* @CAM_LRME_HW_CMD_REGISTER_CB : register HW manager callback
* @CAM_LRME_HW_CMD_SUBMIT : Submit frame to HW
+ * @CAM_LRME_HW_CMD_DUMP_REGISTER : dump register values
*/
enum cam_lrme_hw_cmd_type {
CAM_LRME_HW_CMD_PREPARE_HW_UPDATE,
CAM_LRME_HW_CMD_REGISTER_CB,
CAM_LRME_HW_CMD_SUBMIT,
+ CAM_LRME_HW_CMD_DUMP_REGISTER,
};
/**
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
index 0e77a4c01fb4..f2c243e8c7a9 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
@@ -25,11 +25,11 @@
static struct cam_mem_table tbl;
static int cam_mem_util_map_cpu_va(struct ion_handle *hdl,
- uint64_t *vaddr,
+ uintptr_t *vaddr,
size_t *len)
{
*vaddr = (uintptr_t)ion_map_kernel(tbl.client, hdl);
- if (IS_ERR_OR_NULL((void *)*vaddr)) {
+ if (IS_ERR_OR_NULL((void *)(uintptr_t)(*vaddr))) {
CAM_ERR(CAM_MEM, "kernel map fail");
return -ENOSPC;
}
@@ -183,12 +183,12 @@ handle_mismatch:
}
EXPORT_SYMBOL(cam_mem_get_io_buf);
-int cam_mem_get_cpu_buf(int32_t buf_handle, uint64_t *vaddr_ptr, size_t *len)
+int cam_mem_get_cpu_buf(int32_t buf_handle, uintptr_t *vaddr_ptr, size_t *len)
{
int rc = 0;
int idx;
struct ion_handle *ion_hdl = NULL;
- uint64_t kvaddr = 0;
+ uintptr_t kvaddr = 0;
size_t klen = 0;
if (!buf_handle || !vaddr_ptr || !len)
@@ -288,7 +288,7 @@ int cam_mem_mgr_cache_ops(struct cam_mem_cache_ops_cmd *cmd)
rc = msm_ion_do_cache_op(tbl.client,
tbl.bufq[idx].i_hdl,
- (void *)tbl.bufq[idx].vaddr,
+ (void *)(uintptr_t)tbl.bufq[idx].vaddr,
tbl.bufq[idx].len,
ion_cache_ops);
if (rc)
@@ -926,7 +926,7 @@ int cam_mem_mgr_request_mem(struct cam_mem_mgr_request_desc *inp,
int rc = 0;
uint32_t heap_id;
int32_t ion_flag = 0;
- uint64_t kvaddr;
+ uintptr_t kvaddr;
dma_addr_t iova = 0;
size_t request_len = 0;
uint32_t mem_handle;
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.h
index 83727d20e685..92c366d723f9 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -56,7 +56,7 @@ struct cam_mem_buf_queue {
size_t len;
uint32_t flags;
uint64_t vaddr;
- uint64_t kmdvaddr;
+ uintptr_t kmdvaddr;
bool active;
bool is_imported;
};
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h
index 14b1a678e010..64258e8fb5ee 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h
@@ -43,7 +43,7 @@ struct cam_mem_mgr_request_desc {
* @region : Region to which allocated memory belongs
*/
struct cam_mem_mgr_memory_desc {
- uint64_t kva;
+ uintptr_t kva;
uint32_t iova;
int32_t smmu_hdl;
uint32_t mem_handle;
@@ -92,7 +92,7 @@ int cam_mem_get_io_buf(int32_t buf_handle, int32_t mmu_handle,
*
* @return Status of operation. Negative in case of error. Zero otherwise.
*/
-int cam_mem_get_cpu_buf(int32_t buf_handle, uint64_t *vaddr_ptr,
+int cam_mem_get_cpu_buf(int32_t buf_handle, uintptr_t *vaddr_ptr,
size_t *len);
static inline bool cam_mem_is_secure_buf(int32_t buf_handle)
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
index 460b3dfe59a2..1295e291ce28 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
@@ -1325,9 +1325,9 @@ static struct cam_req_mgr_core_link *__cam_req_mgr_reserve_link(
return NULL;
}
- if (session->num_links >= MAX_LINKS_PER_SESSION) {
+ if (session->num_links >= MAXIMUM_LINKS_PER_SESSION) {
CAM_ERR(CAM_CRM, "Reached max links %d per session limit %d",
- session->num_links, MAX_LINKS_PER_SESSION);
+ session->num_links, MAXIMUM_LINKS_PER_SESSION);
return NULL;
}
@@ -1362,7 +1362,7 @@ static struct cam_req_mgr_core_link *__cam_req_mgr_reserve_link(
mutex_lock(&session->lock);
/* Loop through and find a free index */
- for (i = 0; i < MAX_LINKS_PER_SESSION; i++) {
+ for (i = 0; i < MAXIMUM_LINKS_PER_SESSION; i++) {
if (!session->links[i]) {
CAM_DBG(CAM_CRM,
"Free link index %d found, num_links=%d",
@@ -1372,7 +1372,7 @@ static struct cam_req_mgr_core_link *__cam_req_mgr_reserve_link(
}
}
- if (i == MAX_LINKS_PER_SESSION) {
+ if (i == MAXIMUM_LINKS_PER_SESSION) {
CAM_ERR(CAM_CRM, "Free link index not found");
goto error;
}
@@ -1433,7 +1433,7 @@ static void __cam_req_mgr_unreserve_link(
return;
}
- for (i = 0; i < MAX_LINKS_PER_SESSION; i++) {
+ for (i = 0; i < MAXIMUM_LINKS_PER_SESSION; i++) {
if (session->links[i] == link)
session->links[i] = NULL;
}
@@ -1445,7 +1445,7 @@ static void __cam_req_mgr_unreserve_link(
* of only having 2 links in a given session
*/
session->sync_mode = CAM_REQ_MGR_SYNC_MODE_NO_SYNC;
- for (i = 0; i < MAX_LINKS_PER_SESSION; i++) {
+ for (i = 0; i < MAXIMUM_LINKS_PER_SESSION; i++) {
if (session->links[i])
session->links[i]->sync_link = NULL;
}
@@ -2387,7 +2387,7 @@ int cam_req_mgr_destroy_session(
ses_info->session_hdl,
cam_session->num_links);
- for (i = 0; i < MAX_LINKS_PER_SESSION; i++) {
+ for (i = 0; i < MAXIMUM_LINKS_PER_SESSION; i++) {
link = cam_session->links[i];
if (!link)
@@ -2432,16 +2432,17 @@ int cam_req_mgr_link(struct cam_req_mgr_link_info *link_info)
return -EINVAL;
}
+ mutex_lock(&g_crm_core_dev->crm_lock);
+
/* session hdl's priv data is cam session struct */
cam_session = (struct cam_req_mgr_core_session *)
cam_get_device_priv(link_info->session_hdl);
if (!cam_session) {
CAM_DBG(CAM_CRM, "NULL pointer");
+ mutex_unlock(&g_crm_core_dev->crm_lock);
return -EINVAL;
}
- mutex_lock(&g_crm_core_dev->crm_lock);
-
/* Allocate link struct and map it with session's request queue */
link = __cam_req_mgr_reserve_link(cam_session);
if (!link) {
@@ -2628,7 +2629,8 @@ int cam_req_mgr_sync_config(
}
if ((sync_info->num_links < 0) ||
- (sync_info->num_links > MAX_LINKS_PER_SESSION)) {
+ (sync_info->num_links >
+ MAX_LINKS_PER_SESSION)) {
CAM_ERR(CAM_CRM, "Invalid num links %d", sync_info->num_links);
return -EINVAL;
}
@@ -2777,6 +2779,13 @@ int cam_req_mgr_link_control(struct cam_req_mgr_link_control *control)
goto end;
}
+ if (control->num_links > MAX_LINKS_PER_SESSION) {
+ CAM_ERR(CAM_CRM, "Invalid number of links %d",
+ control->num_links);
+ rc = -EINVAL;
+ goto end;
+ }
+
mutex_lock(&g_crm_core_dev->crm_lock);
for (i = 0; i < control->num_links; i++) {
link = (struct cam_req_mgr_core_link *)
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
index 68ec09b1e89e..8b86931cd1e3 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
@@ -34,6 +34,8 @@
#define SYNC_LINK_SOF_CNT_MAX_LMT 1
+#define MAXIMUM_LINKS_PER_SESSION 4
+
/**
* enum crm_workq_task_type
* @codes: to identify which type of task is present
@@ -353,7 +355,7 @@ struct cam_req_mgr_core_link {
struct cam_req_mgr_core_session {
int32_t session_hdl;
uint32_t num_links;
- struct cam_req_mgr_core_link *links[MAX_LINKS_PER_SESSION];
+ struct cam_req_mgr_core_link *links[MAXIMUM_LINKS_PER_SESSION];
struct list_head entry;
struct mutex lock;
int32_t force_err_recovery;
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
index 0d21064afed7..cb60ef4abb5a 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
@@ -25,6 +25,7 @@
#include "cam_subdev.h"
#include "cam_mem_mgr.h"
#include "cam_debug_util.h"
+#include "cam_common_util.h"
#include <linux/slub_def.h>
#define CAM_REQ_MGR_EVENT_MAX 30
@@ -153,6 +154,10 @@ static unsigned int cam_req_mgr_poll(struct file *f,
static int cam_req_mgr_close(struct file *filep)
{
+ struct v4l2_subdev *sd;
+ struct v4l2_fh *vfh = filep->private_data;
+ struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
+
mutex_lock(&g_dev.cam_lock);
if (g_dev.open_cnt <= 0) {
@@ -161,6 +166,17 @@ static int cam_req_mgr_close(struct file *filep)
}
cam_req_mgr_handle_core_shutdown();
+
+ list_for_each_entry(sd, &g_dev.v4l2_dev->subdevs, list) {
+ if (!(sd->flags & V4L2_SUBDEV_FL_HAS_DEVNODE))
+ continue;
+ if (sd->internal_ops && sd->internal_ops->close) {
+ CAM_DBG(CAM_CRM, "Invoke subdev close for device %s",
+ sd->name);
+ sd->internal_ops->close(sd, subdev_fh);
+ }
+ }
+
g_dev.open_cnt--;
v4l2_fh_release(filep);
@@ -220,14 +236,15 @@ static long cam_private_ioctl(struct file *file, void *fh,
return -EINVAL;
if (copy_from_user(&ses_info,
- (void *)k_ioctl->handle,
+ u64_to_user_ptr(k_ioctl->handle),
k_ioctl->size)) {
return -EFAULT;
}
rc = cam_req_mgr_create_session(&ses_info);
if (!rc)
- if (copy_to_user((void *)k_ioctl->handle,
+ if (copy_to_user(
+ u64_to_user_ptr(k_ioctl->handle),
&ses_info, k_ioctl->size))
rc = -EFAULT;
}
@@ -240,7 +257,7 @@ static long cam_private_ioctl(struct file *file, void *fh,
return -EINVAL;
if (copy_from_user(&ses_info,
- (void *)k_ioctl->handle,
+ u64_to_user_ptr(k_ioctl->handle),
k_ioctl->size)) {
return -EFAULT;
}
@@ -256,14 +273,15 @@ static long cam_private_ioctl(struct file *file, void *fh,
return -EINVAL;
if (copy_from_user(&link_info,
- (void *)k_ioctl->handle,
+ u64_to_user_ptr(k_ioctl->handle),
k_ioctl->size)) {
return -EFAULT;
}
rc = cam_req_mgr_link(&link_info);
if (!rc)
- if (copy_to_user((void *)k_ioctl->handle,
+ if (copy_to_user(
+ u64_to_user_ptr(k_ioctl->handle),
&link_info, k_ioctl->size))
rc = -EFAULT;
}
@@ -276,7 +294,7 @@ static long cam_private_ioctl(struct file *file, void *fh,
return -EINVAL;
if (copy_from_user(&unlink_info,
- (void *)k_ioctl->handle,
+ u64_to_user_ptr(k_ioctl->handle),
k_ioctl->size)) {
return -EFAULT;
}
@@ -292,7 +310,7 @@ static long cam_private_ioctl(struct file *file, void *fh,
return -EINVAL;
if (copy_from_user(&sched_req,
- (void *)k_ioctl->handle,
+ u64_to_user_ptr(k_ioctl->handle),
k_ioctl->size)) {
return -EFAULT;
}
@@ -308,7 +326,7 @@ static long cam_private_ioctl(struct file *file, void *fh,
return -EINVAL;
if (copy_from_user(&flush_info,
- (void *)k_ioctl->handle,
+ u64_to_user_ptr(k_ioctl->handle),
k_ioctl->size)) {
return -EFAULT;
}
@@ -324,7 +342,7 @@ static long cam_private_ioctl(struct file *file, void *fh,
return -EINVAL;
if (copy_from_user(&sync_info,
- (void *)k_ioctl->handle,
+ u64_to_user_ptr(k_ioctl->handle),
k_ioctl->size)) {
return -EFAULT;
}
@@ -339,7 +357,7 @@ static long cam_private_ioctl(struct file *file, void *fh,
return -EINVAL;
if (copy_from_user(&cmd,
- (void *)k_ioctl->handle,
+ u64_to_user_ptr(k_ioctl->handle),
k_ioctl->size)) {
rc = -EFAULT;
break;
@@ -347,7 +365,8 @@ static long cam_private_ioctl(struct file *file, void *fh,
rc = cam_mem_mgr_alloc_and_map(&cmd);
if (!rc)
- if (copy_to_user((void *)k_ioctl->handle,
+ if (copy_to_user(
+ u64_to_user_ptr(k_ioctl->handle),
&cmd, k_ioctl->size)) {
rc = -EFAULT;
break;
@@ -361,7 +380,7 @@ static long cam_private_ioctl(struct file *file, void *fh,
return -EINVAL;
if (copy_from_user(&cmd,
- (void *)k_ioctl->handle,
+ u64_to_user_ptr(k_ioctl->handle),
k_ioctl->size)) {
rc = -EFAULT;
break;
@@ -369,7 +388,8 @@ static long cam_private_ioctl(struct file *file, void *fh,
rc = cam_mem_mgr_map(&cmd);
if (!rc)
- if (copy_to_user((void *)k_ioctl->handle,
+ if (copy_to_user(
+ u64_to_user_ptr(k_ioctl->handle),
&cmd, k_ioctl->size)) {
rc = -EFAULT;
break;
@@ -383,7 +403,7 @@ static long cam_private_ioctl(struct file *file, void *fh,
return -EINVAL;
if (copy_from_user(&cmd,
- (void *)k_ioctl->handle,
+ u64_to_user_ptr(k_ioctl->handle),
k_ioctl->size)) {
rc = -EFAULT;
break;
@@ -399,7 +419,7 @@ static long cam_private_ioctl(struct file *file, void *fh,
return -EINVAL;
if (copy_from_user(&cmd,
- (void *)k_ioctl->handle,
+ u64_to_user_ptr(k_ioctl->handle),
k_ioctl->size)) {
rc = -EFAULT;
break;
@@ -417,7 +437,7 @@ static long cam_private_ioctl(struct file *file, void *fh,
return -EINVAL;
if (copy_from_user(&cmd,
- (void __user *)k_ioctl->handle,
+ u64_to_user_ptr(k_ioctl->handle),
k_ioctl->size)) {
rc = -EFAULT;
break;
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
index 3798ef8e6d5f..68b5569097df 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
@@ -228,7 +228,7 @@ int cam_req_mgr_workq_create(char *name, int32_t num_tasks,
crm_workq->task.num_task,
GFP_KERNEL);
if (!crm_workq->task.pool) {
- CAM_WARN(CAM_CRM, "Insufficient memory %lu",
+ CAM_WARN(CAM_CRM, "Insufficient memory %zu",
sizeof(struct crm_workq_task) *
crm_workq->task.num_task);
kfree(crm_workq);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
index ed0a26b70eff..c14a74d7c862 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
@@ -16,6 +16,7 @@
#include "cam_sensor_util.h"
#include "cam_trace.h"
#include "cam_res_mgr_api.h"
+#include "cam_common_util.h"
int32_t cam_actuator_construct_default_power_setting(
struct cam_sensor_power_ctrl_t *power_info)
@@ -141,7 +142,7 @@ static int32_t cam_actuator_power_down(struct cam_actuator_ctrl_t *a_ctrl)
CAM_ERR(CAM_ACTUATOR, "failed: power_info %pK", power_info);
return -EINVAL;
}
- rc = msm_camera_power_down(power_info, soc_info);
+ rc = cam_sensor_util_power_down(power_info, soc_info);
if (rc) {
CAM_ERR(CAM_ACTUATOR, "power down the core is failed:%d", rc);
return rc;
@@ -301,7 +302,7 @@ int32_t cam_actuator_apply_request(struct cam_req_mgr_apply_request *apply)
trace_cam_apply_req("Actuator", apply->request_id);
CAM_DBG(CAM_ACTUATOR, "Request Id: %lld", apply->request_id);
-
+ mutex_lock(&(a_ctrl->actuator_mutex));
if ((apply->request_id ==
a_ctrl->i2c_data.per_frame[request_id].request_id) &&
(a_ctrl->i2c_data.per_frame[request_id].is_settings_valid)
@@ -312,7 +313,7 @@ int32_t cam_actuator_apply_request(struct cam_req_mgr_apply_request *apply)
CAM_ERR(CAM_ACTUATOR,
"Failed in applying the request: %lld\n",
apply->request_id);
- return rc;
+ goto release_mutex;
}
}
del_req_id = (request_id +
@@ -327,12 +328,14 @@ int32_t cam_actuator_apply_request(struct cam_req_mgr_apply_request *apply)
CAM_ERR(CAM_ACTUATOR,
"Fail deleting the req: %d err: %d\n",
del_req_id, rc);
- return rc;
+ goto release_mutex;
}
} else {
CAM_DBG(CAM_ACTUATOR, "No Valid Req to clean Up");
}
+release_mutex:
+ mutex_unlock(&(a_ctrl->actuator_mutex));
return rc;
}
@@ -352,6 +355,8 @@ int32_t cam_actuator_establish_link(
CAM_ERR(CAM_ACTUATOR, "Device data is NULL");
return -EINVAL;
}
+
+ mutex_lock(&(a_ctrl->actuator_mutex));
if (link->link_enable) {
a_ctrl->bridge_intf.link_hdl = link->link_hdl;
a_ctrl->bridge_intf.crm_cb = link->crm_cb;
@@ -359,6 +364,7 @@ int32_t cam_actuator_establish_link(
a_ctrl->bridge_intf.link_hdl = -1;
a_ctrl->bridge_intf.crm_cb = NULL;
}
+ mutex_unlock(&(a_ctrl->actuator_mutex));
return 0;
}
@@ -409,7 +415,7 @@ int32_t cam_actuator_i2c_pkt_parse(struct cam_actuator_ctrl_t *a_ctrl,
size_t len_of_buff = 0;
uint32_t *offset = NULL;
uint32_t *cmd_buf = NULL;
- uint64_t generic_ptr;
+ uintptr_t generic_ptr;
struct common_header *cmm_hdr = NULL;
struct cam_control *ioctl_ctrl = NULL;
struct cam_packet *csl_packet = NULL;
@@ -431,11 +437,12 @@ int32_t cam_actuator_i2c_pkt_parse(struct cam_actuator_ctrl_t *a_ctrl,
power_info = &soc_private->power_info;
ioctl_ctrl = (struct cam_control *)arg;
- if (copy_from_user(&config, (void __user *) ioctl_ctrl->handle,
+ if (copy_from_user(&config,
+ u64_to_user_ptr(ioctl_ctrl->handle),
sizeof(config)))
return -EFAULT;
rc = cam_mem_get_cpu_buf(config.packet_handle,
- (uint64_t *)&generic_ptr, &len_of_buff);
+ &generic_ptr, &len_of_buff);
if (rc < 0) {
CAM_ERR(CAM_ACTUATOR, "Error in converting command Handle %d",
rc);
@@ -449,7 +456,8 @@ int32_t cam_actuator_i2c_pkt_parse(struct cam_actuator_ctrl_t *a_ctrl,
return -EINVAL;
}
- csl_packet = (struct cam_packet *)(generic_ptr + config.offset);
+ csl_packet =
+ (struct cam_packet *)(generic_ptr + (uint32_t)config.offset);
CAM_DBG(CAM_ACTUATOR, "Pkt opcode: %d", csl_packet->header.op_code);
switch (csl_packet->header.op_code & 0xFFFFFF) {
@@ -464,7 +472,7 @@ int32_t cam_actuator_i2c_pkt_parse(struct cam_actuator_ctrl_t *a_ctrl,
if (!total_cmd_buf_in_bytes)
continue;
rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle,
- (uint64_t *)&generic_ptr, &len_of_buff);
+ &generic_ptr, &len_of_buff);
if (rc < 0) {
CAM_ERR(CAM_ACTUATOR, "Failed to get cpu buf");
return rc;
@@ -704,7 +712,7 @@ int32_t cam_actuator_driver_cmd(struct cam_actuator_ctrl_t *a_ctrl,
goto release_mutex;
}
rc = copy_from_user(&actuator_acq_dev,
- (void __user *) cmd->handle,
+ u64_to_user_ptr(cmd->handle),
sizeof(actuator_acq_dev));
if (rc < 0) {
CAM_ERR(CAM_ACTUATOR, "Failed Copying from user\n");
@@ -725,7 +733,8 @@ int32_t cam_actuator_driver_cmd(struct cam_actuator_ctrl_t *a_ctrl,
CAM_DBG(CAM_ACTUATOR, "Device Handle: %d",
actuator_acq_dev.device_handle);
- if (copy_to_user((void __user *) cmd->handle, &actuator_acq_dev,
+ if (copy_to_user(u64_to_user_ptr(cmd->handle),
+ &actuator_acq_dev,
sizeof(struct cam_sensor_acquire_dev))) {
CAM_ERR(CAM_ACTUATOR, "Failed Copy to User");
rc = -EFAULT;
@@ -778,7 +787,8 @@ int32_t cam_actuator_driver_cmd(struct cam_actuator_ctrl_t *a_ctrl,
struct cam_actuator_query_cap actuator_cap = {0};
actuator_cap.slot_info = a_ctrl->soc_info.index;
- if (copy_to_user((void __user *) cmd->handle, &actuator_cap,
+ if (copy_to_user(u64_to_user_ptr(cmd->handle),
+ &actuator_cap,
sizeof(struct cam_actuator_query_cap))) {
CAM_ERR(CAM_ACTUATOR, "Failed Copy to User");
rc = -EFAULT;
@@ -891,7 +901,9 @@ int32_t cam_actuator_flush_request(struct cam_req_mgr_flush_request *flush_req)
continue;
if (i2c_set->is_settings_valid == 1) {
+ mutex_lock(&(a_ctrl->actuator_mutex));
rc = delete_request(i2c_set);
+ mutex_unlock(&(a_ctrl->actuator_mutex));
if (rc < 0)
CAM_ERR(CAM_ACTUATOR,
"delete request: %lld rc: %d",
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h
index c4333a023607..e581823a9add 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -34,6 +34,7 @@
#include "cam_sensor_util.h"
#include "cam_soc_util.h"
#include "cam_debug_util.h"
+#include "cam_context.h"
#define NUM_MASTERS 2
#define NUM_QUEUES 2
@@ -92,6 +93,7 @@ struct intf_params {
/**
* struct cam_actuator_ctrl_t
+ * @device_name: Device name
* @i2c_driver: I2C device info
* @pdev: Platform device
* @cci_i2c_master: I2C structure
@@ -107,9 +109,9 @@ struct intf_params {
* @i2c_data: I2C register settings structure
* @act_info: Sensor query cap structure
* @of_node: Node ptr
- * @device_name: Device name
*/
struct cam_actuator_ctrl_t {
+ char device_name[CAM_CTX_DEV_NAME_MAX_LENGTH];
struct i2c_driver *i2c_driver;
enum cci_i2c_master_t cci_i2c_master;
struct camera_io_master io_master_info;
@@ -123,7 +125,6 @@ struct cam_actuator_ctrl_t {
struct i2c_data_settings i2c_data;
struct cam_actuator_query_cap act_info;
struct intf_params bridge_intf;
- char device_name[20];
};
#endif /* _CAM_ACTUATOR_DEV_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/Makefile
index 8edbea5c2723..e2e79e331322 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/Makefile
@@ -5,5 +5,6 @@ ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_ut
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
obj-$(CONFIG_SPECTRA_CAMERA) += cam_csiphy_soc.o cam_csiphy_dev.o cam_csiphy_core.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
index 2e9aa6cbb1db..03c7d7b6c007 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,6 +14,7 @@
#include "cam_csiphy_core.h"
#include "cam_csiphy_dev.h"
#include "cam_csiphy_soc.h"
+#include "cam_common_util.h"
#include <soc/qcom/scm.h>
#include <cam_mem_mgr.h>
@@ -21,20 +22,36 @@
#define SCM_SVC_CAMERASS 0x18
#define SECURE_SYSCALL_ID 0x6
+#define SECURE_SYSCALL_ID_2 0x7
+
static int csiphy_dump;
module_param(csiphy_dump, int, 0644);
-static int cam_csiphy_notify_secure_mode(int phy, bool protect)
+static int cam_csiphy_notify_secure_mode(struct csiphy_device *csiphy_dev,
+ bool protect, int32_t offset)
{
struct scm_desc desc = {0};
+ int result = -1;
+ if (offset >= CSIPHY_MAX_INSTANCES)
+ return -EINVAL;
desc.arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL);
desc.args[0] = protect;
- desc.args[1] = phy;
-
- CAM_DBG(CAM_CSIPHY, "phy : %d, protect : %d", phy, protect);
- if (scm_call2(SCM_SIP_FNID(SCM_SVC_CAMERASS, SECURE_SYSCALL_ID),
- &desc)) {
+ desc.args[1] = csiphy_dev->csiphy_cpas_cp_reg_mask[offset];
+
+ /*
+ * If SECURE_SYSCALL_ID_2 is not supported
+ * then fallback to SECURE_SYSCALL_ID
+ */
+ result = scm_call2(SCM_SIP_FNID(SCM_SVC_CAMERASS, SECURE_SYSCALL_ID_2),
+ &desc);
+ if (result == -EOPNOTSUPP) {
+ desc.args[1] = csiphy_dev->soc_info.index;
+ CAM_ERR(CAM_CSIPHY, "SCM CALL 7 not supported fallback to 6");
+ result = scm_call2(SCM_SIP_FNID(SCM_SVC_CAMERASS,
+ SECURE_SYSCALL_ID), &desc);
+ }
+ if (result) {
CAM_ERR(CAM_CSIPHY, "scm call to hypervisor failed");
return -EINVAL;
}
@@ -42,6 +59,27 @@ static int cam_csiphy_notify_secure_mode(int phy, bool protect)
return 0;
}
+static int32_t cam_csiphy_get_instance_offset(
+ struct csiphy_device *csiphy_dev,
+ int32_t dev_handle)
+{
+ int32_t i;
+
+ if (csiphy_dev->acquire_count >
+ CSIPHY_MAX_INSTANCES) {
+ CAM_ERR(CAM_CSIPHY, "Invalid acquire count");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < csiphy_dev->acquire_count; i++) {
+ if (dev_handle ==
+ csiphy_dev->bridge_intf.device_hdl[i])
+ break;
+ }
+
+ return i;
+}
+
void cam_csiphy_query_cap(struct csiphy_device *csiphy_dev,
struct cam_csiphy_query_cap *csiphy_cap)
{
@@ -75,11 +113,56 @@ void cam_csiphy_reset(struct csiphy_device *csiphy_dev)
}
}
+static int32_t cam_csiphy_update_secure_info(
+ struct csiphy_device *csiphy_dev,
+ struct cam_csiphy_info *cam_cmd_csiphy_info,
+ struct cam_config_dev_cmd *cfg_dev)
+{
+ uint32_t clock_lane, adj_lane_mask, temp;
+ int32_t offset;
+
+ if (csiphy_dev->acquire_count >=
+ CSIPHY_MAX_INSTANCES) {
+ CAM_ERR(CAM_CSIPHY, "Invalid acquire count");
+ return -EINVAL;
+ }
+
+ offset = cam_csiphy_get_instance_offset(csiphy_dev,
+ cfg_dev->dev_handle);
+ if (offset < 0 || offset >= CSIPHY_MAX_INSTANCES) {
+ CAM_ERR(CAM_CSIPHY, "Invalid offset");
+ return -EINVAL;
+ }
+
+ if (cam_cmd_csiphy_info->combo_mode)
+ clock_lane =
+ csiphy_dev->ctrl_reg->csiphy_reg.csiphy_combo_clk_lane;
+ else
+ clock_lane =
+ csiphy_dev->ctrl_reg->csiphy_reg.csiphy_clock_lane;
+
+ adj_lane_mask = cam_cmd_csiphy_info->lane_mask & 0x1F &
+ ~clock_lane;
+ temp = adj_lane_mask & (clock_lane - 1);
+ adj_lane_mask =
+ ((adj_lane_mask & (~((clock_lane - 1)))) >> 1) | temp;
+
+ csiphy_dev->csiphy_info.secure_mode[offset] = 1;
+
+ csiphy_dev->csiphy_cpas_cp_reg_mask[offset] =
+ adj_lane_mask << (csiphy_dev->soc_info.index *
+ (CAM_CSIPHY_MAX_DPHY_LANES + CAM_CSIPHY_MAX_CPHY_LANES) +
+ (!cam_cmd_csiphy_info->csiphy_3phase) *
+ (CAM_CSIPHY_MAX_CPHY_LANES));
+
+ return 0;
+}
+
int32_t cam_cmd_buf_parser(struct csiphy_device *csiphy_dev,
struct cam_config_dev_cmd *cfg_dev)
{
int32_t rc = 0;
- uint64_t generic_ptr;
+ uintptr_t generic_ptr;
struct cam_packet *csl_packet = NULL;
struct cam_cmd_buf_desc *cmd_desc = NULL;
uint32_t *cmd_buf = NULL;
@@ -92,7 +175,7 @@ int32_t cam_cmd_buf_parser(struct csiphy_device *csiphy_dev,
}
rc = cam_mem_get_cpu_buf((int32_t) cfg_dev->packet_handle,
- (uint64_t *)&generic_ptr, &len);
+ &generic_ptr, &len);
if (rc < 0) {
CAM_ERR(CAM_CSIPHY, "Failed to get packet Mem address: %d", rc);
return rc;
@@ -105,14 +188,15 @@ int32_t cam_cmd_buf_parser(struct csiphy_device *csiphy_dev,
return -EINVAL;
}
- csl_packet = (struct cam_packet *)(generic_ptr + cfg_dev->offset);
+ csl_packet = (struct cam_packet *)
+ (generic_ptr + (uint32_t)cfg_dev->offset);
cmd_desc = (struct cam_cmd_buf_desc *)
((uint32_t *)&csl_packet->payload +
csl_packet->cmd_buf_offset / 4);
rc = cam_mem_get_cpu_buf(cmd_desc->mem_handle,
- (uint64_t *)&generic_ptr, &len);
+ &generic_ptr, &len);
if (rc < 0) {
CAM_ERR(CAM_CSIPHY,
"Failed to get cmd buf Mem address : %d", rc);
@@ -136,7 +220,10 @@ int32_t cam_cmd_buf_parser(struct csiphy_device *csiphy_dev,
csiphy_dev->csiphy_info.settle_time =
cam_cmd_csiphy_info->settle_time;
csiphy_dev->csiphy_info.data_rate = cam_cmd_csiphy_info->data_rate;
- csiphy_dev->csiphy_info.secure_mode = cam_cmd_csiphy_info->secure_mode;
+
+ if (cam_cmd_csiphy_info->secure_mode == 1)
+ cam_csiphy_update_secure_info(csiphy_dev,
+ cam_cmd_csiphy_info, cfg_dev);
return rc;
}
@@ -330,8 +417,10 @@ int32_t cam_csiphy_config_dev(struct csiphy_device *csiphy_dev)
CAM_DBG(CAM_CSIPHY, "Do Nothing");
break;
}
- usleep_range(reg_array[lane_pos][i].delay*1000,
- reg_array[lane_pos][i].delay*1000 + 1000);
+ if (reg_array[lane_pos][i].delay > 0) {
+ usleep_range(reg_array[lane_pos][i].delay*1000,
+ reg_array[lane_pos][i].delay*1000 + 10);
+ }
}
lane_mask >>= 1;
lane_pos++;
@@ -345,6 +434,7 @@ int32_t cam_csiphy_config_dev(struct csiphy_device *csiphy_dev)
void cam_csiphy_shutdown(struct csiphy_device *csiphy_dev)
{
struct cam_hw_soc_info *soc_info;
+ int32_t i = 0;
if (csiphy_dev->csiphy_state == CAM_CSIPHY_INIT)
return;
@@ -352,13 +442,17 @@ void cam_csiphy_shutdown(struct csiphy_device *csiphy_dev)
if (csiphy_dev->csiphy_state == CAM_CSIPHY_START) {
soc_info = &csiphy_dev->soc_info;
- if (csiphy_dev->csiphy_info.secure_mode)
- cam_csiphy_notify_secure_mode(
- csiphy_dev->soc_info.index,
- CAM_SECURE_MODE_NON_SECURE);
+ for (i = 0; i < csiphy_dev->acquire_count; i++) {
+ if (csiphy_dev->csiphy_info.secure_mode[i])
+ cam_csiphy_notify_secure_mode(
+ csiphy_dev,
+ CAM_SECURE_MODE_NON_SECURE, i);
- csiphy_dev->csiphy_info.secure_mode =
- CAM_SECURE_MODE_NON_SECURE;
+ csiphy_dev->csiphy_info.secure_mode[i] =
+ CAM_SECURE_MODE_NON_SECURE;
+
+ csiphy_dev->csiphy_cpas_cp_reg_mask[i] = 0;
+ }
cam_csiphy_reset(csiphy_dev);
cam_soc_util_disable_platform_resource(soc_info, true, true);
@@ -396,7 +490,7 @@ static int32_t cam_csiphy_external_cmd(struct csiphy_device *csiphy_dev,
int32_t rc = 0;
if (copy_from_user(&cam_cmd_csiphy_info,
- (void __user *)p_submit_cmd->packet_handle,
+ u64_to_user_ptr(p_submit_cmd->packet_handle),
sizeof(struct cam_csiphy_info))) {
CAM_ERR(CAM_CSIPHY, "failed to copy cam_csiphy_info\n");
rc = -EFAULT;
@@ -456,7 +550,7 @@ int32_t cam_csiphy_core_cfg(void *phy_dev,
struct cam_create_dev_hdl bridge_params;
rc = copy_from_user(&csiphy_acq_dev,
- (void __user *)cmd->handle,
+ u64_to_user_ptr(cmd->handle),
sizeof(csiphy_acq_dev));
if (rc < 0) {
CAM_ERR(CAM_CSIPHY, "Failed copying from User");
@@ -466,7 +560,7 @@ int32_t cam_csiphy_core_cfg(void *phy_dev,
csiphy_acq_params.combo_mode = 0;
if (copy_from_user(&csiphy_acq_params,
- (void __user *)csiphy_acq_dev.info_handle,
+ u64_to_user_ptr(csiphy_acq_dev.info_handle),
sizeof(csiphy_acq_params))) {
CAM_ERR(CAM_CSIPHY,
"Failed copying from User");
@@ -522,7 +616,7 @@ int32_t cam_csiphy_core_cfg(void *phy_dev,
bridge_intf->session_hdl[csiphy_acq_params.combo_mode] =
csiphy_acq_dev.session_handle;
- if (copy_to_user((void __user *)cmd->handle,
+ if (copy_to_user(u64_to_user_ptr(cmd->handle),
&csiphy_acq_dev,
sizeof(struct cam_sensor_acquire_dev))) {
CAM_ERR(CAM_CSIPHY, "Failed copying from User");
@@ -540,7 +634,7 @@ int32_t cam_csiphy_core_cfg(void *phy_dev,
struct cam_csiphy_query_cap csiphy_cap = {0};
cam_csiphy_query_cap(csiphy_dev, &csiphy_cap);
- if (copy_to_user((void __user *)cmd->handle,
+ if (copy_to_user(u64_to_user_ptr(cmd->handle),
&csiphy_cap, sizeof(struct cam_csiphy_query_cap))) {
CAM_ERR(CAM_CSIPHY, "Failed copying from User");
rc = -EINVAL;
@@ -549,6 +643,16 @@ int32_t cam_csiphy_core_cfg(void *phy_dev,
}
break;
case CAM_STOP_DEV: {
+ int32_t offset, rc = 0;
+ struct cam_start_stop_dev_cmd config;
+
+ rc = copy_from_user(&config, u64_to_user_ptr(cmd->handle),
+ sizeof(config));
+ if (rc < 0) {
+ CAM_ERR(CAM_CSIPHY, "Failed copying from User");
+ goto release_mutex;
+ }
+
if ((csiphy_dev->csiphy_state != CAM_CSIPHY_START) ||
!csiphy_dev->start_dev_count) {
CAM_ERR(CAM_CSIPHY, "Not in right state to stop : %d",
@@ -556,20 +660,38 @@ int32_t cam_csiphy_core_cfg(void *phy_dev,
goto release_mutex;
}
+ offset = cam_csiphy_get_instance_offset(csiphy_dev,
+ config.dev_handle);
+ if (offset < 0 || offset >= CSIPHY_MAX_INSTANCES) {
+ CAM_ERR(CAM_CSIPHY, "Invalid offset");
+ goto release_mutex;
+ }
+
if (--csiphy_dev->start_dev_count) {
CAM_DBG(CAM_CSIPHY, "Stop Dev ref Cnt: %d",
csiphy_dev->start_dev_count);
+ if (csiphy_dev->csiphy_info.secure_mode[offset])
+ cam_csiphy_notify_secure_mode(
+ csiphy_dev,
+ CAM_SECURE_MODE_NON_SECURE, offset);
+
+ csiphy_dev->csiphy_info.secure_mode[offset] =
+ CAM_SECURE_MODE_NON_SECURE;
+ csiphy_dev->csiphy_cpas_cp_reg_mask[offset] = 0;
+
goto release_mutex;
}
- if (csiphy_dev->csiphy_info.secure_mode)
+ if (csiphy_dev->csiphy_info.secure_mode[offset])
cam_csiphy_notify_secure_mode(
- csiphy_dev->soc_info.index,
- CAM_SECURE_MODE_NON_SECURE);
+ csiphy_dev,
+ CAM_SECURE_MODE_NON_SECURE, offset);
- csiphy_dev->csiphy_info.secure_mode =
+ csiphy_dev->csiphy_info.secure_mode[offset] =
CAM_SECURE_MODE_NON_SECURE;
+ csiphy_dev->csiphy_cpas_cp_reg_mask[offset] = 0x0;
+
rc = cam_csiphy_disable_hw(csiphy_dev);
if (rc < 0)
CAM_ERR(CAM_CSIPHY, "Failed in csiphy release");
@@ -590,7 +712,8 @@ int32_t cam_csiphy_core_cfg(void *phy_dev,
goto release_mutex;
}
- if (copy_from_user(&release, (void __user *) cmd->handle,
+ if (copy_from_user(&release,
+ u64_to_user_ptr(cmd->handle),
sizeof(release))) {
rc = -EFAULT;
goto release_mutex;
@@ -628,7 +751,8 @@ int32_t cam_csiphy_core_cfg(void *phy_dev,
case CAM_CONFIG_DEV: {
struct cam_config_dev_cmd config;
- if (copy_from_user(&config, (void __user *)cmd->handle,
+ if (copy_from_user(&config,
+ u64_to_user_ptr(cmd->handle),
sizeof(config))) {
rc = -EFAULT;
} else {
@@ -643,12 +767,28 @@ int32_t cam_csiphy_core_cfg(void *phy_dev,
case CAM_START_DEV: {
struct cam_ahb_vote ahb_vote;
struct cam_axi_vote axi_vote;
+ struct cam_start_stop_dev_cmd config;
+ int32_t offset;
+
+ rc = copy_from_user(&config, u64_to_user_ptr(cmd->handle),
+ sizeof(config));
+ if (rc < 0) {
+ CAM_ERR(CAM_CSIPHY, "Failed copying from User");
+ goto release_mutex;
+ }
if (csiphy_dev->csiphy_state == CAM_CSIPHY_START) {
csiphy_dev->start_dev_count++;
goto release_mutex;
}
+ offset = cam_csiphy_get_instance_offset(csiphy_dev,
+ config.dev_handle);
+ if (offset < 0 || offset >= CSIPHY_MAX_INSTANCES) {
+ CAM_ERR(CAM_CSIPHY, "Invalid offset");
+ goto release_mutex;
+ }
+
ahb_vote.type = CAM_VOTE_ABSOLUTE;
ahb_vote.vote.level = CAM_SVS_VOTE;
axi_vote.compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
@@ -661,12 +801,12 @@ int32_t cam_csiphy_core_cfg(void *phy_dev,
goto release_mutex;
}
- if (csiphy_dev->csiphy_info.secure_mode) {
+ if (csiphy_dev->csiphy_info.secure_mode[offset] == 1) {
rc = cam_csiphy_notify_secure_mode(
- csiphy_dev->soc_info.index,
- CAM_SECURE_MODE_SECURE);
+ csiphy_dev,
+ CAM_SECURE_MODE_SECURE, offset);
if (rc < 0)
- csiphy_dev->csiphy_info.secure_mode =
+ csiphy_dev->csiphy_info.secure_mode[offset] =
CAM_SECURE_MODE_NON_SECURE;
}
@@ -694,7 +834,7 @@ int32_t cam_csiphy_core_cfg(void *phy_dev,
struct cam_config_dev_cmd submit_cmd;
if (copy_from_user(&submit_cmd,
- (void __user *)cmd->handle,
+ u64_to_user_ptr(cmd->handle),
sizeof(struct cam_config_dev_cmd))) {
CAM_ERR(CAM_CSIPHY, "failed copy config ext\n");
rc = -EFAULT;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h
index 9c85af39bd8c..5ece65112ea9 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -35,6 +35,7 @@
#include <cam_cpas_api.h>
#include "cam_soc_util.h"
#include "cam_debug_util.h"
+#include "cam_context.h"
#define MAX_CSIPHY 3
#define MAX_DPHY_DATA_LN 4
@@ -58,6 +59,11 @@
#define CSIPHY_SETTLE_CNT_HIGHER_BYTE 3
#define CSIPHY_DNP_PARAMS 4
+#define CSIPHY_MAX_INSTANCES 2
+
+#define CAM_CSIPHY_MAX_DPHY_LANES 4
+#define CAM_CSIPHY_MAX_CPHY_LANES 3
+
#define ENABLE_IRQ false
#undef CDBG
@@ -101,6 +107,12 @@ struct csiphy_reg_parms_t {
uint32_t csiphy_reset_array_size;
uint32_t csiphy_2ph_config_array_size;
uint32_t csiphy_3ph_config_array_size;
+ uint32_t csiphy_cpas_cp_bits_per_phy;
+ uint32_t csiphy_cpas_cp_is_interleaved;
+ uint32_t csiphy_cpas_cp_2ph_offset;
+ uint32_t csiphy_cpas_cp_3ph_offset;
+ uint32_t csiphy_clock_lane;
+ uint32_t csiphy_combo_clk_lane;
};
/**
@@ -111,9 +123,9 @@ struct csiphy_reg_parms_t {
* @crm_cb: Callback API pointers
*/
struct intf_params {
- int32_t device_hdl[2];
- int32_t session_hdl[2];
- int32_t link_hdl[2];
+ int32_t device_hdl[CSIPHY_MAX_INSTANCES];
+ int32_t session_hdl[CSIPHY_MAX_INSTANCES];
+ int32_t link_hdl[CSIPHY_MAX_INSTANCES];
struct cam_req_mgr_kmd_ops ops;
struct cam_req_mgr_crm_cb *crm_cb;
};
@@ -175,7 +187,7 @@ struct cam_csiphy_param {
uint8_t csiphy_3phase;
uint8_t combo_mode;
uint8_t lane_cnt;
- uint8_t secure_mode;
+ uint8_t secure_mode[CSIPHY_MAX_INSTANCES];
uint64_t settle_time;
uint64_t settle_time_combo_sensor;
uint64_t data_rate;
@@ -183,6 +195,7 @@ struct cam_csiphy_param {
/**
* struct csiphy_device
+ * @device_name: Device name
* @pdev: Platform device
* @irq: Interrupt structure
* @base: Base address
@@ -208,6 +221,7 @@ struct cam_csiphy_param {
* device is for combo mode
*/
struct csiphy_device {
+ char device_name[CAM_CTX_DEV_NAME_MAX_LENGTH];
struct mutex mutex;
uint32_t hw_version;
enum cam_csiphy_state csiphy_state;
@@ -226,11 +240,11 @@ struct csiphy_device {
uint32_t clk_lane;
uint32_t acquire_count;
uint32_t start_dev_count;
- char device_name[20];
uint32_t is_acquired_dev_combo_mode;
struct cam_hw_soc_info soc_info;
uint32_t cpas_handle;
uint32_t config_count;
+ uint64_t csiphy_cpas_cp_reg_mask[CSIPHY_MAX_INSTANCES];
};
#endif /* _CAM_CSIPHY_DEV_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_0_hwreg.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_0_hwreg.h
index 324509340054..82cff279fec4 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_0_hwreg.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_0_hwreg.h
@@ -23,6 +23,8 @@ struct csiphy_reg_parms_t csiphy_v1_0 = {
.csiphy_reset_array_size = 5,
.csiphy_2ph_config_array_size = 14,
.csiphy_3ph_config_array_size = 19,
+ .csiphy_clock_lane = 0x1,
+ .csiphy_combo_clk_lane = 0x10,
};
struct csiphy_reg_t csiphy_common_reg_1_0[] = {
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/Makefile
index 5490992365fc..75172556ed60 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/Makefile
@@ -5,4 +5,6 @@ ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_ut
ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+
obj-$(CONFIG_SPECTRA_CAMERA) += cam_eeprom_dev.o cam_eeprom_core.o cam_eeprom_soc.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
index c8730cab765c..277418a46d3d 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
@@ -17,6 +17,7 @@
#include "cam_eeprom_core.h"
#include "cam_eeprom_soc.h"
#include "cam_debug_util.h"
+#include "cam_common_util.h"
/**
* cam_eeprom_read_memory() - read map data into buffer
@@ -221,7 +222,7 @@ static int cam_eeprom_power_down(struct cam_eeprom_ctrl_t *e_ctrl)
CAM_ERR(CAM_EEPROM, "failed: power_info %pK", power_info);
return -EINVAL;
}
- rc = msm_camera_power_down(power_info, soc_info);
+ rc = cam_sensor_util_power_down(power_info, soc_info);
if (rc) {
CAM_ERR(CAM_EEPROM, "power down the core is failed:%d", rc);
return rc;
@@ -341,7 +342,8 @@ static int32_t cam_eeprom_get_dev_handle(struct cam_eeprom_ctrl_t *e_ctrl,
CAM_ERR(CAM_EEPROM, "Device is already acquired");
return -EFAULT;
}
- if (copy_from_user(&eeprom_acq_dev, (void __user *) cmd->handle,
+ if (copy_from_user(&eeprom_acq_dev,
+ u64_to_user_ptr(cmd->handle),
sizeof(eeprom_acq_dev))) {
CAM_ERR(CAM_EEPROM,
"EEPROM:ACQUIRE_DEV: copy from user failed");
@@ -360,8 +362,8 @@ static int32_t cam_eeprom_get_dev_handle(struct cam_eeprom_ctrl_t *e_ctrl,
e_ctrl->bridge_intf.session_hdl = eeprom_acq_dev.session_handle;
CAM_DBG(CAM_EEPROM, "Device Handle: %d", eeprom_acq_dev.device_handle);
- if (copy_to_user((void __user *) cmd->handle, &eeprom_acq_dev,
- sizeof(struct cam_sensor_acquire_dev))) {
+ if (copy_to_user(u64_to_user_ptr(cmd->handle),
+ &eeprom_acq_dev, sizeof(struct cam_sensor_acquire_dev))) {
CAM_ERR(CAM_EEPROM, "EEPROM:ACQUIRE_DEV: copy to user failed");
return -EFAULT;
}
@@ -530,7 +532,7 @@ static int32_t cam_eeprom_init_pkt_parser(struct cam_eeprom_ctrl_t *e_ctrl,
struct cam_cmd_buf_desc *cmd_desc = NULL;
uint32_t *offset = NULL;
uint32_t *cmd_buf = NULL;
- uint64_t generic_pkt_addr;
+ uintptr_t generic_pkt_addr;
size_t pkt_len = 0;
uint32_t total_cmd_buf_in_bytes = 0;
uint32_t processed_cmd_buf_in_bytes = 0;
@@ -564,7 +566,7 @@ static int32_t cam_eeprom_init_pkt_parser(struct cam_eeprom_ctrl_t *e_ctrl,
if (!total_cmd_buf_in_bytes)
continue;
rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle,
- (uint64_t *)&generic_pkt_addr, &pkt_len);
+ &generic_pkt_addr, &pkt_len);
if (rc) {
CAM_ERR(CAM_EEPROM, "Failed to get cpu buf");
return rc;
@@ -641,7 +643,7 @@ static int32_t cam_eeprom_get_cal_data(struct cam_eeprom_ctrl_t *e_ctrl,
struct cam_buf_io_cfg *io_cfg;
uint32_t i = 0;
int rc = 0;
- uint64_t buf_addr;
+ uintptr_t buf_addr;
size_t buf_size;
uint8_t *read_buffer;
@@ -656,7 +658,7 @@ static int32_t cam_eeprom_get_cal_data(struct cam_eeprom_ctrl_t *e_ctrl,
CAM_DBG(CAM_EEPROM, "Direction: %d:", io_cfg->direction);
if (io_cfg->direction == CAM_BUF_OUTPUT) {
rc = cam_mem_get_cpu_buf(io_cfg->mem_handle[0],
- (uint64_t *)&buf_addr, &buf_size);
+ &buf_addr, &buf_size);
CAM_DBG(CAM_EEPROM, "buf_addr : %pK, buf_size : %zu\n",
(void *)buf_addr, buf_size);
@@ -699,7 +701,7 @@ static int32_t cam_eeprom_pkt_parse(struct cam_eeprom_ctrl_t *e_ctrl, void *arg)
int32_t rc = 0;
struct cam_control *ioctl_ctrl = NULL;
struct cam_config_dev_cmd dev_config;
- uint64_t generic_pkt_addr;
+ uintptr_t generic_pkt_addr;
size_t pkt_len;
struct cam_packet *csl_packet = NULL;
struct cam_eeprom_soc_private *soc_private =
@@ -708,11 +710,12 @@ static int32_t cam_eeprom_pkt_parse(struct cam_eeprom_ctrl_t *e_ctrl, void *arg)
ioctl_ctrl = (struct cam_control *)arg;
- if (copy_from_user(&dev_config, (void __user *) ioctl_ctrl->handle,
+ if (copy_from_user(&dev_config,
+ u64_to_user_ptr(ioctl_ctrl->handle),
sizeof(dev_config)))
return -EFAULT;
rc = cam_mem_get_cpu_buf(dev_config.packet_handle,
- (uint64_t *)&generic_pkt_addr, &pkt_len);
+ &generic_pkt_addr, &pkt_len);
if (rc) {
CAM_ERR(CAM_EEPROM,
"error in converting command Handle Error: %d", rc);
@@ -727,7 +730,7 @@ static int32_t cam_eeprom_pkt_parse(struct cam_eeprom_ctrl_t *e_ctrl, void *arg)
}
csl_packet = (struct cam_packet *)
- (generic_pkt_addr + dev_config.offset);
+ (generic_pkt_addr + (uint32_t)dev_config.offset);
switch (csl_packet->header.op_code & 0xFFFFFF) {
case CAM_EEPROM_PACKET_OPCODE_INIT:
if (e_ctrl->userspace_probe == false) {
@@ -880,7 +883,7 @@ int32_t cam_eeprom_driver_cmd(struct cam_eeprom_ctrl_t *e_ctrl, void *arg)
else
eeprom_cap.eeprom_kernel_probe = false;
- if (copy_to_user((void __user *) cmd->handle,
+ if (copy_to_user(u64_to_user_ptr(cmd->handle),
&eeprom_cap,
sizeof(struct cam_eeprom_query_cap_t))) {
CAM_ERR(CAM_EEPROM, "Failed Copy to User");
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c
index cc34a70893db..6d8820abb7d7 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c
@@ -261,9 +261,10 @@ static int cam_eeprom_i2c_driver_remove(struct i2c_client *client)
for (i = 0; i < soc_info->num_clk; i++)
devm_clk_put(soc_info->dev, soc_info->clk[i]);
- if (soc_private)
- kfree(soc_private);
-
+ mutex_destroy(&(e_ctrl->eeprom_mutex));
+ kfree(soc_private);
+ kfree(e_ctrl->io_master_info.cci_client);
+ v4l2_set_subdevdata(&e_ctrl->v4l2_dev_str.sd, NULL);
kfree(e_ctrl);
return 0;
@@ -394,6 +395,8 @@ static int cam_eeprom_spi_driver_remove(struct spi_device *sdev)
kfree(soc_private->power_info.gpio_num_info);
kfree(soc_private);
}
+ mutex_destroy(&(e_ctrl->eeprom_mutex));
+ v4l2_set_subdevdata(&e_ctrl->v4l2_dev_str.sd, NULL);
kfree(e_ctrl);
return 0;
@@ -489,8 +492,11 @@ static int cam_eeprom_platform_driver_remove(struct platform_device *pdev)
for (i = 0; i < soc_info->num_clk; i++)
devm_clk_put(soc_info->dev, soc_info->clk[i]);
+ mutex_destroy(&(e_ctrl->eeprom_mutex));
kfree(soc_info->soc_private);
kfree(e_ctrl->io_master_info.cci_client);
+ platform_set_drvdata(pdev, NULL);
+ v4l2_set_subdevdata(&e_ctrl->v4l2_dev_str.sd, NULL);
kfree(e_ctrl);
return 0;
}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.h
index 4a2190da7c70..3adb7ea002b7 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -27,6 +27,7 @@
#include <cam_mem_mgr.h>
#include <cam_subdev.h>
#include "cam_soc_util.h"
+#include "cam_context.h"
#define DEFINE_MSM_MUTEX(mutexname) \
static struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
@@ -151,6 +152,7 @@ struct cam_eeprom_intf_params {
/**
* struct cam_cmd_conditional_wait - Conditional wait command
+ * @device_name : Device name
* @pdev : platform device
* @spi : spi device
* @eeprom_mutex : eeprom mutex
@@ -163,10 +165,10 @@ struct cam_eeprom_intf_params {
* @cam_eeprom_state: eeprom_device_state
* @userspace_probe : flag indicates userspace or kernel probe
* @cal_data : Calibration data
- * @device_name : Device name
*
*/
struct cam_eeprom_ctrl_t {
+ char device_name[CAM_CTX_DEV_NAME_MAX_LENGTH];
struct platform_device *pdev;
struct spi_device *spi;
struct mutex eeprom_mutex;
@@ -180,7 +182,6 @@ struct cam_eeprom_ctrl_t {
enum cam_eeprom_state cam_eeprom_state;
bool userspace_probe;
struct cam_eeprom_memory_block_t cal_data;
- char device_name[20];
};
int32_t cam_eeprom_update_i2c_info(struct cam_eeprom_ctrl_t *e_ctrl,
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/Makefile
index c7889a5fc2f5..4d1cbdc3c5a2 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/Makefile
@@ -1,10 +1,11 @@
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
-ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
obj-$(CONFIG_SPECTRA_CAMERA) += cam_flash_dev.o cam_flash_core.o cam_flash_soc.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c
index e997168d62ea..9ed9536789df 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -15,8 +15,9 @@
#include "cam_sensor_cmn_header.h"
#include "cam_flash_core.h"
#include "cam_res_mgr_api.h"
+#include "cam_common_util.h"
-int cam_flash_prepare(struct cam_flash_ctrl *flash_ctrl,
+static int cam_flash_prepare(struct cam_flash_ctrl *flash_ctrl,
bool regulator_enable)
{
int rc = 0;
@@ -55,7 +56,7 @@ int cam_flash_prepare(struct cam_flash_ctrl *flash_ctrl,
return rc;
}
-static int cam_flash_flush_nrt(struct cam_flash_ctrl *fctrl)
+static int cam_flash_pmic_flush_nrt(struct cam_flash_ctrl *fctrl)
{
int j = 0;
struct cam_flash_frame_setting *nrt_settings;
@@ -86,20 +87,187 @@ static int cam_flash_flush_nrt(struct cam_flash_ctrl *fctrl)
return 0;
}
-int cam_flash_flush_request(struct cam_req_mgr_flush_request *flush)
+static int cam_flash_i2c_flush_nrt(struct cam_flash_ctrl *fctrl)
+{
+ int rc = 0;
+
+ if (fctrl->i2c_data.init_settings.is_settings_valid == true) {
+ rc = delete_request(&fctrl->i2c_data.init_settings);
+ if (rc) {
+ CAM_WARN(CAM_FLASH,
+ "Failed to delete Init i2c_setting: %d",
+ rc);
+ return rc;
+ }
+ }
+ if (fctrl->i2c_data.config_settings.is_settings_valid == true) {
+ rc = delete_request(&fctrl->i2c_data.config_settings);
+ if (rc) {
+ CAM_WARN(CAM_FLASH,
+ "Failed to delete NRT i2c_setting: %d",
+ rc);
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
+static int cam_flash_construct_default_power_setting(
+ struct cam_sensor_power_ctrl_t *power_info)
+{
+ int rc = 0;
+
+ power_info->power_setting_size = 1;
+ power_info->power_setting =
+ (struct cam_sensor_power_setting *)
+ kzalloc(sizeof(struct cam_sensor_power_setting),
+ GFP_KERNEL);
+ if (!power_info->power_setting)
+ return -ENOMEM;
+
+ power_info->power_setting[0].seq_type = SENSOR_CUSTOM_REG1;
+ power_info->power_setting[0].seq_val = CAM_V_CUSTOM1;
+ power_info->power_setting[0].config_val = 0;
+ power_info->power_setting[0].delay = 2;
+
+ power_info->power_down_setting_size = 1;
+ power_info->power_down_setting =
+ (struct cam_sensor_power_setting *)
+ kzalloc(sizeof(struct cam_sensor_power_setting),
+ GFP_KERNEL);
+ if (!power_info->power_down_setting) {
+ rc = -ENOMEM;
+ goto free_power_settings;
+ }
+
+ power_info->power_down_setting[0].seq_type = SENSOR_CUSTOM_REG1;
+ power_info->power_down_setting[0].seq_val = CAM_V_CUSTOM1;
+ power_info->power_down_setting[0].config_val = 0;
+
+ return rc;
+
+free_power_settings:
+ kfree(power_info->power_setting);
+ power_info->power_setting = NULL;
+ power_info->power_setting_size = 0;
+ return rc;
+}
+
+int cam_flash_pmic_power_ops(struct cam_flash_ctrl *fctrl,
+ bool regulator_enable)
+{
+ int rc = 0;
+
+ if (!(fctrl->switch_trigger)) {
+ CAM_ERR(CAM_FLASH, "Invalid argument");
+ return -EINVAL;
+ }
+
+ if (regulator_enable) {
+ rc = cam_flash_prepare(fctrl, true);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "Enable Regulator Failed rc = %d", rc);
+ return rc;
+ }
+ }
+
+ if (!regulator_enable) {
+ if ((fctrl->flash_state == CAM_FLASH_STATE_START) &&
+ (fctrl->is_regulator_enabled == true)) {
+ rc = cam_flash_prepare(fctrl, false);
+ if (rc)
+ CAM_ERR(CAM_FLASH,
+ "Disable Regulator Failed rc: %d", rc);
+ }
+ }
+
+ return rc;
+}
+
+int cam_flash_i2c_power_ops(struct cam_flash_ctrl *fctrl,
+ bool regulator_enable)
+{
+ int rc = 0;
+ struct cam_hw_soc_info *soc_info = &fctrl->soc_info;
+ struct cam_sensor_power_ctrl_t *power_info =
+ &fctrl->power_info;
+
+ if (!power_info || !soc_info) {
+ CAM_ERR(CAM_FLASH, "Power Info is NULL");
+ return -EINVAL;
+ }
+ power_info->dev = soc_info->dev;
+
+ if (regulator_enable && (fctrl->is_regulator_enabled == false)) {
+ if ((power_info->power_setting == NULL) &&
+ (power_info->power_down_setting == NULL)) {
+ CAM_INFO(CAM_FLASH,
+ "Using default power settings");
+ rc = cam_flash_construct_default_power_setting(
+ power_info);
+ if (rc < 0) {
+ CAM_ERR(CAM_FLASH,
+ "Construct default pwr setting failed rc: %d",
+ rc);
+ return rc;
+ }
+ }
+
+ rc = cam_sensor_core_power_up(power_info, soc_info);
+ if (rc) {
+ CAM_ERR(CAM_FLASH, "power up the core is failed:%d",
+ rc);
+ goto free_pwr_settings;
+ }
+
+ rc = camera_io_init(&(fctrl->io_master_info));
+ if (rc) {
+ CAM_ERR(CAM_FLASH, "cci_init failed: rc: %d", rc);
+ cam_sensor_util_power_down(power_info, soc_info);
+ goto free_pwr_settings;
+ }
+ fctrl->is_regulator_enabled = true;
+ } else if ((!regulator_enable) &&
+ (fctrl->is_regulator_enabled == true)) {
+ rc = cam_sensor_util_power_down(power_info, soc_info);
+ if (rc) {
+ CAM_ERR(CAM_FLASH, "power down the core is failed:%d",
+ rc);
+ return rc;
+ }
+ camera_io_release(&(fctrl->io_master_info));
+ fctrl->is_regulator_enabled = false;
+ goto free_pwr_settings;
+ }
+ return rc;
+
+free_pwr_settings:
+ kfree(power_info->power_setting);
+ kfree(power_info->power_down_setting);
+ power_info->power_setting = NULL;
+ power_info->power_down_setting = NULL;
+ power_info->power_setting_size = 0;
+ power_info->power_down_setting_size = 0;
+
+ return rc;
+}
+
+int cam_flash_pmic_flush_request(struct cam_flash_ctrl *fctrl,
+ enum cam_flash_flush_type type, uint64_t req_id)
{
int rc = 0;
int i = 0, j = 0;
- struct cam_flash_ctrl *fctrl = NULL;
int frame_offset = 0;
- fctrl = (struct cam_flash_ctrl *) cam_get_device_priv(flush->dev_hdl);
if (!fctrl) {
CAM_ERR(CAM_FLASH, "Device data is NULL");
return -EINVAL;
}
- if (flush->type == CAM_REQ_MGR_FLUSH_TYPE_ALL) {
+ if (type == FLUSH_ALL) {
+ cam_flash_off(fctrl);
/* flush all requests*/
for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
fctrl->per_frame[i].cmn_attr.request_id = 0;
@@ -109,19 +277,105 @@ int cam_flash_flush_request(struct cam_req_mgr_flush_request *flush)
fctrl->per_frame[i].led_current_ma[j] = 0;
}
- rc = cam_flash_flush_nrt(fctrl);
- if (rc)
- CAM_ERR(CAM_FLASH, "NonRealTime flush error");
- } else if (flush->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
+ cam_flash_pmic_flush_nrt(fctrl);
+ } else if ((type == FLUSH_REQ) && (req_id != 0)) {
/* flush request with req_id*/
- frame_offset = flush->req_id % MAX_PER_FRAME_ARRAY;
+ frame_offset = req_id % MAX_PER_FRAME_ARRAY;
fctrl->per_frame[frame_offset].cmn_attr.request_id = 0;
fctrl->per_frame[frame_offset].cmn_attr.is_settings_valid =
false;
fctrl->per_frame[frame_offset].cmn_attr.count = 0;
for (i = 0; i < CAM_FLASH_MAX_LED_TRIGGERS; i++)
fctrl->per_frame[frame_offset].led_current_ma[i] = 0;
+ } else if ((type == FLUSH_REQ) && (req_id == 0)) {
+ /* Handels NonRealTime usecase */
+ cam_flash_pmic_flush_nrt(fctrl);
+ } else {
+ CAM_ERR(CAM_FLASH, "Invalid arguments");
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+int cam_flash_i2c_flush_request(struct cam_flash_ctrl *fctrl,
+ enum cam_flash_flush_type type, uint64_t req_id)
+{
+ int rc = 0;
+ int i = 0;
+ uint32_t cancel_req_id_found = 0;
+ struct i2c_settings_array *i2c_set = NULL;
+
+ if (!fctrl) {
+ CAM_ERR(CAM_FLASH, "Device data is NULL");
+ return -EINVAL;
+ }
+ if ((type == FLUSH_REQ) && (req_id == 0)) {
+ /* This setting will be called only when NonRealTime
+ * settings needs to clean.
+ */
+ cam_flash_i2c_flush_nrt(fctrl);
+ } else {
+ /* All other usecase will be handle here */
+ for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
+ i2c_set = &(fctrl->i2c_data.per_frame[i]);
+
+ if ((type == FLUSH_REQ) &&
+ (i2c_set->request_id != req_id))
+ continue;
+
+ if (i2c_set->is_settings_valid == 1) {
+ rc = delete_request(i2c_set);
+ if (rc < 0)
+ CAM_ERR(CAM_FLASH,
+ "delete request: %lld rc: %d",
+ i2c_set->request_id, rc);
+
+ if (type == FLUSH_REQ) {
+ cancel_req_id_found = 1;
+ break;
+ }
+ }
+ }
+ }
+
+ if ((type == FLUSH_REQ) && (req_id != 0) &&
+ (!cancel_req_id_found))
+ CAM_DBG(CAM_FLASH,
+ "Flush request id:%lld not found in the pending list",
+ req_id);
+
+ return rc;
+}
+
+int cam_flash_flush_request(struct cam_req_mgr_flush_request *flush)
+{
+ int rc = 0;
+ struct cam_flash_ctrl *fctrl = NULL;
+
+ fctrl = (struct cam_flash_ctrl *) cam_get_device_priv(flush->dev_hdl);
+ if (!fctrl) {
+ CAM_ERR(CAM_FLASH, "Device data is NULL");
+ return -EINVAL;
+ }
+
+ mutex_lock(&fctrl->flash_mutex);
+ if (flush->type == CAM_REQ_MGR_FLUSH_TYPE_ALL) {
+ rc = fctrl->func_tbl.flush_req(fctrl, FLUSH_ALL, 0);
+ if (rc) {
+ CAM_ERR(CAM_FLASH, "FLUSH_TYPE_ALL failed rc: %d", rc);
+ goto end;
+ }
+ } else if (flush->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
+ rc = fctrl->func_tbl.flush_req(fctrl,
+ FLUSH_REQ, flush->req_id);
+ if (rc) {
+ CAM_ERR(CAM_FLASH, "FLUSH_REQ failed rc: %d", rc);
+ goto end;
+ }
}
+end:
+ mutex_unlock(&fctrl->flash_mutex);
return rc;
}
@@ -184,7 +438,7 @@ static int cam_flash_ops(struct cam_flash_ctrl *flash_ctrl,
if (flash_ctrl->switch_trigger)
cam_res_mgr_led_trigger_event(
flash_ctrl->switch_trigger,
- LED_SWITCH_ON);
+ (enum led_brightness)LED_SWITCH_ON);
return 0;
}
@@ -198,7 +452,7 @@ int cam_flash_off(struct cam_flash_ctrl *flash_ctrl)
if (flash_ctrl->switch_trigger)
cam_res_mgr_led_trigger_event(flash_ctrl->switch_trigger,
- LED_SWITCH_OFF);
+ (enum led_brightness)LED_SWITCH_OFF);
flash_ctrl->flash_state = CAM_FLASH_STATE_START;
return 0;
@@ -254,26 +508,51 @@ static int cam_flash_high(
return rc;
}
-static int delete_req(struct cam_flash_ctrl *fctrl, uint64_t req_id)
+static int cam_flash_i2c_delete_req(struct cam_flash_ctrl *fctrl,
+ uint64_t req_id)
+{
+ int i = 0, rc = 0;
+ uint64_t top = 0, del_req_id = 0;
+
+ if (req_id != 0) {
+ for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
+ if ((req_id >=
+ fctrl->i2c_data.per_frame[i].request_id) &&
+ (top <
+ fctrl->i2c_data.per_frame[i].request_id) &&
+ (fctrl->i2c_data.per_frame[i].is_settings_valid
+ == 1)) {
+ del_req_id = top;
+ top = fctrl->i2c_data.per_frame[i].request_id;
+ }
+ }
+
+ if (top < req_id) {
+ if ((((top % MAX_PER_FRAME_ARRAY) - (req_id %
+ MAX_PER_FRAME_ARRAY)) >= BATCH_SIZE_MAX) ||
+ (((top % MAX_PER_FRAME_ARRAY) - (req_id %
+ MAX_PER_FRAME_ARRAY)) <= -BATCH_SIZE_MAX))
+ del_req_id = req_id;
+ }
+
+ if (!del_req_id)
+ return rc;
+
+ CAM_DBG(CAM_FLASH, "top: %llu, del_req_id:%llu",
+ top, del_req_id);
+ }
+ fctrl->func_tbl.flush_req(fctrl, FLUSH_REQ, del_req_id);
+ return 0;
+}
+
+static int cam_flash_pmic_delete_req(struct cam_flash_ctrl *fctrl,
+ uint64_t req_id)
{
int i = 0;
struct cam_flash_frame_setting *flash_data = NULL;
uint64_t top = 0, del_req_id = 0;
- if (req_id == 0) {
- flash_data = &fctrl->nrt_info;
- if ((fctrl->nrt_info.cmn_attr.cmd_type ==
- CAMERA_SENSOR_FLASH_CMD_TYPE_WIDGET) ||
- (fctrl->nrt_info.cmn_attr.cmd_type ==
- CAMERA_SENSOR_FLASH_CMD_TYPE_RER)) {
- flash_data->cmn_attr.is_settings_valid = false;
- for (i = 0; i < flash_data->cmn_attr.count; i++)
- flash_data->led_current_ma[i] = 0;
- } else {
- fctrl->flash_init_setting.cmn_attr.is_settings_valid
- = false;
- }
- } else {
+ if (req_id != 0) {
for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
flash_data = &fctrl->per_frame[i];
if (req_id >= flash_data->cmn_attr.request_id &&
@@ -305,28 +584,100 @@ static int delete_req(struct cam_flash_ctrl *fctrl, uint64_t req_id)
CAM_DBG(CAM_FLASH, "top: %llu, del_req_id:%llu",
top, del_req_id);
+ }
- for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
- flash_data = &fctrl->per_frame[i];
- if ((del_req_id ==
- flash_data->cmn_attr.request_id) &&
- (flash_data->cmn_attr.
- is_settings_valid == 1)) {
- CAM_DBG(CAM_FLASH, "Deleting request[%d] %llu",
- i, flash_data->cmn_attr.request_id);
- flash_data->cmn_attr.request_id = 0;
- flash_data->cmn_attr.is_settings_valid = false;
- flash_data->opcode = 0;
- for (i = 0; i < flash_data->cmn_attr.count; i++)
- flash_data->led_current_ma[i] = 0;
+ fctrl->func_tbl.flush_req(fctrl, FLUSH_REQ, del_req_id);
+ return 0;
+}
+
+static int32_t cam_flash_slaveInfo_pkt_parser(struct cam_flash_ctrl *fctrl,
+ uint32_t *cmd_buf)
+{
+ int32_t rc = 0;
+ struct cam_cmd_i2c_info *i2c_info = (struct cam_cmd_i2c_info *)cmd_buf;
+
+ if (fctrl->io_master_info.master_type == CCI_MASTER) {
+ fctrl->io_master_info.cci_client->cci_i2c_master =
+ fctrl->cci_i2c_master;
+ fctrl->io_master_info.cci_client->i2c_freq_mode =
+ i2c_info->i2c_freq_mode;
+ fctrl->io_master_info.cci_client->sid =
+ i2c_info->slave_addr >> 1;
+ CAM_DBG(CAM_FLASH, "Slave addr: 0x%x Freq Mode: %d",
+ i2c_info->slave_addr, i2c_info->i2c_freq_mode);
+ } else if (fctrl->io_master_info.master_type == I2C_MASTER) {
+ fctrl->io_master_info.client->addr = i2c_info->slave_addr;
+ CAM_DBG(CAM_FLASH, "Slave addr: 0x%x", i2c_info->slave_addr);
+ } else {
+ CAM_ERR(CAM_FLASH, "Invalid Master type: %d",
+ fctrl->io_master_info.master_type);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+int cam_flash_i2c_apply_setting(struct cam_flash_ctrl *fctrl,
+ uint64_t req_id)
+{
+ struct i2c_settings_list *i2c_list;
+ struct i2c_settings_array *i2c_set = NULL;
+ int frame_offset = 0, rc = 0;
+
+ if (req_id == 0) {
+ /* NonRealTime Init settings*/
+ if (fctrl->i2c_data.init_settings.is_settings_valid == true) {
+ list_for_each_entry(i2c_list,
+ &(fctrl->i2c_data.init_settings.list_head),
+ list) {
+ rc = cam_sensor_util_i2c_apply_setting
+ (&(fctrl->io_master_info), i2c_list);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "Failed to apply init settings: %d",
+ rc);
+ return rc;
+ }
+ }
+ }
+ /* NonRealTime (Widget/RER/INIT_FIRE settings) */
+ if (fctrl->i2c_data.config_settings.is_settings_valid == true) {
+ list_for_each_entry(i2c_list,
+ &(fctrl->i2c_data.config_settings.list_head),
+ list) {
+ rc = cam_sensor_util_i2c_apply_setting
+ (&(fctrl->io_master_info), i2c_list);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "Failed to apply NRT settings: %d", rc);
+ return rc;
+ }
+ }
+ }
+ } else {
+ /* RealTime */
+ frame_offset = req_id % MAX_PER_FRAME_ARRAY;
+ i2c_set = &fctrl->i2c_data.per_frame[frame_offset];
+ if ((i2c_set->is_settings_valid == true) &&
+ (i2c_set->request_id == req_id)) {
+ list_for_each_entry(i2c_list,
+ &(i2c_set->list_head), list) {
+ rc = cam_sensor_util_i2c_apply_setting(
+ &(fctrl->io_master_info), i2c_list);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "Failed to apply settings: %d", rc);
+ return rc;
+ }
}
}
}
- return 0;
+ cam_flash_i2c_delete_req(fctrl, req_id);
+ return rc;
}
-int cam_flash_apply_setting(struct cam_flash_ctrl *fctrl,
+int cam_flash_pmic_apply_setting(struct cam_flash_ctrl *fctrl,
uint64_t req_id)
{
int rc = 0, i = 0;
@@ -344,12 +695,12 @@ int cam_flash_apply_setting(struct cam_flash_ctrl *fctrl,
if (flash_data->opcode ==
CAMERA_SENSOR_FLASH_OP_FIREHIGH) {
- if (fctrl->flash_state !=
- CAM_FLASH_STATE_CONFIG) {
+ if (fctrl->flash_state ==
+ CAM_FLASH_STATE_START) {
CAM_WARN(CAM_FLASH,
- "Cannot apply Start Dev:Prev state: %d",
+ "Wrong state :Prev state: %d",
fctrl->flash_state);
- return rc;
+ return -EINVAL;
}
rc = cam_flash_prepare(fctrl, true);
if (rc) {
@@ -360,8 +711,27 @@ int cam_flash_apply_setting(struct cam_flash_ctrl *fctrl,
rc = cam_flash_high(fctrl, flash_data);
if (rc)
CAM_ERR(CAM_FLASH,
- "FLASH ON failed : %d",
- rc);
+ "FLASH ON failed : %d", rc);
+ }
+ if (flash_data->opcode ==
+ CAMERA_SENSOR_FLASH_OP_FIRELOW) {
+ if (fctrl->flash_state ==
+ CAM_FLASH_STATE_START) {
+ CAM_WARN(CAM_FLASH,
+ "Wrong state :Prev state: %d",
+ fctrl->flash_state);
+ return -EINVAL;
+ }
+ rc = cam_flash_prepare(fctrl, true);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "Enable Regulator Failed rc = %d", rc);
+ return rc;
+ }
+ rc = cam_flash_low(fctrl, flash_data);
+ if (rc)
+ CAM_ERR(CAM_FLASH,
+ "TORCH ON failed : %d", rc);
}
if (flash_data->opcode ==
CAMERA_SENSOR_FLASH_OP_OFF) {
@@ -409,7 +779,6 @@ int cam_flash_apply_setting(struct cam_flash_ctrl *fctrl,
} else if (fctrl->nrt_info.cmn_attr.cmd_type ==
CAMERA_SENSOR_FLASH_CMD_TYPE_RER) {
flash_data = &fctrl->nrt_info;
-
if (fctrl->flash_state != CAM_FLASH_STATE_START) {
rc = cam_flash_off(fctrl);
if (rc) {
@@ -442,8 +811,7 @@ int cam_flash_apply_setting(struct cam_flash_ctrl *fctrl,
rc = cam_flash_off(fctrl);
if (rc) {
CAM_ERR(CAM_FLASH,
- "Flash off failed: %d",
- rc);
+ "Flash off failed: %d", rc);
continue;
}
fctrl->flash_state = CAM_FLASH_STATE_START;
@@ -505,15 +873,325 @@ int cam_flash_apply_setting(struct cam_flash_ctrl *fctrl,
}
nrt_del_req:
- delete_req(fctrl, req_id);
+ cam_flash_pmic_delete_req(fctrl, req_id);
apply_setting_err:
return rc;
}
-int cam_flash_parser(struct cam_flash_ctrl *fctrl, void *arg)
+int cam_flash_i2c_pkt_parser(struct cam_flash_ctrl *fctrl, void *arg)
+{
+ int rc = 0, i = 0;
+ uintptr_t generic_ptr;
+ uint32_t total_cmd_buf_in_bytes = 0;
+ uint32_t processed_cmd_buf_in_bytes = 0;
+ uint16_t cmd_length_in_bytes = 0;
+ uint32_t *cmd_buf = NULL;
+ uint32_t *offset = NULL;
+ uint32_t frm_offset = 0;
+ size_t len_of_buffer;
+ struct cam_flash_init *flash_init = NULL;
+ struct common_header *cmn_hdr = NULL;
+ struct cam_control *ioctl_ctrl = NULL;
+ struct cam_packet *csl_packet = NULL;
+ struct cam_cmd_buf_desc *cmd_desc = NULL;
+ struct cam_config_dev_cmd config;
+ struct cam_req_mgr_add_request add_req;
+ struct i2c_data_settings *i2c_data = NULL;
+ struct i2c_settings_array *i2c_reg_settings = NULL;
+ struct cam_sensor_power_ctrl_t *power_info = NULL;
+
+ if (!fctrl || !arg) {
+ CAM_ERR(CAM_FLASH, "fctrl/arg is NULL");
+ return -EINVAL;
+ }
+ /* getting CSL Packet */
+ ioctl_ctrl = (struct cam_control *)arg;
+
+ if (copy_from_user((&config), u64_to_user_ptr(ioctl_ctrl->handle),
+ sizeof(config))) {
+ CAM_ERR(CAM_FLASH, "Copy cmd handle from user failed");
+ return -EFAULT;
+ }
+
+ rc = cam_mem_get_cpu_buf(config.packet_handle,
+ &generic_ptr, &len_of_buffer);
+ if (rc) {
+ CAM_ERR(CAM_FLASH, "Failed in getting the buffer : %d", rc);
+ return rc;
+ }
+
+ if (config.offset > len_of_buffer) {
+ CAM_ERR(CAM_FLASH,
+ "offset is out of bounds: offset: %lld len: %zu",
+ config.offset, len_of_buffer);
+ return -EINVAL;
+ }
+
+ /* Add offset to the flash csl header */
+ csl_packet = (struct cam_packet *)(uintptr_t)(generic_ptr +
+ config.offset);
+ switch (csl_packet->header.op_code & 0xFFFFFF) {
+ case CAM_FLASH_PACKET_OPCODE_INIT: {
+ /* INIT packet*/
+ offset = (uint32_t *)((uint8_t *)&csl_packet->payload +
+ csl_packet->cmd_buf_offset);
+ cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+
+ /* Loop through multiple command buffers */
+ for (i = 1; i < csl_packet->num_cmd_buf; i++) {
+ total_cmd_buf_in_bytes = cmd_desc[i].length;
+ processed_cmd_buf_in_bytes = 0;
+ if (!total_cmd_buf_in_bytes)
+ continue;
+ rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle,
+ &generic_ptr, &len_of_buffer);
+ if (rc < 0) {
+ CAM_ERR(CAM_FLASH, "Failed to get cpu buf");
+ return rc;
+ }
+ cmd_buf = (uint32_t *)generic_ptr;
+ if (!cmd_buf) {
+ CAM_ERR(CAM_FLASH, "invalid cmd buf");
+ return -EINVAL;
+ }
+ cmd_buf += cmd_desc[i].offset / sizeof(uint32_t);
+ cmn_hdr = (struct common_header *)cmd_buf;
+
+ /* Loop through cmd formats in one cmd buffer */
+ CAM_DBG(CAM_FLASH,
+ "command Type: %d,Processed: %d,Total: %d",
+ cmn_hdr->cmd_type, processed_cmd_buf_in_bytes,
+ total_cmd_buf_in_bytes);
+ switch (cmn_hdr->cmd_type) {
+ case CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_INFO:
+ flash_init = (struct cam_flash_init *)cmd_buf;
+ fctrl->flash_type = flash_init->flash_type;
+ cmd_length_in_bytes =
+ sizeof(struct cam_flash_init);
+ processed_cmd_buf_in_bytes +=
+ cmd_length_in_bytes;
+ cmd_buf += cmd_length_in_bytes/
+ sizeof(uint32_t);
+ break;
+ case CAMERA_SENSOR_CMD_TYPE_I2C_INFO:
+ rc = cam_flash_slaveInfo_pkt_parser(
+ fctrl, cmd_buf);
+ if (rc < 0) {
+ CAM_ERR(CAM_FLASH,
+ "Failed parsing slave info: rc: %d",
+ rc);
+ return rc;
+ }
+ cmd_length_in_bytes =
+ sizeof(struct cam_cmd_i2c_info);
+ processed_cmd_buf_in_bytes +=
+ cmd_length_in_bytes;
+ cmd_buf += cmd_length_in_bytes/
+ sizeof(uint32_t);
+ break;
+ case CAMERA_SENSOR_CMD_TYPE_PWR_UP:
+ case CAMERA_SENSOR_CMD_TYPE_PWR_DOWN:
+ CAM_DBG(CAM_FLASH,
+ "Received power settings");
+ cmd_length_in_bytes =
+ total_cmd_buf_in_bytes;
+ rc = cam_sensor_update_power_settings(
+ cmd_buf,
+ total_cmd_buf_in_bytes,
+ &fctrl->power_info);
+ processed_cmd_buf_in_bytes +=
+ cmd_length_in_bytes;
+ cmd_buf += cmd_length_in_bytes/
+ sizeof(uint32_t);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "Failed update power settings");
+ return rc;
+ }
+ break;
+ default:
+ CAM_DBG(CAM_FLASH,
+ "Received initSettings");
+ i2c_data = &(fctrl->i2c_data);
+ i2c_reg_settings =
+ &fctrl->i2c_data.init_settings;
+
+ i2c_reg_settings->request_id = 0;
+ i2c_reg_settings->is_settings_valid = 1;
+ rc = cam_sensor_i2c_command_parser(
+ &fctrl->io_master_info,
+ i2c_reg_settings,
+ &cmd_desc[i], 1);
+ if (rc < 0) {
+ CAM_ERR(CAM_FLASH,
+ "pkt parsing failed: %d", rc);
+ return rc;
+ }
+ cmd_length_in_bytes =
+ cmd_desc[i].length;
+ processed_cmd_buf_in_bytes +=
+ cmd_length_in_bytes;
+ cmd_buf += cmd_length_in_bytes/
+ sizeof(uint32_t);
+
+ break;
+ }
+ }
+ power_info = &fctrl->power_info;
+ if (!power_info) {
+ CAM_ERR(CAM_FLASH, "Power_info is NULL");
+ return -EINVAL;
+ }
+
+ /* Parse and fill vreg params for power up settings */
+ rc = msm_camera_fill_vreg_params(&fctrl->soc_info,
+ power_info->power_setting,
+ power_info->power_setting_size);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "failed to fill vreg params for power up rc:%d",
+ rc);
+ return rc;
+ }
+
+ /* Parse and fill vreg params for power down settings*/
+ rc = msm_camera_fill_vreg_params(
+ &fctrl->soc_info,
+ power_info->power_down_setting,
+ power_info->power_down_setting_size);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "failed to fill vreg params power down rc:%d",
+ rc);
+ return rc;
+ }
+
+ rc = fctrl->func_tbl.power_ops(fctrl, true);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "Enable Regulator Failed rc = %d", rc);
+ return rc;
+ }
+
+ rc = fctrl->func_tbl.apply_setting(fctrl, 0);
+ if (rc) {
+ CAM_ERR(CAM_FLASH, "cannot apply settings rc = %d", rc);
+ return rc;
+ }
+
+ fctrl->flash_state = CAM_FLASH_STATE_CONFIG;
+ break;
+ }
+ case CAM_FLASH_PACKET_OPCODE_SET_OPS: {
+ offset = (uint32_t *)((uint8_t *)&csl_packet->payload +
+ csl_packet->cmd_buf_offset);
+ frm_offset = csl_packet->header.request_id %
+ MAX_PER_FRAME_ARRAY;
+ /* add support for handling i2c_data*/
+ i2c_reg_settings =
+ &fctrl->i2c_data.per_frame[frm_offset];
+ if (i2c_reg_settings->is_settings_valid == true) {
+ i2c_reg_settings->request_id = 0;
+ i2c_reg_settings->is_settings_valid = false;
+ goto update_req_mgr;
+ }
+ i2c_reg_settings->is_settings_valid = true;
+ i2c_reg_settings->request_id =
+ csl_packet->header.request_id;
+ cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+ rc = cam_sensor_i2c_command_parser(
+ &fctrl->io_master_info,
+ i2c_reg_settings, cmd_desc, 1);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "Failed in parsing i2c packets");
+ return rc;
+ }
+ break;
+ }
+ case CAM_FLASH_PACKET_OPCODE_NON_REALTIME_SET_OPS: {
+ offset = (uint32_t *)((uint8_t *)&csl_packet->payload +
+ csl_packet->cmd_buf_offset);
+
+ /* add support for handling i2c_data*/
+ i2c_reg_settings = &fctrl->i2c_data.config_settings;
+ if (i2c_reg_settings->is_settings_valid == true) {
+ i2c_reg_settings->request_id = 0;
+ i2c_reg_settings->is_settings_valid = false;
+
+ rc = delete_request(i2c_reg_settings);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "Failed in Deleting the err: %d", rc);
+ return rc;
+ }
+ }
+ i2c_reg_settings->is_settings_valid = true;
+ i2c_reg_settings->request_id =
+ csl_packet->header.request_id;
+ cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+ rc = cam_sensor_i2c_command_parser(
+ &fctrl->io_master_info,
+ i2c_reg_settings, cmd_desc, 1);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "Failed in parsing i2c NRT packets");
+ return rc;
+ }
+ rc = fctrl->func_tbl.apply_setting(fctrl, 0);
+ if (rc)
+ CAM_ERR(CAM_FLASH,
+ "Apply setting failed: %d", rc);
+ return rc;
+ }
+ case CAM_PKT_NOP_OPCODE: {
+ if ((fctrl->flash_state == CAM_FLASH_STATE_INIT) ||
+ (fctrl->flash_state == CAM_FLASH_STATE_ACQUIRE)) {
+ CAM_WARN(CAM_FLASH,
+ "Rxed NOP packets without linking");
+ frm_offset = csl_packet->header.request_id %
+ MAX_PER_FRAME_ARRAY;
+ fctrl->i2c_data.per_frame[frm_offset].is_settings_valid
+ = false;
+ return 0;
+ }
+
+ CAM_DBG(CAM_FLASH, "NOP Packet is Received: req_id: %u",
+ csl_packet->header.request_id);
+ goto update_req_mgr;
+ }
+ default:
+ CAM_ERR(CAM_FLASH, "Wrong Opcode : %d",
+ (csl_packet->header.op_code & 0xFFFFFF));
+ return -EINVAL;
+ }
+update_req_mgr:
+ if (((csl_packet->header.op_code & 0xFFFFF) ==
+ CAM_PKT_NOP_OPCODE) ||
+ ((csl_packet->header.op_code & 0xFFFFF) ==
+ CAM_FLASH_PACKET_OPCODE_SET_OPS)) {
+ add_req.link_hdl = fctrl->bridge_intf.link_hdl;
+ add_req.req_id = csl_packet->header.request_id;
+ add_req.dev_hdl = fctrl->bridge_intf.device_hdl;
+
+ if ((csl_packet->header.op_code & 0xFFFFF) ==
+ CAM_FLASH_PACKET_OPCODE_SET_OPS)
+ add_req.skip_before_applying = 1;
+ else
+ add_req.skip_before_applying = 0;
+
+ if (fctrl->bridge_intf.crm_cb &&
+ fctrl->bridge_intf.crm_cb->add_req)
+ fctrl->bridge_intf.crm_cb->add_req(&add_req);
+ CAM_DBG(CAM_FLASH, "add req to req_mgr= %lld", add_req.req_id);
+ }
+ return rc;
+}
+
+int cam_flash_pmic_pkt_parser(struct cam_flash_ctrl *fctrl, void *arg)
{
int rc = 0, i = 0;
- uint64_t generic_ptr;
+ uintptr_t generic_ptr;
uint32_t *cmd_buf = NULL;
uint32_t *offset = NULL;
uint32_t frm_offset = 0;
@@ -537,7 +1215,8 @@ int cam_flash_parser(struct cam_flash_ctrl *fctrl, void *arg)
/* getting CSL Packet */
ioctl_ctrl = (struct cam_control *)arg;
- if (copy_from_user((&config), (void __user *) ioctl_ctrl->handle,
+ if (copy_from_user((&config),
+ u64_to_user_ptr(ioctl_ctrl->handle),
sizeof(config))) {
CAM_ERR(CAM_FLASH, "Copy cmd handle from user failed");
rc = -EFAULT;
@@ -545,7 +1224,7 @@ int cam_flash_parser(struct cam_flash_ctrl *fctrl, void *arg)
}
rc = cam_mem_get_cpu_buf(config.packet_handle,
- (uint64_t *)&generic_ptr, &len_of_buffer);
+ &generic_ptr, &len_of_buffer);
if (rc) {
CAM_ERR(CAM_FLASH, "Failed in getting the buffer : %d", rc);
return rc;
@@ -559,57 +1238,79 @@ int cam_flash_parser(struct cam_flash_ctrl *fctrl, void *arg)
}
/* Add offset to the flash csl header */
- csl_packet = (struct cam_packet *)(generic_ptr + config.offset);
+ csl_packet =
+ (struct cam_packet *)(generic_ptr + (uint32_t)config.offset);
switch (csl_packet->header.op_code & 0xFFFFFF) {
case CAM_FLASH_PACKET_OPCODE_INIT: {
/* INIT packet*/
offset = (uint32_t *)((uint8_t *)&csl_packet->payload +
csl_packet->cmd_buf_offset);
- fctrl->flash_init_setting.cmn_attr.request_id = 0;
- fctrl->flash_init_setting.cmn_attr.is_settings_valid = true;
cmd_desc = (struct cam_cmd_buf_desc *)(offset);
rc = cam_mem_get_cpu_buf(cmd_desc->mem_handle,
- (uint64_t *)&generic_ptr, &len_of_buffer);
+ &generic_ptr, &len_of_buffer);
cmd_buf = (uint32_t *)((uint8_t *)generic_ptr +
cmd_desc->offset);
cam_flash_info = (struct cam_flash_init *)cmd_buf;
switch (cam_flash_info->cmd_type) {
- case CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_INFO:
+ case CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_INFO: {
+ CAM_DBG(CAM_FLASH, "INIT_INFO CMD CALLED");
+ fctrl->flash_init_setting.cmn_attr.request_id = 0;
+ fctrl->flash_init_setting.cmn_attr.is_settings_valid =
+ true;
fctrl->flash_type = cam_flash_info->flash_type;
fctrl->is_regulator_enabled = false;
fctrl->nrt_info.cmn_attr.cmd_type =
CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_INFO;
+
+ rc = fctrl->func_tbl.power_ops(fctrl, true);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "Enable Regulator Failed rc = %d", rc);
+ return rc;
+ }
+
fctrl->flash_state =
CAM_FLASH_STATE_CONFIG;
break;
- case CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_FIRE:
- CAM_DBG(CAM_FLASH, "Widget Flash Operation");
- flash_operation_info =
- (struct cam_flash_set_on_off *) cmd_buf;
- fctrl->nrt_info.cmn_attr.count =
- flash_operation_info->count;
- fctrl->nrt_info.cmn_attr.request_id = 0;
- fctrl->nrt_info.opcode =
- flash_operation_info->opcode;
- fctrl->nrt_info.cmn_attr.cmd_type =
- CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_FIRE;
- for (i = 0;
- i < flash_operation_info->count; i++)
- fctrl->nrt_info.led_current_ma[i] =
- flash_operation_info->led_current_ma[i];
+ }
+ case CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_FIRE: {
+ CAM_DBG(CAM_FLASH, "INIT_FIRE Operation");
- mutex_lock(&fctrl->flash_wq_mutex);
- rc = cam_flash_apply_setting(fctrl, 0);
- if (rc)
- CAM_ERR(CAM_FLASH,
- "Apply setting failed: %d",
- rc);
- mutex_unlock(&fctrl->flash_wq_mutex);
- fctrl->flash_state =
- CAM_FLASH_STATE_CONFIG;
+ flash_operation_info =
+ (struct cam_flash_set_on_off *) cmd_buf;
+ if (!flash_operation_info) {
+ CAM_ERR(CAM_FLASH,
+ "flash_operation_info Null");
+ return -EINVAL;
+ }
+ if (flash_operation_info->count >
+ CAM_FLASH_MAX_LED_TRIGGERS) {
+ CAM_ERR(CAM_FLASH, "led count out of limit");
+ return -EINVAL;
+ }
+ fctrl->nrt_info.cmn_attr.count =
+ flash_operation_info->count;
+ fctrl->nrt_info.cmn_attr.request_id = 0;
+ fctrl->nrt_info.opcode =
+ flash_operation_info->opcode;
+ fctrl->nrt_info.cmn_attr.cmd_type =
+ CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_FIRE;
+ for (i = 0;
+ i < flash_operation_info->count; i++)
+ fctrl->nrt_info.led_current_ma[i] =
+ flash_operation_info->led_current_ma[i];
+
+ rc = fctrl->func_tbl.apply_setting(fctrl, 0);
+ if (rc)
+ CAM_ERR(CAM_FLASH,
+ "Apply setting failed: %d",
+ rc);
+
+ fctrl->flash_state = CAM_FLASH_STATE_CONFIG;
break;
+ }
default:
CAM_ERR(CAM_FLASH, "Wrong cmd_type = %d",
cam_flash_info->cmd_type);
@@ -635,7 +1336,7 @@ int cam_flash_parser(struct cam_flash_ctrl *fctrl, void *arg)
flash_data->cmn_attr.is_settings_valid = true;
cmd_desc = (struct cam_cmd_buf_desc *)(offset);
rc = cam_mem_get_cpu_buf(cmd_desc->mem_handle,
- (uint64_t *)&generic_ptr, &len_of_buffer);
+ &generic_ptr, &len_of_buffer);
cmd_buf = (uint32_t *)((uint8_t *)generic_ptr +
cmd_desc->offset);
@@ -647,7 +1348,7 @@ int cam_flash_parser(struct cam_flash_ctrl *fctrl, void *arg)
switch (cmn_hdr->cmd_type) {
case CAMERA_SENSOR_FLASH_CMD_TYPE_FIRE: {
CAM_DBG(CAM_FLASH,
- "CAMERA_FLASH_CMD_TYPE_OPS case called");
+ "CAMERA_SENSOR_FLASH_CMD_TYPE_FIRE cmd called");
if ((fctrl->flash_state == CAM_FLASH_STATE_INIT) ||
(fctrl->flash_state ==
CAM_FLASH_STATE_ACQUIRE)) {
@@ -664,6 +1365,11 @@ int cam_flash_parser(struct cam_flash_ctrl *fctrl, void *arg)
"flash_operation_info Null");
return -EINVAL;
}
+ if (flash_operation_info->count >
+ CAM_FLASH_MAX_LED_TRIGGERS) {
+ CAM_ERR(CAM_FLASH, "led count out of limit");
+ return -EINVAL;
+ }
flash_data->opcode = flash_operation_info->opcode;
flash_data->cmn_attr.count =
@@ -671,8 +1377,8 @@ int cam_flash_parser(struct cam_flash_ctrl *fctrl, void *arg)
for (i = 0; i < flash_operation_info->count; i++)
flash_data->led_current_ma[i]
= flash_operation_info->led_current_ma[i];
- }
- break;
+ }
+ break;
default:
CAM_ERR(CAM_FLASH, "Wrong cmd_type = %d",
cmn_hdr->cmd_type);
@@ -686,7 +1392,7 @@ int cam_flash_parser(struct cam_flash_ctrl *fctrl, void *arg)
fctrl->nrt_info.cmn_attr.is_settings_valid = true;
cmd_desc = (struct cam_cmd_buf_desc *)(offset);
rc = cam_mem_get_cpu_buf(cmd_desc->mem_handle,
- (uint64_t *)&generic_ptr, &len_of_buffer);
+ &generic_ptr, &len_of_buffer);
cmd_buf = (uint32_t *)((uint8_t *)generic_ptr +
cmd_desc->offset);
cmn_hdr = (struct common_header *)cmd_buf;
@@ -696,6 +1402,17 @@ int cam_flash_parser(struct cam_flash_ctrl *fctrl, void *arg)
CAM_DBG(CAM_FLASH, "Widget Flash Operation");
flash_operation_info =
(struct cam_flash_set_on_off *) cmd_buf;
+ if (!flash_operation_info) {
+ CAM_ERR(CAM_FLASH,
+ "flash_operation_info Null");
+ return -EINVAL;
+ }
+ if (flash_operation_info->count >
+ CAM_FLASH_MAX_LED_TRIGGERS) {
+ CAM_ERR(CAM_FLASH, "led count out of limit");
+ return -EINVAL;
+ }
+
fctrl->nrt_info.cmn_attr.count =
flash_operation_info->count;
fctrl->nrt_info.cmn_attr.request_id = 0;
@@ -708,12 +1425,10 @@ int cam_flash_parser(struct cam_flash_ctrl *fctrl, void *arg)
fctrl->nrt_info.led_current_ma[i] =
flash_operation_info->led_current_ma[i];
- mutex_lock(&fctrl->flash_wq_mutex);
- rc = cam_flash_apply_setting(fctrl, 0);
+ rc = fctrl->func_tbl.apply_setting(fctrl, 0);
if (rc)
CAM_ERR(CAM_FLASH, "Apply setting failed: %d",
rc);
- mutex_unlock(&fctrl->flash_wq_mutex);
return rc;
}
case CAMERA_SENSOR_FLASH_CMD_TYPE_QUERYCURR: {
@@ -737,6 +1452,17 @@ int cam_flash_parser(struct cam_flash_ctrl *fctrl, void *arg)
case CAMERA_SENSOR_FLASH_CMD_TYPE_RER: {
rc = 0;
flash_rer_info = (struct cam_flash_set_rer *)cmd_buf;
+ if (!flash_rer_info) {
+ CAM_ERR(CAM_FLASH,
+ "flash_rer_info Null");
+ return -EINVAL;
+ }
+ if (flash_rer_info->count >
+ CAM_FLASH_MAX_LED_TRIGGERS) {
+ CAM_ERR(CAM_FLASH, "led count out of limit");
+ return -EINVAL;
+ }
+
fctrl->nrt_info.cmn_attr.cmd_type =
CAMERA_SENSOR_FLASH_CMD_TYPE_RER;
fctrl->nrt_info.opcode = flash_rer_info->opcode;
@@ -754,12 +1480,10 @@ int cam_flash_parser(struct cam_flash_ctrl *fctrl, void *arg)
flash_rer_info->led_current_ma[i];
- mutex_lock(&fctrl->flash_wq_mutex);
- rc = cam_flash_apply_setting(fctrl, 0);
+ rc = fctrl->func_tbl.apply_setting(fctrl, 0);
if (rc)
CAM_ERR(CAM_FLASH, "apply_setting failed: %d",
rc);
- mutex_unlock(&fctrl->flash_wq_mutex);
return rc;
}
default:
@@ -767,7 +1491,6 @@ int cam_flash_parser(struct cam_flash_ctrl *fctrl, void *arg)
cmn_hdr->cmd_type);
return -EINVAL;
}
-
break;
}
case CAM_PKT_NOP_OPCODE: {
@@ -785,7 +1508,7 @@ int cam_flash_parser(struct cam_flash_ctrl *fctrl, void *arg)
fctrl->per_frame[frm_offset].cmn_attr.is_settings_valid = false;
fctrl->per_frame[frm_offset].cmn_attr.request_id = 0;
fctrl->per_frame[frm_offset].opcode = CAM_PKT_NOP_OPCODE;
- CAM_DBG(CAM_FLASH, "NOP Packet is Received: req_id: %u",
+ CAM_DBG(CAM_FLASH, "NOP Packet is Received: req_id: %llu",
csl_packet->header.request_id);
goto update_req_mgr;
}
@@ -839,7 +1562,7 @@ int cam_flash_establish_link(struct cam_req_mgr_core_dev_link_setup *link)
CAM_ERR(CAM_FLASH, " Device data is NULL");
return -EINVAL;
}
-
+ mutex_lock(&fctrl->flash_mutex);
if (link->link_enable) {
fctrl->bridge_intf.link_hdl = link->link_hdl;
fctrl->bridge_intf.crm_cb = link->crm_cb;
@@ -847,43 +1570,11 @@ int cam_flash_establish_link(struct cam_req_mgr_core_dev_link_setup *link)
fctrl->bridge_intf.link_hdl = -1;
fctrl->bridge_intf.crm_cb = NULL;
}
+ mutex_unlock(&fctrl->flash_mutex);
return 0;
}
-
-int cam_flash_stop_dev(struct cam_flash_ctrl *fctrl)
-{
- int rc = 0, i, j;
-
- cam_flash_off(fctrl);
-
- for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
- fctrl->per_frame[i].cmn_attr.request_id = 0;
- fctrl->per_frame[i].cmn_attr.is_settings_valid = false;
- fctrl->per_frame[i].cmn_attr.count = 0;
- for (j = 0; j < CAM_FLASH_MAX_LED_TRIGGERS; j++)
- fctrl->per_frame[i].led_current_ma[j] = 0;
- }
-
- rc = cam_flash_flush_nrt(fctrl);
- if (rc) {
- CAM_ERR(CAM_FLASH,
- "NonRealTime Dev flush failed rc: %d", rc);
- return rc;
- }
-
- if ((fctrl->flash_state == CAM_FLASH_STATE_START) &&
- (fctrl->is_regulator_enabled == true)) {
- rc = cam_flash_prepare(fctrl, false);
- if (rc)
- CAM_ERR(CAM_FLASH, "Disable Regulator Failed rc: %d",
- rc);
- }
-
- return rc;
-}
-
int cam_flash_release_dev(struct cam_flash_ctrl *fctrl)
{
int rc = 0;
@@ -911,9 +1602,11 @@ void cam_flash_shutdown(struct cam_flash_ctrl *fctrl)
if ((fctrl->flash_state == CAM_FLASH_STATE_CONFIG) ||
(fctrl->flash_state == CAM_FLASH_STATE_START)) {
- rc = cam_flash_stop_dev(fctrl);
+ fctrl->func_tbl.flush_req(fctrl, FLUSH_ALL, 0);
+ rc = fctrl->func_tbl.power_ops(fctrl, false);
if (rc)
- CAM_ERR(CAM_FLASH, "Stop Failed rc: %d", rc);
+ CAM_ERR(CAM_FLASH, "Power Down Failed rc: %d",
+ rc);
}
rc = cam_flash_release_dev(fctrl);
@@ -937,12 +1630,12 @@ int cam_flash_apply_request(struct cam_req_mgr_apply_request *apply)
return -EINVAL;
}
- mutex_lock(&fctrl->flash_wq_mutex);
- rc = cam_flash_apply_setting(fctrl, apply->request_id);
+ mutex_lock(&fctrl->flash_mutex);
+ rc = fctrl->func_tbl.apply_setting(fctrl, apply->request_id);
if (rc)
CAM_ERR(CAM_FLASH, "apply_setting failed with rc=%d",
rc);
- mutex_unlock(&fctrl->flash_wq_mutex);
+ mutex_unlock(&fctrl->flash_mutex);
return rc;
}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.h
index f73409a0a935..1bd3b31c1668 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -16,20 +16,12 @@
#include <linux/leds-qpnp-flash.h>
#include <media/cam_sensor.h>
#include "cam_flash_dev.h"
-#include "cam_sync_api.h"
-#include "cam_mem_mgr_api.h"
-int cam_flash_parser(struct cam_flash_ctrl *fctrl, void *arg);
int cam_flash_publish_dev_info(struct cam_req_mgr_device_info *info);
int cam_flash_establish_link(struct cam_req_mgr_core_dev_link_setup *link);
-int cam_flash_apply_setting(struct cam_flash_ctrl *fctrl, uint64_t req_id);
int cam_flash_apply_request(struct cam_req_mgr_apply_request *apply);
int cam_flash_process_evt(struct cam_req_mgr_link_evt_data *event_data);
int cam_flash_flush_request(struct cam_req_mgr_flush_request *flush);
-int cam_flash_off(struct cam_flash_ctrl *fctrl);
-int cam_flash_prepare(struct cam_flash_ctrl *flash_ctrl,
- bool regulator_enable);
-void cam_flash_shutdown(struct cam_flash_ctrl *flash_ctrl);
-int cam_flash_stop_dev(struct cam_flash_ctrl *flash_ctrl);
-int cam_flash_release_dev(struct cam_flash_ctrl *fctrl);
+
+
#endif /*_CAM_FLASH_CORE_H_*/
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c
index d9b5f6406058..cdd6a98d2bf1 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -15,6 +15,7 @@
#include "cam_flash_dev.h"
#include "cam_flash_soc.h"
#include "cam_flash_core.h"
+#include "cam_common_util.h"
static int32_t cam_flash_driver_cmd(struct cam_flash_ctrl *fctrl,
void *arg, struct cam_flash_private_soc *soc_private)
@@ -57,7 +58,8 @@ static int32_t cam_flash_driver_cmd(struct cam_flash_ctrl *fctrl,
goto release_mutex;
}
- rc = copy_from_user(&flash_acq_dev, (void __user *)cmd->handle,
+ rc = copy_from_user(&flash_acq_dev,
+ u64_to_user_ptr(cmd->handle),
sizeof(flash_acq_dev));
if (rc) {
CAM_ERR(CAM_FLASH, "Failed Copying from User");
@@ -77,7 +79,8 @@ static int32_t cam_flash_driver_cmd(struct cam_flash_ctrl *fctrl,
fctrl->bridge_intf.session_hdl =
flash_acq_dev.session_handle;
- rc = copy_to_user((void __user *) cmd->handle, &flash_acq_dev,
+ rc = copy_to_user(u64_to_user_ptr(cmd->handle),
+ &flash_acq_dev,
sizeof(struct cam_sensor_acquire_dev));
if (rc) {
CAM_ERR(CAM_FLASH, "Failed Copy to User with rc = %d",
@@ -93,7 +96,7 @@ static int32_t cam_flash_driver_cmd(struct cam_flash_ctrl *fctrl,
if ((fctrl->flash_state == CAM_FLASH_STATE_INIT) ||
(fctrl->flash_state == CAM_FLASH_STATE_START)) {
CAM_WARN(CAM_FLASH,
- "Cannot apply Release dev: Prev state:%d",
+ "Wrong state for Release dev: Prev state:%d",
fctrl->flash_state);
}
@@ -106,11 +109,18 @@ static int32_t cam_flash_driver_cmd(struct cam_flash_ctrl *fctrl,
rc = -EINVAL;
goto release_mutex;
}
- rc = cam_flash_release_dev(fctrl);
- if (rc)
- CAM_ERR(CAM_FLASH,
- "Failed in destroying the device Handle rc= %d",
- rc);
+
+ if ((fctrl->flash_state == CAM_FLASH_STATE_CONFIG) ||
+ (fctrl->flash_state == CAM_FLASH_STATE_START))
+ fctrl->func_tbl.flush_req(fctrl, FLUSH_ALL, 0);
+
+ if (cam_flash_release_dev(fctrl))
+ CAM_WARN(CAM_FLASH,
+ "Failed in destroying the device Handle");
+
+ if (fctrl->func_tbl.power_ops(fctrl, false))
+ CAM_WARN(CAM_FLASH, "Power Down Failed");
+
fctrl->flash_state = CAM_FLASH_STATE_INIT;
break;
}
@@ -130,8 +140,8 @@ static int32_t cam_flash_driver_cmd(struct cam_flash_ctrl *fctrl,
flash_cap.max_current_torch[i] =
soc_private->torch_max_current[i];
- if (copy_to_user((void __user *) cmd->handle, &flash_cap,
- sizeof(struct cam_flash_query_cap_info))) {
+ if (copy_to_user(u64_to_user_ptr(cmd->handle),
+ &flash_cap, sizeof(struct cam_flash_query_cap_info))) {
CAM_ERR(CAM_FLASH, "Failed Copy to User");
rc = -EFAULT;
goto release_mutex;
@@ -149,17 +159,6 @@ static int32_t cam_flash_driver_cmd(struct cam_flash_ctrl *fctrl,
goto release_mutex;
}
- rc = cam_flash_prepare(fctrl, true);
- if (rc) {
- CAM_ERR(CAM_FLASH,
- "Enable Regulator Failed rc = %d", rc);
- goto release_mutex;
- }
- rc = cam_flash_apply_setting(fctrl, 0);
- if (rc) {
- CAM_ERR(CAM_FLASH, "cannot apply settings rc = %d", rc);
- goto release_mutex;
- }
fctrl->flash_state = CAM_FLASH_STATE_START;
break;
}
@@ -173,18 +172,13 @@ static int32_t cam_flash_driver_cmd(struct cam_flash_ctrl *fctrl,
goto release_mutex;
}
- rc = cam_flash_stop_dev(fctrl);
- if (rc) {
- CAM_ERR(CAM_FLASH, "Stop Dev Failed rc = %d",
- rc);
- goto release_mutex;
- }
+ fctrl->func_tbl.flush_req(fctrl, FLUSH_ALL, 0);
fctrl->flash_state = CAM_FLASH_STATE_ACQUIRE;
break;
}
case CAM_CONFIG_DEV: {
CAM_DBG(CAM_FLASH, "CAM_CONFIG_DEV");
- rc = cam_flash_parser(fctrl, arg);
+ rc = fctrl->func_tbl.parser(fctrl, arg);
if (rc) {
CAM_ERR(CAM_FLASH, "Failed Flash Config: rc=%d\n", rc);
goto release_mutex;
@@ -201,6 +195,35 @@ release_mutex:
return rc;
}
+static int32_t cam_flash_init_default_params(struct cam_flash_ctrl *fctrl)
+{
+ /* Validate input parameters */
+ if (!fctrl) {
+ CAM_ERR(CAM_FLASH, "failed: invalid params fctrl %pK",
+ fctrl);
+ return -EINVAL;
+ }
+
+ CAM_DBG(CAM_FLASH,
+ "master_type: %d", fctrl->io_master_info.master_type);
+ /* Initialize cci_client */
+ if (fctrl->io_master_info.master_type == CCI_MASTER) {
+ fctrl->io_master_info.cci_client = kzalloc(sizeof(
+ struct cam_sensor_cci_client), GFP_KERNEL);
+ if (!(fctrl->io_master_info.cci_client))
+ return -ENOMEM;
+ } else if (fctrl->io_master_info.master_type == I2C_MASTER) {
+ if (!(fctrl->io_master_info.client))
+ return -EINVAL;
+ } else {
+ CAM_ERR(CAM_FLASH,
+ "Invalid master / Master type Not supported");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static const struct of_device_id cam_flash_dt_match[] = {
{.compatible = "qcom,camera-flash", .data = NULL},
{}
@@ -291,20 +314,36 @@ static int cam_flash_platform_remove(struct platform_device *pdev)
return 0;
}
+static int32_t cam_flash_i2c_driver_remove(struct i2c_client *client)
+{
+ int32_t rc = 0;
+ struct cam_flash_ctrl *fctrl = i2c_get_clientdata(client);
+ /* Handle I2C Devices */
+ if (!fctrl) {
+ CAM_ERR(CAM_FLASH, "Flash device is NULL");
+ return -EINVAL;
+ }
+ /*Free Allocated Mem */
+ kfree(fctrl->i2c_data.per_frame);
+ fctrl->i2c_data.per_frame = NULL;
+ kfree(fctrl);
+ return rc;
+}
+
static int cam_flash_subdev_close(struct v4l2_subdev *sd,
struct v4l2_subdev_fh *fh)
{
- struct cam_flash_ctrl *flash_ctrl =
+ struct cam_flash_ctrl *fctrl =
v4l2_get_subdevdata(sd);
- if (!flash_ctrl) {
+ if (!fctrl) {
CAM_ERR(CAM_FLASH, "Flash ctrl ptr is NULL");
return -EINVAL;
}
- mutex_lock(&flash_ctrl->flash_mutex);
- cam_flash_shutdown(flash_ctrl);
- mutex_unlock(&flash_ctrl->flash_mutex);
+ mutex_lock(&fctrl->flash_mutex);
+ cam_flash_shutdown(fctrl);
+ mutex_unlock(&fctrl->flash_mutex);
return 0;
}
@@ -324,10 +363,32 @@ static const struct v4l2_subdev_internal_ops cam_flash_internal_ops = {
.close = cam_flash_subdev_close,
};
+static int cam_flash_init_subdev(struct cam_flash_ctrl *fctrl)
+{
+ int rc = 0;
+
+ strlcpy(fctrl->device_name, CAM_FLASH_NAME,
+ sizeof(fctrl->device_name));
+ fctrl->v4l2_dev_str.internal_ops =
+ &cam_flash_internal_ops;
+ fctrl->v4l2_dev_str.ops = &cam_flash_subdev_ops;
+ fctrl->v4l2_dev_str.name = CAMX_FLASH_DEV_NAME;
+ fctrl->v4l2_dev_str.sd_flags =
+ V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+ fctrl->v4l2_dev_str.ent_function = CAM_FLASH_DEVICE_TYPE;
+ fctrl->v4l2_dev_str.token = fctrl;
+
+ rc = cam_register_subdev(&(fctrl->v4l2_dev_str));
+ if (rc)
+ CAM_ERR(CAM_FLASH, "Fail to create subdev with %d", rc);
+
+ return rc;
+}
+
static int32_t cam_flash_platform_probe(struct platform_device *pdev)
{
- int32_t rc = 0;
- struct cam_flash_ctrl *flash_ctrl = NULL;
+ int32_t rc = 0, i = 0;
+ struct cam_flash_ctrl *fctrl = NULL;
CAM_DBG(CAM_FLASH, "Enter");
if (!pdev->dev.of_node) {
@@ -335,53 +396,181 @@ static int32_t cam_flash_platform_probe(struct platform_device *pdev)
return -EINVAL;
}
- flash_ctrl = kzalloc(sizeof(struct cam_flash_ctrl), GFP_KERNEL);
- if (!flash_ctrl)
+ fctrl = kzalloc(sizeof(struct cam_flash_ctrl), GFP_KERNEL);
+ if (!fctrl)
return -ENOMEM;
- flash_ctrl->pdev = pdev;
- flash_ctrl->soc_info.pdev = pdev;
- flash_ctrl->soc_info.dev = &pdev->dev;
- flash_ctrl->soc_info.dev_name = pdev->name;
+ fctrl->pdev = pdev;
+ fctrl->soc_info.pdev = pdev;
+ fctrl->soc_info.dev = &pdev->dev;
+ fctrl->soc_info.dev_name = pdev->name;
+
+ platform_set_drvdata(pdev, fctrl);
- rc = cam_flash_get_dt_data(flash_ctrl, &flash_ctrl->soc_info);
+ rc = cam_flash_get_dt_data(fctrl, &fctrl->soc_info);
if (rc) {
CAM_ERR(CAM_FLASH, "cam_flash_get_dt_data failed with %d", rc);
- kfree(flash_ctrl);
+ kfree(fctrl);
return -EINVAL;
}
- flash_ctrl->v4l2_dev_str.internal_ops =
- &cam_flash_internal_ops;
- flash_ctrl->v4l2_dev_str.ops = &cam_flash_subdev_ops;
- flash_ctrl->v4l2_dev_str.name = CAMX_FLASH_DEV_NAME;
- flash_ctrl->v4l2_dev_str.sd_flags =
- V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
- flash_ctrl->v4l2_dev_str.ent_function = CAM_FLASH_DEVICE_TYPE;
- flash_ctrl->v4l2_dev_str.token = flash_ctrl;
+ if (of_find_property(pdev->dev.of_node, "cci-master", NULL)) {
+ /* Get CCI master */
+ rc = of_property_read_u32(pdev->dev.of_node, "cci-master",
+ &fctrl->cci_i2c_master);
+ CAM_DBG(CAM_FLASH, "cci-master %d, rc %d",
+ fctrl->cci_i2c_master, rc);
+ if (rc < 0) {
+ /* Set default master 0 */
+ fctrl->cci_i2c_master = MASTER_0;
+ rc = 0;
+ }
+
+ fctrl->io_master_info.master_type = CCI_MASTER;
+ rc = cam_flash_init_default_params(fctrl);
+ if (rc) {
+ CAM_ERR(CAM_FLASH,
+ "failed: cam_flash_init_default_params rc %d",
+ rc);
+ return rc;
+ }
+
+ fctrl->i2c_data.per_frame = (struct i2c_settings_array *)
+ kzalloc(sizeof(struct i2c_settings_array) *
+ MAX_PER_FRAME_ARRAY, GFP_KERNEL);
+ if (fctrl->i2c_data.per_frame == NULL) {
+ CAM_ERR(CAM_FLASH, "No Memory");
+ rc = -ENOMEM;
+ goto free_cci_resource;
+ }
+
+ INIT_LIST_HEAD(&(fctrl->i2c_data.init_settings.list_head));
+ INIT_LIST_HEAD(&(fctrl->i2c_data.config_settings.list_head));
+ for (i = 0; i < MAX_PER_FRAME_ARRAY; i++)
+ INIT_LIST_HEAD(
+ &(fctrl->i2c_data.per_frame[i].list_head));
+
+ fctrl->func_tbl.parser = cam_flash_i2c_pkt_parser;
+ fctrl->func_tbl.apply_setting = cam_flash_i2c_apply_setting;
+ fctrl->func_tbl.power_ops = cam_flash_i2c_power_ops;
+ fctrl->func_tbl.flush_req = cam_flash_i2c_flush_request;
+ } else {
+ /* PMIC Flash */
+ fctrl->func_tbl.parser = cam_flash_pmic_pkt_parser;
+ fctrl->func_tbl.apply_setting = cam_flash_pmic_apply_setting;
+ fctrl->func_tbl.power_ops = cam_flash_pmic_power_ops;
+ fctrl->func_tbl.flush_req = cam_flash_pmic_flush_request;
+ }
- rc = cam_register_subdev(&(flash_ctrl->v4l2_dev_str));
+ rc = cam_flash_init_subdev(fctrl);
if (rc) {
- CAM_ERR(CAM_FLASH, "Fail to create subdev with %d", rc);
- goto free_resource;
+ if (fctrl->io_master_info.cci_client != NULL)
+ goto free_cci_resource;
+ else
+ goto free_resource;
}
- flash_ctrl->bridge_intf.device_hdl = -1;
- flash_ctrl->bridge_intf.ops.get_dev_info = cam_flash_publish_dev_info;
- flash_ctrl->bridge_intf.ops.link_setup = cam_flash_establish_link;
- flash_ctrl->bridge_intf.ops.apply_req = cam_flash_apply_request;
- flash_ctrl->bridge_intf.ops.flush_req = cam_flash_flush_request;
- platform_set_drvdata(pdev, flash_ctrl);
- v4l2_set_subdevdata(&flash_ctrl->v4l2_dev_str.sd, flash_ctrl);
+ fctrl->bridge_intf.device_hdl = -1;
+ fctrl->bridge_intf.ops.get_dev_info = cam_flash_publish_dev_info;
+ fctrl->bridge_intf.ops.link_setup = cam_flash_establish_link;
+ fctrl->bridge_intf.ops.apply_req = cam_flash_apply_request;
+ fctrl->bridge_intf.ops.flush_req = cam_flash_flush_request;
- mutex_init(&(flash_ctrl->flash_mutex));
- mutex_init(&(flash_ctrl->flash_wq_mutex));
+ mutex_init(&(fctrl->flash_mutex));
- flash_ctrl->flash_state = CAM_FLASH_STATE_INIT;
+ fctrl->flash_state = CAM_FLASH_STATE_INIT;
CAM_DBG(CAM_FLASH, "Probe success");
return rc;
+
+free_cci_resource:
+ kfree(fctrl->io_master_info.cci_client);
+ fctrl->io_master_info.cci_client = NULL;
free_resource:
- kfree(flash_ctrl);
+ kfree(fctrl->i2c_data.per_frame);
+ kfree(fctrl->soc_info.soc_private);
+ cam_soc_util_release_platform_resource(&fctrl->soc_info);
+ fctrl->i2c_data.per_frame = NULL;
+ fctrl->soc_info.soc_private = NULL;
+ kfree(fctrl);
+ fctrl = NULL;
+ return rc;
+}
+
+static int32_t cam_flash_i2c_driver_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int32_t rc = 0, i = 0;
+ struct cam_flash_ctrl *fctrl;
+
+ if (client == NULL || id == NULL) {
+ CAM_ERR(CAM_FLASH, "Invalid Args client: %pK id: %pK",
+ client, id);
+ return -EINVAL;
+ }
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ CAM_ERR(CAM_FLASH, "%s :: i2c_check_functionality failed",
+ client->name);
+ return -EFAULT;
+ }
+
+ /* Create sensor control structure */
+ fctrl = kzalloc(sizeof(*fctrl), GFP_KERNEL);
+ if (!fctrl)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, fctrl);
+
+ fctrl->io_master_info.client = client;
+ fctrl->soc_info.dev = &client->dev;
+ fctrl->soc_info.dev_name = client->name;
+ fctrl->io_master_info.master_type = I2C_MASTER;
+
+ rc = cam_flash_get_dt_data(fctrl, &fctrl->soc_info);
+ if (rc) {
+ CAM_ERR(CAM_FLASH, "failed: cam_sensor_parse_dt rc %d", rc);
+ goto free_ctrl;
+ }
+
+ rc = cam_flash_init_subdev(fctrl);
+ if (rc)
+ goto free_ctrl;
+
+ fctrl->i2c_data.per_frame =
+ (struct i2c_settings_array *)
+ kzalloc(sizeof(struct i2c_settings_array) *
+ MAX_PER_FRAME_ARRAY, GFP_KERNEL);
+ if (fctrl->i2c_data.per_frame == NULL) {
+ rc = -ENOMEM;
+ goto unreg_subdev;
+ }
+
+ INIT_LIST_HEAD(&(fctrl->i2c_data.init_settings.list_head));
+ INIT_LIST_HEAD(&(fctrl->i2c_data.config_settings.list_head));
+ for (i = 0; i < MAX_PER_FRAME_ARRAY; i++)
+ INIT_LIST_HEAD(&(fctrl->i2c_data.per_frame[i].list_head));
+
+ fctrl->func_tbl.parser = cam_flash_i2c_pkt_parser;
+ fctrl->func_tbl.apply_setting = cam_flash_i2c_apply_setting;
+ fctrl->func_tbl.power_ops = cam_flash_i2c_power_ops;
+ fctrl->func_tbl.flush_req = cam_flash_i2c_flush_request;
+
+ fctrl->bridge_intf.device_hdl = -1;
+ fctrl->bridge_intf.ops.get_dev_info = cam_flash_publish_dev_info;
+ fctrl->bridge_intf.ops.link_setup = cam_flash_establish_link;
+ fctrl->bridge_intf.ops.apply_req = cam_flash_apply_request;
+ fctrl->bridge_intf.ops.flush_req = cam_flash_flush_request;
+
+ mutex_init(&(fctrl->flash_mutex));
+ fctrl->flash_state = CAM_FLASH_STATE_INIT;
+
+ return rc;
+
+unreg_subdev:
+ cam_unregister_subdev(&(fctrl->v4l2_dev_str));
+free_ctrl:
+ kfree(fctrl);
+ fctrl = NULL;
return rc;
}
@@ -398,20 +587,40 @@ static struct platform_driver cam_flash_platform_driver = {
},
};
-static int __init cam_flash_init_module(void)
+static const struct i2c_device_id i2c_id[] = {
+ {FLASH_DRIVER_I2C, (kernel_ulong_t)NULL},
+ { }
+};
+
+static struct i2c_driver cam_flash_i2c_driver = {
+ .id_table = i2c_id,
+ .probe = cam_flash_i2c_driver_probe,
+ .remove = cam_flash_i2c_driver_remove,
+ .driver = {
+ .name = FLASH_DRIVER_I2C,
+ },
+};
+
+static int32_t __init cam_flash_init_module(void)
{
int32_t rc = 0;
rc = platform_driver_register(&cam_flash_platform_driver);
- if (rc)
- CAM_ERR(CAM_FLASH, "platform probe for flash failed");
+ if (rc == 0) {
+ CAM_DBG(CAM_FLASH, "platform probe success");
+ return 0;
+ }
+ rc = i2c_add_driver(&cam_flash_i2c_driver);
+ if (rc)
+ CAM_ERR(CAM_FLASH, "i2c_add_driver failed rc: %d", rc);
return rc;
}
static void __exit cam_flash_exit_module(void)
{
platform_driver_unregister(&cam_flash_platform_driver);
+ i2c_del_driver(&cam_flash_i2c_driver);
}
module_init(cam_flash_init_module);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.h
index 4adc1b2e32a9..6a1ce99bace8 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -33,15 +33,22 @@
#include "cam_sensor_cmn_header.h"
#include "cam_soc_util.h"
#include "cam_debug_util.h"
+#include "cam_sensor_io.h"
+#include "cam_flash_core.h"
+#include "cam_context.h"
#define CAMX_FLASH_DEV_NAME "cam-flash-dev"
#define CAM_FLASH_PIPELINE_DELAY 1
+#define FLASH_DRIVER_I2C "i2c_flash"
+
#define CAM_FLASH_PACKET_OPCODE_INIT 0
#define CAM_FLASH_PACKET_OPCODE_SET_OPS 1
#define CAM_FLASH_PACKET_OPCODE_NON_REALTIME_SET_OPS 2
+struct cam_flash_ctrl;
+
enum cam_flash_switch_trigger_ops {
LED_SWITCH_OFF = 0,
LED_SWITCH_ON,
@@ -54,6 +61,12 @@ enum cam_flash_state {
CAM_FLASH_STATE_START,
};
+enum cam_flash_flush_type {
+ FLUSH_ALL = 0,
+ FLUSH_REQ,
+ FLUSH_MAX,
+};
+
/**
* struct cam_flash_intf_params
* @device_hdl : Device Handle
@@ -136,8 +149,17 @@ struct cam_flash_private_soc {
uint32_t torch_max_current[CAM_FLASH_MAX_LED_TRIGGERS];
};
+struct cam_flash_func_tbl {
+ int (*parser)(struct cam_flash_ctrl *fctrl, void *arg);
+ int (*apply_setting)(struct cam_flash_ctrl *fctrl, uint64_t req_id);
+ int (*power_ops)(struct cam_flash_ctrl *fctrl, bool regulator_enable);
+ int (*flush_req)(struct cam_flash_ctrl *fctrl,
+ enum cam_flash_flush_type type, uint64_t req_id);
+};
+
/**
* struct cam_flash_ctrl
+ * @device_name : Device name
* @soc_info : Soc related information
* @pdev : Platform device
* @per_frame[] : Per_frame setting array
@@ -150,32 +172,58 @@ struct cam_flash_private_soc {
* @flash_num_sources : Number of flash sources
* @torch_num_source : Number of torch sources
* @flash_mutex : Mutex for flash operations
- * @flash_wq_mutex : Mutex for flash apply setting
- * @flash_state : Current flash state (LOW/OFF/ON/INIT)
+ * @flash_state : Current flash state (LOW/OFF/ON/INIT)
* @flash_type : Flash types (PMIC/I2C/GPIO)
* @is_regulator_enable : Regulator disable/enable notifier
+ * @func_tbl : Function table for different HW
+ * (e.g. i2c/pmic/gpio)
* @flash_trigger : Flash trigger ptr
* @torch_trigger : Torch trigger ptr
+ * @cci_i2c_master : I2C structure
+ * @io_master_info : Information about the communication master
+ * @i2c_data : I2C register settings
*/
struct cam_flash_ctrl {
- struct cam_hw_soc_info soc_info;
- struct platform_device *pdev;
- struct cam_flash_frame_setting per_frame[MAX_PER_FRAME_ARRAY];
- struct cam_flash_frame_setting nrt_info;
- struct device_node *of_node;
- struct cam_subdev v4l2_dev_str;
- struct cam_flash_intf_params bridge_intf;
- struct cam_flash_init_packet flash_init_setting;
- struct led_trigger *switch_trigger;
- uint32_t flash_num_sources;
- uint32_t torch_num_sources;
- struct mutex flash_mutex;
- struct mutex flash_wq_mutex;
- enum cam_flash_state flash_state;
- uint8_t flash_type;
- bool is_regulator_enabled;
+ char device_name[CAM_CTX_DEV_NAME_MAX_LENGTH];
+ struct cam_hw_soc_info soc_info;
+ struct platform_device *pdev;
+ struct cam_sensor_power_ctrl_t power_info;
+ struct cam_flash_frame_setting per_frame[MAX_PER_FRAME_ARRAY];
+ struct cam_flash_frame_setting nrt_info;
+ struct device_node *of_node;
+ struct cam_subdev v4l2_dev_str;
+ struct cam_flash_intf_params bridge_intf;
+ struct cam_flash_init_packet flash_init_setting;
+ struct led_trigger *switch_trigger;
+ uint32_t flash_num_sources;
+ uint32_t torch_num_sources;
+ struct mutex flash_mutex;
+ enum cam_flash_state flash_state;
+ uint8_t flash_type;
+ bool is_regulator_enabled;
+ struct cam_flash_func_tbl func_tbl;
struct led_trigger *flash_trigger[CAM_FLASH_MAX_LED_TRIGGERS];
struct led_trigger *torch_trigger[CAM_FLASH_MAX_LED_TRIGGERS];
+/* I2C related setting */
+ enum cci_i2c_master_t cci_i2c_master;
+ struct camera_io_master io_master_info;
+ struct i2c_data_settings i2c_data;
};
+int cam_flash_pmic_pkt_parser(struct cam_flash_ctrl *fctrl, void *arg);
+int cam_flash_i2c_pkt_parser(struct cam_flash_ctrl *fctrl, void *arg);
+int cam_flash_pmic_apply_setting(struct cam_flash_ctrl *fctrl, uint64_t req_id);
+int cam_flash_i2c_apply_setting(struct cam_flash_ctrl *fctrl, uint64_t req_id);
+int cam_flash_off(struct cam_flash_ctrl *fctrl);
+int cam_flash_pmic_power_ops(struct cam_flash_ctrl *fctrl,
+ bool regulator_enable);
+int cam_flash_i2c_power_ops(struct cam_flash_ctrl *fctrl,
+ bool regulator_enable);
+int cam_flash_i2c_flush_request(struct cam_flash_ctrl *fctrl,
+ enum cam_flash_flush_type type, uint64_t req_id);
+int cam_flash_pmic_flush_request(struct cam_flash_ctrl *fctrl,
+ enum cam_flash_flush_type, uint64_t req_id);
+void cam_flash_shutdown(struct cam_flash_ctrl *fctrl);
+int cam_flash_release_dev(struct cam_flash_ctrl *fctrl);
+
#endif /*_CAM_FLASH_DEV_H_*/
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.c
index a195762c249f..22a124d86f93 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -193,32 +193,31 @@ int cam_flash_get_dt_data(struct cam_flash_ctrl *fctrl,
return -EINVAL;
}
- of_node = fctrl->pdev->dev.of_node;
-
- rc = cam_soc_util_get_dt_properties(soc_info);
- if (rc < 0) {
- CAM_ERR(CAM_FLASH, "Get_dt_properties failed rc %d", rc);
- return rc;
- }
-
soc_info->soc_private =
kzalloc(sizeof(struct cam_flash_private_soc), GFP_KERNEL);
if (!soc_info->soc_private) {
rc = -ENOMEM;
goto release_soc_res;
}
+ of_node = fctrl->pdev->dev.of_node;
+
+ rc = cam_soc_util_get_dt_properties(soc_info);
+ if (rc) {
+ CAM_ERR(CAM_FLASH, "Get_dt_properties failed rc %d", rc);
+ goto free_soc_private;
+ }
rc = cam_get_source_node_info(of_node, fctrl, soc_info->soc_private);
- if (rc < 0) {
+ if (rc) {
CAM_ERR(CAM_FLASH,
"cam_flash_get_pmic_source_info failed rc %d", rc);
goto free_soc_private;
}
-
return rc;
free_soc_private:
kfree(soc_info->soc_private);
+ soc_info->soc_private = NULL;
release_soc_res:
cam_soc_util_release_platform_resource(soc_info);
return rc;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/Makefile
index 9397c6844737..e525c46656be 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/Makefile
@@ -6,5 +6,6 @@ ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_ut
ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
obj-$(CONFIG_SPECTRA_CAMERA) += cam_ois_dev.o cam_ois_core.o cam_ois_soc.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c
index 05706556d9db..850b315c26a7 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c
@@ -18,6 +18,7 @@
#include "cam_sensor_util.h"
#include "cam_debug_util.h"
#include "cam_res_mgr_api.h"
+#include "cam_common_util.h"
int32_t cam_ois_construct_default_power_setting(
struct cam_sensor_power_ctrl_t *power_info)
@@ -79,7 +80,7 @@ static int cam_ois_get_dev_handle(struct cam_ois_ctrl_t *o_ctrl,
CAM_ERR(CAM_OIS, "Device is already acquired");
return -EFAULT;
}
- if (copy_from_user(&ois_acq_dev, (void __user *) cmd->handle,
+ if (copy_from_user(&ois_acq_dev, u64_to_user_ptr(cmd->handle),
sizeof(ois_acq_dev)))
return -EFAULT;
@@ -95,7 +96,7 @@ static int cam_ois_get_dev_handle(struct cam_ois_ctrl_t *o_ctrl,
o_ctrl->bridge_intf.session_hdl = ois_acq_dev.session_handle;
CAM_DBG(CAM_OIS, "Device Handle: %d", ois_acq_dev.device_handle);
- if (copy_to_user((void __user *) cmd->handle, &ois_acq_dev,
+ if (copy_to_user(u64_to_user_ptr(cmd->handle), &ois_acq_dev,
sizeof(struct cam_sensor_acquire_dev))) {
CAM_ERR(CAM_OIS, "ACQUIRE_DEV: copy to user failed");
return -EFAULT;
@@ -193,7 +194,7 @@ static int cam_ois_power_down(struct cam_ois_ctrl_t *o_ctrl)
return -EINVAL;
}
- rc = msm_camera_power_down(power_info, soc_info);
+ rc = cam_sensor_util_power_down(power_info, soc_info);
if (rc) {
CAM_ERR(CAM_OIS, "power down the core is failed:%d", rc);
return rc;
@@ -425,12 +426,12 @@ static int cam_ois_pkt_parse(struct cam_ois_ctrl_t *o_ctrl, void *arg)
int32_t i = 0;
uint32_t total_cmd_buf_in_bytes = 0;
struct common_header *cmm_hdr = NULL;
- uint64_t generic_ptr;
+ uintptr_t generic_ptr;
struct cam_control *ioctl_ctrl = NULL;
struct cam_config_dev_cmd dev_config;
struct i2c_settings_array *i2c_reg_settings = NULL;
struct cam_cmd_buf_desc *cmd_desc = NULL;
- uint64_t generic_pkt_addr;
+ uintptr_t generic_pkt_addr;
size_t pkt_len;
struct cam_packet *csl_packet = NULL;
size_t len_of_buff = 0;
@@ -440,11 +441,12 @@ static int cam_ois_pkt_parse(struct cam_ois_ctrl_t *o_ctrl, void *arg)
struct cam_sensor_power_ctrl_t *power_info = &soc_private->power_info;
ioctl_ctrl = (struct cam_control *)arg;
- if (copy_from_user(&dev_config, (void __user *) ioctl_ctrl->handle,
+ if (copy_from_user(&dev_config,
+ u64_to_user_ptr(ioctl_ctrl->handle),
sizeof(dev_config)))
return -EFAULT;
rc = cam_mem_get_cpu_buf(dev_config.packet_handle,
- (uint64_t *)&generic_pkt_addr, &pkt_len);
+ &generic_pkt_addr, &pkt_len);
if (rc) {
CAM_ERR(CAM_OIS,
"error in converting command Handle Error: %d", rc);
@@ -459,7 +461,7 @@ static int cam_ois_pkt_parse(struct cam_ois_ctrl_t *o_ctrl, void *arg)
}
csl_packet = (struct cam_packet *)
- (generic_pkt_addr + dev_config.offset);
+ (generic_pkt_addr + (uint32_t)dev_config.offset);
switch (csl_packet->header.op_code & 0xFFFFFF) {
case CAM_OIS_PACKET_OPCODE_INIT:
offset = (uint32_t *)&csl_packet->payload;
@@ -473,7 +475,7 @@ static int cam_ois_pkt_parse(struct cam_ois_ctrl_t *o_ctrl, void *arg)
continue;
rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle,
- (uint64_t *)&generic_ptr, &len_of_buff);
+ &generic_ptr, &len_of_buff);
if (rc < 0) {
CAM_ERR(CAM_OIS, "Failed to get cpu buf");
return rc;
@@ -716,7 +718,7 @@ int cam_ois_driver_cmd(struct cam_ois_ctrl_t *o_ctrl, void *arg)
case CAM_QUERY_CAP:
ois_cap.slot_info = o_ctrl->soc_info.index;
- if (copy_to_user((void __user *) cmd->handle,
+ if (copy_to_user(u64_to_user_ptr(cmd->handle),
&ois_cap,
sizeof(struct cam_ois_query_cap_t))) {
CAM_ERR(CAM_OIS, "Failed Copy to User");
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.h
index 3b7195e5c7a3..96b1a981c900 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -27,6 +27,7 @@
#include <cam_mem_mgr.h>
#include <cam_subdev.h>
#include "cam_soc_util.h"
+#include "cam_context.h"
#define DEFINE_MSM_MUTEX(mutexname) \
static struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
@@ -90,6 +91,7 @@ struct cam_ois_intf_params {
/**
* struct cam_ois_ctrl_t - OIS ctrl private data
+ * @device_name : ois device_name
* @pdev : platform device
* @ois_mutex : ois mutex
* @soc_info : ois soc related info
@@ -110,6 +112,7 @@ struct cam_ois_intf_params {
*
*/
struct cam_ois_ctrl_t {
+ char device_name[CAM_CTX_DEV_NAME_MAX_LENGTH];
struct platform_device *pdev;
struct mutex ois_mutex;
struct cam_hw_soc_info soc_info;
@@ -122,7 +125,6 @@ struct cam_ois_ctrl_t {
struct i2c_settings_array i2c_mode_data;
enum msm_camera_device_type_t ois_device_type;
enum cam_ois_state cam_ois_state;
- char device_name[20];
char ois_name[32];
uint8_t ois_fw_flag;
uint8_t is_ois_calib;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
index a2431be71176..c88e96980ff9 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
@@ -16,6 +16,7 @@
#include "cam_sensor_util.h"
#include "cam_soc_util.h"
#include "cam_trace.h"
+#include "cam_common_util.h"
static void cam_sensor_update_req_mgr(
struct cam_sensor_ctrl_t *s_ctrl,
@@ -62,30 +63,12 @@ static void cam_sensor_release_stream_rsc(
}
}
-static void cam_sensor_release_resource(
+static void cam_sensor_release_per_frame_resource(
struct cam_sensor_ctrl_t *s_ctrl)
{
struct i2c_settings_array *i2c_set = NULL;
int i, rc;
- i2c_set = &(s_ctrl->i2c_data.init_settings);
- if (i2c_set->is_settings_valid == 1) {
- i2c_set->is_settings_valid = -1;
- rc = delete_request(i2c_set);
- if (rc < 0)
- CAM_ERR(CAM_SENSOR,
- "failed while deleting Init settings");
- }
-
- i2c_set = &(s_ctrl->i2c_data.config_settings);
- if (i2c_set->is_settings_valid == 1) {
- i2c_set->is_settings_valid = -1;
- rc = delete_request(i2c_set);
- if (rc < 0)
- CAM_ERR(CAM_SENSOR,
- "failed while deleting Res settings");
- }
-
if (s_ctrl->i2c_data.per_frame != NULL) {
for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
i2c_set = &(s_ctrl->i2c_data.per_frame[i]);
@@ -105,7 +88,7 @@ static int32_t cam_sensor_i2c_pkt_parse(struct cam_sensor_ctrl_t *s_ctrl,
void *arg)
{
int32_t rc = 0;
- uint64_t generic_ptr;
+ uintptr_t generic_ptr;
struct cam_control *ioctl_ctrl = NULL;
struct cam_packet *csl_packet = NULL;
struct cam_cmd_buf_desc *cmd_desc = NULL;
@@ -122,13 +105,14 @@ static int32_t cam_sensor_i2c_pkt_parse(struct cam_sensor_ctrl_t *s_ctrl,
return -EINVAL;
}
- if (copy_from_user(&config, (void __user *) ioctl_ctrl->handle,
+ if (copy_from_user(&config,
+ u64_to_user_ptr(ioctl_ctrl->handle),
sizeof(config)))
return -EFAULT;
rc = cam_mem_get_cpu_buf(
config.packet_handle,
- (uint64_t *)&generic_ptr,
+ &generic_ptr,
&len_of_buff);
if (rc < 0) {
CAM_ERR(CAM_SENSOR, "Failed in getting the buffer: %d", rc);
@@ -136,7 +120,7 @@ static int32_t cam_sensor_i2c_pkt_parse(struct cam_sensor_ctrl_t *s_ctrl,
}
csl_packet = (struct cam_packet *)(generic_ptr +
- config.offset);
+ (uint32_t)config.offset);
if (config.offset > len_of_buff) {
CAM_ERR(CAM_SENSOR,
"offset is out of bounds: off: %lld len: %zu",
@@ -403,15 +387,16 @@ int32_t cam_handle_cmd_buffers_for_probe(void *cmd_buf,
int32_t cam_handle_mem_ptr(uint64_t handle, struct cam_sensor_ctrl_t *s_ctrl)
{
int rc = 0, i;
- void *packet = NULL, *cmd_buf1 = NULL;
uint32_t *cmd_buf;
void *ptr;
size_t len;
struct cam_packet *pkt;
struct cam_cmd_buf_desc *cmd_desc;
+ uintptr_t cmd_buf1 = 0;
+ uintptr_t packet = 0;
rc = cam_mem_get_cpu_buf(handle,
- (uint64_t *)&packet, &len);
+ &packet, &len);
if (rc < 0) {
CAM_ERR(CAM_SENSOR, "Failed to get the command Buffer");
return -EINVAL;
@@ -432,7 +417,7 @@ int32_t cam_handle_mem_ptr(uint64_t handle, struct cam_sensor_ctrl_t *s_ctrl)
if (!(cmd_desc[i].length))
continue;
rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle,
- (uint64_t *)&cmd_buf1, &len);
+ &cmd_buf1, &len);
if (rc < 0) {
CAM_ERR(CAM_SENSOR,
"Failed to parse the command Buffer Header");
@@ -503,10 +488,9 @@ void cam_sensor_shutdown(struct cam_sensor_ctrl_t *s_ctrl)
(s_ctrl->is_probe_succeed == 0))
return;
- cam_sensor_release_resource(s_ctrl);
cam_sensor_release_stream_rsc(s_ctrl);
- if (s_ctrl->sensor_state >= CAM_SENSOR_ACQUIRE)
- cam_sensor_power_down(s_ctrl);
+ cam_sensor_release_per_frame_resource(s_ctrl);
+ cam_sensor_power_down(s_ctrl);
rc = cam_destroy_device_hdl(s_ctrl->bridge_intf.device_hdl);
if (rc < 0)
@@ -676,7 +660,8 @@ int32_t cam_sensor_driver_cmd(struct cam_sensor_ctrl_t *s_ctrl,
goto release_mutex;
}
rc = copy_from_user(&sensor_acq_dev,
- (void __user *) cmd->handle, sizeof(sensor_acq_dev));
+ u64_to_user_ptr(cmd->handle),
+ sizeof(sensor_acq_dev));
if (rc < 0) {
CAM_ERR(CAM_SENSOR, "Failed Copying from user");
goto release_mutex;
@@ -695,7 +680,8 @@ int32_t cam_sensor_driver_cmd(struct cam_sensor_ctrl_t *s_ctrl,
CAM_DBG(CAM_SENSOR, "Device Handle: %d",
sensor_acq_dev.device_handle);
- if (copy_to_user((void __user *) cmd->handle, &sensor_acq_dev,
+ if (copy_to_user(u64_to_user_ptr(cmd->handle),
+ &sensor_acq_dev,
sizeof(struct cam_sensor_acquire_dev))) {
CAM_ERR(CAM_SENSOR, "Failed Copy to User");
rc = -EFAULT;
@@ -731,7 +717,7 @@ int32_t cam_sensor_driver_cmd(struct cam_sensor_ctrl_t *s_ctrl,
goto release_mutex;
}
- cam_sensor_release_resource(s_ctrl);
+ cam_sensor_release_per_frame_resource(s_ctrl);
cam_sensor_release_stream_rsc(s_ctrl);
if (s_ctrl->bridge_intf.device_hdl == -1) {
CAM_ERR(CAM_SENSOR,
@@ -762,8 +748,8 @@ int32_t cam_sensor_driver_cmd(struct cam_sensor_ctrl_t *s_ctrl,
struct cam_sensor_query_cap sensor_cap;
cam_sensor_query_cap(s_ctrl, &sensor_cap);
- if (copy_to_user((void __user *) cmd->handle, &sensor_cap,
- sizeof(struct cam_sensor_query_cap))) {
+ if (copy_to_user(u64_to_user_ptr(cmd->handle),
+ &sensor_cap, sizeof(struct cam_sensor_query_cap))) {
CAM_ERR(CAM_SENSOR, "Failed Copy to User");
rc = -EFAULT;
goto release_mutex;
@@ -816,7 +802,7 @@ int32_t cam_sensor_driver_cmd(struct cam_sensor_ctrl_t *s_ctrl,
}
}
- cam_sensor_release_resource(s_ctrl);
+ cam_sensor_release_per_frame_resource(s_ctrl);
s_ctrl->sensor_state = CAM_SENSOR_ACQUIRE;
CAM_INFO(CAM_SENSOR,
"CAM_STOP_DEV Success, sensor_id:0x%x,sensor_slave_addr:0x%x",
@@ -918,6 +904,8 @@ int cam_sensor_establish_link(struct cam_req_mgr_core_dev_link_setup *link)
CAM_ERR(CAM_SENSOR, "Device data is NULL");
return -EINVAL;
}
+
+ mutex_lock(&s_ctrl->cam_sensor_mutex);
if (link->link_enable) {
s_ctrl->bridge_intf.link_hdl = link->link_hdl;
s_ctrl->bridge_intf.crm_cb = link->crm_cb;
@@ -925,6 +913,7 @@ int cam_sensor_establish_link(struct cam_req_mgr_core_dev_link_setup *link)
s_ctrl->bridge_intf.link_hdl = -1;
s_ctrl->bridge_intf.crm_cb = NULL;
}
+ mutex_unlock(&s_ctrl->cam_sensor_mutex);
return 0;
}
@@ -1005,7 +994,7 @@ int cam_sensor_power_down(struct cam_sensor_ctrl_t *s_ctrl)
CAM_ERR(CAM_SENSOR, "failed: power_info %pK", power_info);
return -EINVAL;
}
- rc = msm_camera_power_down(power_info, soc_info);
+ rc = cam_sensor_util_power_down(power_info, soc_info);
if (rc < 0) {
CAM_ERR(CAM_SENSOR, "power down the core is failed:%d", rc);
return rc;
@@ -1155,8 +1144,10 @@ int32_t cam_sensor_apply_request(struct cam_req_mgr_apply_request *apply)
}
CAM_DBG(CAM_REQ, " Sensor update req id: %lld", apply->request_id);
trace_cam_apply_req("Sensor", apply->request_id);
+ mutex_lock(&(s_ctrl->cam_sensor_mutex));
rc = cam_sensor_apply_settings(s_ctrl, apply->request_id,
CAM_SENSOR_PACKET_OPCODE_SENSOR_UPDATE);
+ mutex_unlock(&(s_ctrl->cam_sensor_mutex));
return rc;
}
@@ -1190,7 +1181,9 @@ int32_t cam_sensor_flush_request(struct cam_req_mgr_flush_request *flush_req)
continue;
if (i2c_set->is_settings_valid == 1) {
+ mutex_lock(&(s_ctrl->cam_sensor_mutex));
rc = delete_request(i2c_set);
+ mutex_unlock(&(s_ctrl->cam_sensor_mutex));
if (rc < 0)
CAM_ERR(CAM_SENSOR,
"delete request: %lld rc: %d",
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.h
index 34f8b8dba696..38205098aa83 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -32,6 +32,7 @@
#include <cam_subdev.h>
#include <cam_sensor_io.h>
#include "cam_debug_util.h"
+#include "cam_context.h"
#define NUM_MASTERS 2
#define NUM_QUEUES 2
@@ -74,6 +75,7 @@ struct intf_params {
/**
* struct cam_sensor_ctrl_t: Camera control structure
+ * @device_name: Sensor device name
* @pdev: Platform device
* @cam_sensor_mutex: Sensor mutex
* @sensordata: Sensor board Information
@@ -89,13 +91,13 @@ struct intf_params {
* @i2c_data: Sensor I2C register settings
* @sensor_info: Sensor query cap structure
* @bridge_intf: Bridge interface structure
- * @device_name: Sensor device structure
* @streamon_count: Count to hold the number of times stream on called
* @streamoff_count: Count to hold the number of times stream off called
* @bob_reg_index: Hold to BoB regulator index
* @bob_pwm_switch: Boolean flag to switch into PWM mode for BoB regulator
*/
struct cam_sensor_ctrl_t {
+ char device_name[CAM_CTX_DEV_NAME_MAX_LENGTH];
struct platform_device *pdev;
struct cam_hw_soc_info soc_info;
struct mutex cam_sensor_mutex;
@@ -112,7 +114,6 @@ struct cam_sensor_ctrl_t {
struct i2c_data_settings i2c_data;
struct cam_sensor_query_cap sensor_info;
struct intf_params bridge_intf;
- char device_name[20];
uint32_t streamon_count;
uint32_t streamoff_count;
int bob_reg_index;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
index 46bda0589c52..5c1143ece162 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
@@ -296,7 +296,7 @@ int cam_sensor_i2c_command_parser(
{
int16_t rc = 0, i = 0;
size_t len_of_buff = 0;
- uint64_t generic_ptr;
+ uintptr_t generic_ptr;
uint16_t cmd_length_in_bytes = 0;
for (i = 0; i < num_cmd_buffers; i++) {
@@ -318,11 +318,11 @@ int cam_sensor_i2c_command_parser(
continue;
rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle,
- (uint64_t *)&generic_ptr, &len_of_buff);
+ &generic_ptr, &len_of_buff);
cmd_buf = (uint32_t *)generic_ptr;
if (rc < 0) {
CAM_ERR(CAM_SENSOR,
- "cmd hdl failed:%d, Err: %d, Buffer_len: %ld",
+ "cmd hdl failed:%d, Err: %d, Buffer_len: %zd",
cmd_desc[i].mem_handle, rc, len_of_buff);
return rc;
}
@@ -439,6 +439,75 @@ int cam_sensor_i2c_command_parser(
return rc;
}
+int cam_sensor_util_i2c_apply_setting(
+ struct camera_io_master *io_master_info,
+ struct i2c_settings_list *i2c_list)
+{
+ int32_t rc = 0;
+ uint32_t i, size;
+
+ switch (i2c_list->op_code) {
+ case CAM_SENSOR_I2C_WRITE_RANDOM: {
+ rc = camera_io_dev_write(io_master_info,
+ &(i2c_list->i2c_settings));
+ if (rc < 0) {
+ CAM_ERR(CAM_SENSOR,
+ "Failed to random write I2C settings: %d",
+ rc);
+ return rc;
+ }
+ break;
+ }
+ case CAM_SENSOR_I2C_WRITE_SEQ: {
+ rc = camera_io_dev_write_continuous(
+ io_master_info, &(i2c_list->i2c_settings), 0);
+ if (rc < 0) {
+ CAM_ERR(CAM_SENSOR,
+ "Failed to seq write I2C settings: %d",
+ rc);
+ return rc;
+ }
+ break;
+ }
+ case CAM_SENSOR_I2C_WRITE_BURST: {
+ rc = camera_io_dev_write_continuous(
+ io_master_info, &(i2c_list->i2c_settings), 1);
+ if (rc < 0) {
+ CAM_ERR(CAM_SENSOR,
+ "Failed to burst write I2C settings: %d",
+ rc);
+ return rc;
+ }
+ break;
+ }
+ case CAM_SENSOR_I2C_POLL: {
+ size = i2c_list->i2c_settings.size;
+ for (i = 0; i < size; i++) {
+ rc = camera_io_dev_poll(
+ io_master_info,
+ i2c_list->i2c_settings.reg_setting[i].reg_addr,
+ i2c_list->i2c_settings.reg_setting[i].reg_data,
+ i2c_list->i2c_settings.reg_setting[i].data_mask,
+ i2c_list->i2c_settings.addr_type,
+ i2c_list->i2c_settings.data_type,
+ i2c_list->i2c_settings.reg_setting[i].delay);
+ if (rc < 0) {
+ CAM_ERR(CAM_SENSOR,
+ "i2c poll apply setting Fail: %d", rc);
+ return rc;
+ }
+ }
+ break;
+ }
+ default:
+ CAM_ERR(CAM_SENSOR, "Wrong Opcode: %d", i2c_list->op_code);
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
int32_t msm_camera_fill_vreg_params(
struct cam_hw_soc_info *soc_info,
struct cam_sensor_power_setting *power_setting,
@@ -1710,7 +1779,7 @@ msm_camera_get_power_settings(struct cam_sensor_power_ctrl_t *ctrl,
return ps;
}
-int msm_camera_power_down(struct cam_sensor_power_ctrl_t *ctrl,
+int cam_sensor_util_power_down(struct cam_sensor_power_ctrl_t *ctrl,
struct cam_hw_soc_info *soc_info)
{
int index = 0, ret = 0, num_vreg = 0, i;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h
index 6c0287e48487..583ddb14243b 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h
@@ -39,6 +39,9 @@ int cam_sensor_i2c_command_parser(struct camera_io_master *io_master,
struct i2c_settings_array *i2c_reg_settings,
struct cam_cmd_buf_desc *cmd_desc, int32_t num_cmd_buffers);
+int cam_sensor_util_i2c_apply_setting(struct camera_io_master *io_master_info,
+ struct i2c_settings_list *i2c_list);
+
int32_t delete_request(struct i2c_settings_array *i2c_array);
int cam_sensor_util_request_gpio_table(
struct cam_hw_soc_info *soc_info, int gpio_en);
@@ -49,7 +52,7 @@ int cam_sensor_util_init_gpio_pin_tbl(
int cam_sensor_core_power_up(struct cam_sensor_power_ctrl_t *ctrl,
struct cam_hw_soc_info *soc_info);
-int msm_camera_power_down(struct cam_sensor_power_ctrl_t *ctrl,
+int cam_sensor_util_power_down(struct cam_sensor_power_ctrl_t *ctrl,
struct cam_hw_soc_info *soc_info);
int msm_camera_fill_vreg_params(struct cam_hw_soc_info *soc_info,
diff --git a/drivers/media/platform/msm/camera/cam_smmu/Makefile b/drivers/media/platform/msm/camera/cam_smmu/Makefile
index e17dac6c0d9d..96f39680fd0c 100644
--- a/drivers/media/platform/msm/camera/cam_smmu/Makefile
+++ b/drivers/media/platform/msm/camera/cam_smmu/Makefile
@@ -1,3 +1,4 @@
ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
obj-$(CONFIG_SPECTRA_CAMERA) += cam_smmu_api.o
diff --git a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
index 52da37f8239b..7a489d7204ff 100644
--- a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
+++ b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -23,6 +23,7 @@
#include <linux/genalloc.h>
#include <soc/qcom/scm.h>
#include <soc/qcom/secure_buffer.h>
+#include <uapi/media/cam_req_mgr.h>
#include "cam_smmu_api.h"
#include "cam_debug_util.h"
@@ -34,11 +35,14 @@
#define COOKIE_SIZE (BYTE_SIZE*COOKIE_NUM_BYTE)
#define COOKIE_MASK ((1<<COOKIE_SIZE)-1)
#define HANDLE_INIT (-1)
-#define CAM_SMMU_CB_MAX 2
+#define CAM_SMMU_CB_MAX 5
#define GET_SMMU_HDL(x, y) (((x) << COOKIE_SIZE) | ((y) & COOKIE_MASK))
#define GET_SMMU_TABLE_IDX(x) (((x) >> COOKIE_SIZE) & COOKIE_MASK)
+static int g_num_pf_handled = 4;
+module_param(g_num_pf_handled, int, 0644);
+
struct firmware_alloc_info {
struct device *fw_dev;
void *fw_kva;
@@ -129,12 +133,11 @@ struct cam_context_bank_info {
int handle;
enum cam_smmu_ops_param state;
- void (*handler[CAM_SMMU_CB_MAX])(struct iommu_domain *,
- struct device *, unsigned long,
- int, void*);
+ cam_smmu_client_page_fault_handler handler[CAM_SMMU_CB_MAX];
void *token[CAM_SMMU_CB_MAX];
int cb_count;
int secure_count;
+ int pf_count;
};
struct cam_iommu_cb_set {
@@ -252,13 +255,14 @@ static void cam_smmu_print_table(void);
static int cam_smmu_probe(struct platform_device *pdev);
-static void cam_smmu_check_vaddr_in_range(int idx, void *vaddr);
+static uint32_t cam_smmu_find_closest_mapping(int idx, void *vaddr);
static void cam_smmu_page_fault_work(struct work_struct *work)
{
int j;
int idx;
struct cam_smmu_work_payload *payload;
+ uint32_t buf_info;
mutex_lock(&iommu_cb_set.payload_list_lock);
if (list_empty(&iommu_cb_set.payload_list)) {
@@ -275,8 +279,11 @@ static void cam_smmu_page_fault_work(struct work_struct *work)
/* Dereference the payload to call the handler */
idx = payload->idx;
- mutex_lock(&iommu_cb_set.cb_info[idx].lock);
- cam_smmu_check_vaddr_in_range(idx, (void *)payload->iova);
+ buf_info = cam_smmu_find_closest_mapping(idx, (void *)payload->iova);
+ if (buf_info != 0) {
+ CAM_INFO(CAM_SMMU, "closest buf 0x%x idx %d", buf_info, idx);
+ }
+
for (j = 0; j < CAM_SMMU_CB_MAX; j++) {
if ((iommu_cb_set.cb_info[idx].handler[j])) {
iommu_cb_set.cb_info[idx].handler[j](
@@ -284,10 +291,10 @@ static void cam_smmu_page_fault_work(struct work_struct *work)
payload->dev,
payload->iova,
payload->flags,
- iommu_cb_set.cb_info[idx].token[j]);
+ iommu_cb_set.cb_info[idx].token[j],
+ buf_info);
}
}
- mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
kfree(payload);
}
@@ -333,10 +340,13 @@ static void cam_smmu_print_table(void)
}
}
-static void cam_smmu_check_vaddr_in_range(int idx, void *vaddr)
+static uint32_t cam_smmu_find_closest_mapping(int idx, void *vaddr)
{
- struct cam_dma_buff_info *mapping;
+ struct cam_dma_buff_info *mapping, *closest_mapping = NULL;
unsigned long start_addr, end_addr, current_addr;
+ uint32_t buf_handle = 0;
+
+ long delta = 0, lowest_delta = 0;
current_addr = (unsigned long)vaddr;
list_for_each_entry(mapping,
@@ -344,31 +354,51 @@ static void cam_smmu_check_vaddr_in_range(int idx, void *vaddr)
start_addr = (unsigned long)mapping->paddr;
end_addr = (unsigned long)mapping->paddr + mapping->len;
- if (start_addr <= current_addr && current_addr < end_addr) {
- CAM_ERR(CAM_SMMU,
- "va %pK valid: range:%pK-%pK, fd = %d cb: %s",
- vaddr, (void *)start_addr, (void *)end_addr,
- mapping->ion_fd,
+ if (start_addr <= current_addr && current_addr <= end_addr) {
+ closest_mapping = mapping;
+ CAM_INFO(CAM_SMMU,
+ "Found va 0x%lx in:0x%lx-0x%lx, fd %d cb:%s",
+ current_addr, start_addr,
+ end_addr, mapping->ion_fd,
iommu_cb_set.cb_info[idx].name);
goto end;
} else {
+ if (start_addr > current_addr)
+ delta = start_addr - current_addr;
+ else
+ delta = current_addr - end_addr - 1;
+
+ if (delta < lowest_delta || lowest_delta == 0) {
+ lowest_delta = delta;
+ closest_mapping = mapping;
+ }
CAM_DBG(CAM_SMMU,
- "va %pK is not in this range: %pK-%pK, fd = %d",
- vaddr, (void *)start_addr, (void *)end_addr,
- mapping->ion_fd);
+ "approx va %lx not in range: %lx-%lx fd = %0x",
+ current_addr, start_addr,
+ end_addr, mapping->ion_fd);
}
}
- CAM_ERR(CAM_SMMU,
- "Cannot find vaddr:%pK in SMMU %s uses invalid virt address",
- vaddr, iommu_cb_set.cb_info[idx].name);
-end:
- return;
-}
-void cam_smmu_reg_client_page_fault_handler(int handle,
- void (*client_page_fault_handler)(struct iommu_domain *,
- struct device *, unsigned long,
- int, void*), void *token)
+end:
+ if (closest_mapping) {
+ buf_handle = GET_MEM_HANDLE(idx, closest_mapping->ion_fd);
+ CAM_INFO(CAM_SMMU,
+ "Closest map fd %d 0x%lx 0x%lx-0x%lx buf=%pK mem %0x",
+ closest_mapping->ion_fd, current_addr,
+ (unsigned long)closest_mapping->paddr,
+ (unsigned long)closest_mapping->paddr + mapping->len,
+ closest_mapping->buf,
+ buf_handle);
+ } else
+ CAM_INFO(CAM_SMMU,
+ "Cannot find vaddr:%lx in SMMU %s virt address",
+ current_addr, iommu_cb_set.cb_info[idx].name);
+
+ return buf_handle;
+}
+
+void cam_smmu_set_client_page_fault_handler(int handle,
+ cam_smmu_client_page_fault_handler handler_cb, void *token)
{
int idx, i = 0;
@@ -394,7 +424,7 @@ void cam_smmu_reg_client_page_fault_handler(int handle,
return;
}
- if (client_page_fault_handler) {
+ if (handler_cb) {
if (iommu_cb_set.cb_info[idx].cb_count == CAM_SMMU_CB_MAX) {
CAM_ERR(CAM_SMMU,
"%s Should not regiester more handlers",
@@ -402,12 +432,14 @@ void cam_smmu_reg_client_page_fault_handler(int handle,
mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
return;
}
+
iommu_cb_set.cb_info[idx].cb_count++;
+
for (i = 0; i < iommu_cb_set.cb_info[idx].cb_count; i++) {
if (iommu_cb_set.cb_info[idx].token[i] == NULL) {
iommu_cb_set.cb_info[idx].token[i] = token;
iommu_cb_set.cb_info[idx].handler[i] =
- client_page_fault_handler;
+ handler_cb;
break;
}
}
@@ -429,6 +461,47 @@ void cam_smmu_reg_client_page_fault_handler(int handle,
mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
}
+void cam_smmu_unset_client_page_fault_handler(int handle, void *token)
+{
+ int idx, i = 0;
+
+ if (!token || (handle == HANDLE_INIT)) {
+ CAM_ERR(CAM_SMMU, "Error: token is NULL or invalid handle");
+ return;
+ }
+
+ idx = GET_SMMU_TABLE_IDX(handle);
+ if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+ CAM_ERR(CAM_SMMU,
+ "Error: handle or index invalid. idx = %d hdl = %x",
+ idx, handle);
+ return;
+ }
+
+ mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+ if (iommu_cb_set.cb_info[idx].handle != handle) {
+ CAM_ERR(CAM_SMMU,
+ "Error: hdl is not valid, table_hdl = %x, hdl = %x",
+ iommu_cb_set.cb_info[idx].handle, handle);
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return;
+ }
+
+ for (i = 0; i < CAM_SMMU_CB_MAX; i++) {
+ if (iommu_cb_set.cb_info[idx].token[i] == token) {
+ iommu_cb_set.cb_info[idx].token[i] = NULL;
+ iommu_cb_set.cb_info[idx].handler[i] =
+ NULL;
+ iommu_cb_set.cb_info[idx].cb_count--;
+ break;
+ }
+ }
+ if (i == CAM_SMMU_CB_MAX)
+ CAM_ERR(CAM_SMMU, "Error: hdl %x no matching tokens: %s",
+ handle, iommu_cb_set.cb_info[idx].name);
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+}
+
static int cam_smmu_iommu_fault_handler(struct iommu_domain *domain,
struct device *dev, unsigned long iova,
int flags, void *token)
@@ -459,6 +532,13 @@ static int cam_smmu_iommu_fault_handler(struct iommu_domain *domain,
return -EINVAL;
}
+ if (++iommu_cb_set.cb_info[idx].pf_count > g_num_pf_handled) {
+ CAM_INFO(CAM_SMMU, "PF already handled %d %d %d",
+ g_num_pf_handled, idx,
+ iommu_cb_set.cb_info[idx].pf_count);
+ return -EINVAL;
+ }
+
payload = kzalloc(sizeof(struct cam_smmu_work_payload), GFP_ATOMIC);
if (!payload)
return -EINVAL;
@@ -474,7 +554,7 @@ static int cam_smmu_iommu_fault_handler(struct iommu_domain *domain,
list_add_tail(&payload->list, &iommu_cb_set.payload_list);
mutex_unlock(&iommu_cb_set.payload_list_lock);
- schedule_work(&iommu_cb_set.smmu_work);
+ cam_smmu_page_fault_work(&iommu_cb_set.smmu_work);
return -EINVAL;
}
@@ -528,6 +608,7 @@ void cam_smmu_reset_iommu_table(enum cam_smmu_init_dir ops)
iommu_cb_set.cb_info[i].state = CAM_SMMU_DETACH;
iommu_cb_set.cb_info[i].dev = NULL;
iommu_cb_set.cb_info[i].cb_count = 0;
+ iommu_cb_set.cb_info[i].pf_count = 0;
for (j = 0; j < CAM_SMMU_CB_MAX; j++) {
iommu_cb_set.cb_info[i].token[j] = NULL;
iommu_cb_set.cb_info[i].handler[j] = NULL;
@@ -1009,7 +1090,7 @@ get_addr_end:
int cam_smmu_alloc_firmware(int32_t smmu_hdl,
dma_addr_t *iova,
- uint64_t *cpuva,
+ uintptr_t *cpuva,
size_t *len)
{
int rc;
@@ -1078,7 +1159,7 @@ int cam_smmu_alloc_firmware(int32_t smmu_hdl,
iommu_cb_set.cb_info[idx].is_fw_allocated = true;
*iova = iommu_cb_set.cb_info[idx].firmware_info.iova_start;
- *cpuva = (uint64_t)icp_fw.fw_kva;
+ *cpuva = (uintptr_t)icp_fw.fw_kva;
*len = firmware_len;
mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
@@ -2924,7 +3005,7 @@ int cam_smmu_destroy_handle(int handle)
cam_smmu_clean_kernel_buffer_list(idx);
}
- if (&iommu_cb_set.cb_info[idx].is_secure) {
+ if (iommu_cb_set.cb_info[idx].is_secure) {
if (iommu_cb_set.cb_info[idx].secure_count == 0) {
mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
return -EPERM;
diff --git a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h
index 254e382a99ad..caf326d6c716 100644
--- a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h
+++ b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -51,6 +51,21 @@ enum cam_smmu_region_id {
};
/**
+ * @brief : Callback function type that gets called back on cam
+ * smmu page fault.
+ *
+ * @param domain : Iommu domain received in iommu page fault handler
+ * @param dev : Device received in iommu page fault handler
+ * @param iova : IOVA where page fault occurred
+ * @param flags : Flags received in iommu page fault handler
+ * @param token : Userdata given during callback registration
+ * @param buf_info : Closest mapped buffer info
+ */
+typedef void (*cam_smmu_client_page_fault_handler)(struct iommu_domain *domain,
+ struct device *dev, unsigned long iova, int flags, void *token,
+ uint32_t buf_info);
+
+/**
* @brief : Structure to store region information
*
* @param iova_start : Start address of region
@@ -215,13 +230,19 @@ int cam_smmu_find_index_by_handle(int hdl);
* @brief : Registers smmu fault handler for client
*
* @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
- * @param client_page_fault_handler: It is triggered in IOMMU page fault
+ * @param handler_cb: It is triggered in IOMMU page fault
+ * @param token: It is input param when trigger page fault handler
+ */
+void cam_smmu_set_client_page_fault_handler(int handle,
+ cam_smmu_client_page_fault_handler handler_cb, void *token);
+
+/**
+ * @brief : Unregisters smmu fault handler for client
+ *
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
* @param token: It is input param when trigger page fault handler
*/
-void cam_smmu_reg_client_page_fault_handler(int handle,
- void (*client_page_fault_handler)(struct iommu_domain *,
- struct device *, unsigned long,
- int, void*), void *token);
+void cam_smmu_unset_client_page_fault_handler(int handle, void *token);
/**
* @brief Maps memory from an ION fd into IOVA space
@@ -297,7 +318,7 @@ int cam_smmu_unmap_stage2_iova(int handle, int ion_fd);
*/
int cam_smmu_alloc_firmware(int32_t smmu_hdl,
dma_addr_t *iova,
- uint64_t *kvaddr,
+ uintptr_t *kvaddr,
size_t *len);
/**
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
index 4525bb5bce25..c9e6e5fe7f61 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
@@ -426,7 +426,7 @@ static int cam_sync_handle_create(struct cam_private_ioctl_arg *k_ioctl)
return -EINVAL;
if (copy_from_user(&sync_create,
- (void *)k_ioctl->ioctl_ptr,
+ u64_to_user_ptr(k_ioctl->ioctl_ptr),
k_ioctl->size))
return -EFAULT;
@@ -434,7 +434,8 @@ static int cam_sync_handle_create(struct cam_private_ioctl_arg *k_ioctl)
sync_create.name);
if (!result)
- if (copy_to_user((void *)k_ioctl->ioctl_ptr,
+ if (copy_to_user(
+ u64_to_user_ptr(k_ioctl->ioctl_ptr),
&sync_create,
k_ioctl->size))
return -EFAULT;
@@ -453,7 +454,7 @@ static int cam_sync_handle_signal(struct cam_private_ioctl_arg *k_ioctl)
return -EINVAL;
if (copy_from_user(&sync_signal,
- (void *)k_ioctl->ioctl_ptr,
+ u64_to_user_ptr(k_ioctl->ioctl_ptr),
k_ioctl->size))
return -EFAULT;
@@ -478,7 +479,7 @@ static int cam_sync_handle_merge(struct cam_private_ioctl_arg *k_ioctl)
return -EINVAL;
if (copy_from_user(&sync_merge,
- (void *)k_ioctl->ioctl_ptr,
+ u64_to_user_ptr(k_ioctl->ioctl_ptr),
k_ioctl->size))
return -EFAULT;
@@ -492,8 +493,8 @@ static int cam_sync_handle_merge(struct cam_private_ioctl_arg *k_ioctl)
return -ENOMEM;
if (copy_from_user(sync_objs,
- (void *)sync_merge.sync_objs,
- sizeof(uint32_t) * sync_merge.num_objs)) {
+ u64_to_user_ptr(sync_merge.sync_objs),
+ sizeof(uint32_t) * sync_merge.num_objs)) {
kfree(sync_objs);
return -EFAULT;
}
@@ -505,7 +506,8 @@ static int cam_sync_handle_merge(struct cam_private_ioctl_arg *k_ioctl)
&sync_merge.merged);
if (!result)
- if (copy_to_user((void *)k_ioctl->ioctl_ptr,
+ if (copy_to_user(
+ u64_to_user_ptr(k_ioctl->ioctl_ptr),
&sync_merge,
k_ioctl->size)) {
kfree(sync_objs);
@@ -528,7 +530,7 @@ static int cam_sync_handle_wait(struct cam_private_ioctl_arg *k_ioctl)
return -EINVAL;
if (copy_from_user(&sync_wait,
- (void *)k_ioctl->ioctl_ptr,
+ u64_to_user_ptr(k_ioctl->ioctl_ptr),
k_ioctl->size))
return -EFAULT;
@@ -549,7 +551,7 @@ static int cam_sync_handle_destroy(struct cam_private_ioctl_arg *k_ioctl)
return -EINVAL;
if (copy_from_user(&sync_create,
- (void *)k_ioctl->ioctl_ptr,
+ u64_to_user_ptr(k_ioctl->ioctl_ptr),
k_ioctl->size))
return -EFAULT;
@@ -573,7 +575,7 @@ static int cam_sync_handle_register_user_payload(
return -EINVAL;
if (copy_from_user(&userpayload_info,
- (void *)k_ioctl->ioctl_ptr,
+ u64_to_user_ptr(k_ioctl->ioctl_ptr),
k_ioctl->size))
return -EFAULT;
@@ -654,7 +656,7 @@ static int cam_sync_handle_deregister_user_payload(
}
if (copy_from_user(&userpayload_info,
- (void *)k_ioctl->ioctl_ptr,
+ u64_to_user_ptr(k_ioctl->ioctl_ptr),
k_ioctl->size))
return -EFAULT;
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_common_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_common_util.h
index 3e1281b62567..47d441fe3aa1 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_common_util.h
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_common_util.h
@@ -13,8 +13,14 @@
#ifndef _CAM_COMMON_UTIL_H_
#define _CAM_COMMON_UTIL_H_
+#include <linux/types.h>
+#include <linux/kernel.h>
+
#define CAM_BITS_MASK_SHIFT(x, mask, shift) (((x) & (mask)) >> shift)
+#define PTR_TO_U64(ptr) ((uint64_t)(uintptr_t)ptr)
+#define U64_TO_PTR(ptr) ((void *)(uintptr_t)ptr)
+
/**
* cam_common_util_get_string_index()
*
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c
index a88ccdb93641..66a4487af172 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c
@@ -21,7 +21,7 @@ int cam_packet_util_get_cmd_mem_addr(int handle, uint32_t **buf_addr,
size_t *len)
{
int rc = 0;
- uint64_t kmd_buf_addr = 0;
+ uintptr_t kmd_buf_addr = 0;
rc = cam_mem_get_cpu_buf(handle, &kmd_buf_addr, len);
if (rc) {
@@ -30,7 +30,7 @@ int cam_packet_util_get_cmd_mem_addr(int handle, uint32_t **buf_addr,
if (kmd_buf_addr && *len) {
*buf_addr = (uint32_t *)kmd_buf_addr;
} else {
- CAM_ERR(CAM_UTIL, "Invalid addr and length :%ld", *len);
+ CAM_ERR(CAM_UTIL, "Invalid addr and length :%zd", *len);
rc = -ENOMEM;
}
}
@@ -101,7 +101,7 @@ int cam_packet_util_get_kmd_buffer(struct cam_packet *packet,
return rc;
if (len < cmd_desc->size) {
- CAM_ERR(CAM_UTIL, "invalid memory len:%ld and cmd desc size:%d",
+ CAM_ERR(CAM_UTIL, "invalid memory len:%zd and cmd desc size:%d",
len, cmd_desc->size);
return -EINVAL;
}
@@ -128,7 +128,7 @@ int cam_packet_util_process_patches(struct cam_packet *packet,
{
struct cam_patch_desc *patch_desc = NULL;
dma_addr_t iova_addr;
- uint64_t cpu_addr;
+ uintptr_t cpu_addr;
uint32_t temp;
uint32_t *dst_cpu_addr;
uint32_t *src_buf_iova_addr;
@@ -209,7 +209,7 @@ int cam_packet_util_process_generic_cmd_buffer(
cam_packet_generic_blob_handler blob_handler_cb, void *user_data)
{
int rc;
- uint64_t cpu_addr;
+ uintptr_t cpu_addr;
size_t buf_size;
uint32_t *blob_ptr;
uint32_t blob_type, blob_size, blob_block_size, len_read;
@@ -233,7 +233,8 @@ int cam_packet_util_process_generic_cmd_buffer(
return rc;
}
- blob_ptr = (uint32_t *)((uint8_t *)cpu_addr + cmd_buf->offset);
+ blob_ptr = (uint32_t *)(((uint8_t *)cpu_addr) +
+ cmd_buf->offset);
CAM_DBG(CAM_UTIL,
"GenericCmdBuffer cpuaddr=%pK, blobptr=%pK, len=%d",
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
index a5456a9bec72..37d7e7d2b6d9 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
@@ -100,8 +100,10 @@ static const char *cam_soc_util_get_string_from_level(
return "SVSL1[4]";
case CAM_NOMINAL_VOTE:
return "NOM[5]";
+ case CAM_NOMINALL1_VOTE:
+ return "NOML1[6]";
case CAM_TURBO_VOTE:
- return "TURBO[6]";
+ return "TURBO[7]";
default:
return "";
}
@@ -281,6 +283,8 @@ int cam_soc_util_get_level_from_string(const char *string,
*level = CAM_SVSL1_VOTE;
} else if (!strcmp(string, "nominal")) {
*level = CAM_NOMINAL_VOTE;
+ } else if (!strcmp(string, "nominal_l1")) {
+ *level = CAM_NOMINALL1_VOTE;
} else if (!strcmp(string, "turbo")) {
*level = CAM_TURBO_VOTE;
} else {
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
index 4c6ed4b5197a..d7432d9b75b1 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
@@ -50,14 +50,15 @@
/**
* enum cam_vote_level - Enum for voting level
*
- * @CAM_SUSPEND_VOTE : Suspend vote
- * @CAM_MINSVS_VOTE : Min SVS vote
- * @CAM_LOWSVS_VOTE : Low SVS vote
- * @CAM_SVS_VOTE : SVS vote
- * @CAM_SVSL1_VOTE : SVS Plus vote
- * @CAM_NOMINAL_VOTE : Nominal vote
- * @CAM_TURBO_VOTE : Turbo vote
- * @CAM_MAX_VOTE : Max voting level, This is invalid level.
+ * @CAM_SUSPEND_VOTE : Suspend vote
+ * @CAM_MINSVS_VOTE : Min SVS vote
+ * @CAM_LOWSVS_VOTE : Low SVS vote
+ * @CAM_SVS_VOTE : SVS vote
+ * @CAM_SVSL1_VOTE : SVS Plus vote
+ * @CAM_NOMINAL_VOTE : Nominal vote
+ * @CAM_NOMINALL1_VOTE: Nominal plus vote
+ * @CAM_TURBO_VOTE : Turbo vote
+ * @CAM_MAX_VOTE : Max voting level, This is invalid level.
*/
enum cam_vote_level {
CAM_SUSPEND_VOTE,
@@ -66,6 +67,7 @@ enum cam_vote_level {
CAM_SVS_VOTE,
CAM_SVSL1_VOTE,
CAM_NOMINAL_VOTE,
+ CAM_NOMINALL1_VOTE,
CAM_TURBO_VOTE,
CAM_MAX_VOTE,
};
diff --git a/drivers/media/platform/msm/vidc/hfi_response_handler.c b/drivers/media/platform/msm/vidc/hfi_response_handler.c
index 44cc7dcea685..f1be1a7ddded 100644
--- a/drivers/media/platform/msm/vidc/hfi_response_handler.c
+++ b/drivers/media/platform/msm/vidc/hfi_response_handler.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -365,9 +365,10 @@ static int hfi_process_session_error(u32 device_id,
}
static int hfi_process_event_notify(u32 device_id,
- struct hfi_msg_event_notify_packet *pkt,
+ void *_pkt,
struct msm_vidc_cb_info *info)
{
+ struct hfi_msg_event_notify_packet *pkt = _pkt;
dprintk(VIDC_DBG, "Received: EVENT_NOTIFY\n");
if (pkt->size < sizeof(struct hfi_msg_event_notify_packet)) {
@@ -406,9 +407,10 @@ static int hfi_process_event_notify(u32 device_id,
}
static int hfi_process_sys_init_done(u32 device_id,
- struct hfi_msg_sys_init_done_packet *pkt,
+ void *_pkt,
struct msm_vidc_cb_info *info)
{
+ struct hfi_msg_sys_init_done_packet *pkt = _pkt;
struct msm_vidc_cb_cmd_done cmd_done = {0};
enum vidc_status status = VIDC_ERR_NONE;
@@ -445,9 +447,10 @@ err_no_prop:
}
static int hfi_process_sys_rel_resource_done(u32 device_id,
- struct hfi_msg_sys_release_resource_done_packet *pkt,
+ void *_pkt,
struct msm_vidc_cb_info *info)
{
+ struct hfi_msg_sys_release_resource_done_packet *pkt = _pkt;
struct msm_vidc_cb_cmd_done cmd_done = {0};
enum vidc_status status = VIDC_ERR_NONE;
u32 pkt_size;
@@ -751,6 +754,11 @@ static int hfi_fill_codec_info(u8 *data_ptr,
vidc_get_hal_codec((1 << i) & codecs);
capability->domain =
vidc_get_hal_domain(HFI_VIDEO_DOMAIN_DECODER);
+ if (codec_count == VIDC_MAX_DECODE_SESSIONS) {
+ dprintk(VIDC_ERR,
+ "Max supported decoder sessions reached");
+ break;
+ }
}
}
codecs = sys_init_done->enc_codec_supported;
@@ -762,6 +770,11 @@ static int hfi_fill_codec_info(u8 *data_ptr,
vidc_get_hal_codec((1 << i) & codecs);
capability->domain =
vidc_get_hal_domain(HFI_VIDEO_DOMAIN_ENCODER);
+ if (codec_count == VIDC_MAX_SESSIONS) {
+ dprintk(VIDC_ERR,
+ "Max supported sessions reached");
+ break;
+ }
}
}
sys_init_done->codec_count = codec_count;
@@ -1200,9 +1213,10 @@ static void hfi_process_sess_get_prop_buf_req(
}
static int hfi_process_session_prop_info(u32 device_id,
- struct hfi_msg_session_property_info_packet *pkt,
+ void *_pkt,
struct msm_vidc_cb_info *info)
{
+ struct hfi_msg_session_property_info_packet *pkt = _pkt;
struct msm_vidc_cb_cmd_done cmd_done = {0};
struct buffer_requirements buff_req = { { {0} } };
@@ -1241,9 +1255,10 @@ static int hfi_process_session_prop_info(u32 device_id,
}
static int hfi_process_session_init_done(u32 device_id,
- struct hfi_msg_sys_session_init_done_packet *pkt,
+ void *_pkt,
struct msm_vidc_cb_info *info)
{
+ struct hfi_msg_sys_session_init_done_packet *pkt = _pkt;
struct msm_vidc_cb_cmd_done cmd_done = {0};
struct vidc_hal_session_init_done session_init_done = { {0} };
@@ -1268,9 +1283,10 @@ static int hfi_process_session_init_done(u32 device_id,
}
static int hfi_process_session_load_res_done(u32 device_id,
- struct hfi_msg_session_load_resources_done_packet *pkt,
+ void *_pkt,
struct msm_vidc_cb_info *info)
{
+ struct hfi_msg_session_load_resources_done_packet *pkt = _pkt;
struct msm_vidc_cb_cmd_done cmd_done = {0};
dprintk(VIDC_DBG, "RECEIVED: SESSION_LOAD_RESOURCES_DONE[%#x]\n",
@@ -1296,9 +1312,10 @@ static int hfi_process_session_load_res_done(u32 device_id,
}
static int hfi_process_session_flush_done(u32 device_id,
- struct hfi_msg_session_flush_done_packet *pkt,
+ void *_pkt,
struct msm_vidc_cb_info *info)
{
+ struct hfi_msg_session_flush_done_packet *pkt = _pkt;
struct msm_vidc_cb_cmd_done cmd_done = {0};
dprintk(VIDC_DBG, "RECEIVED: SESSION_FLUSH_DONE[%#x]\n",
@@ -1339,9 +1356,10 @@ static int hfi_process_session_flush_done(u32 device_id,
}
static int hfi_process_session_etb_done(u32 device_id,
- struct hfi_msg_session_empty_buffer_done_packet *pkt,
+ void *_pkt,
struct msm_vidc_cb_info *info)
{
+ struct hfi_msg_session_empty_buffer_done_packet *pkt = _pkt;
struct msm_vidc_cb_data_done data_done = {0};
struct hfi_picture_type *hfi_picture_type = NULL;
@@ -1393,9 +1411,10 @@ static int hfi_process_session_etb_done(u32 device_id,
}
static int hfi_process_session_ftb_done(
- u32 device_id, struct vidc_hal_msg_pkt_hdr *msg_hdr,
+ u32 device_id, void *_pkt,
struct msm_vidc_cb_info *info)
{
+ struct vidc_hal_msg_pkt_hdr *msg_hdr = _pkt;
struct msm_vidc_cb_data_done data_done = {0};
bool is_decoder = false, is_encoder = false;
@@ -1517,9 +1536,10 @@ static int hfi_process_session_ftb_done(
}
static int hfi_process_session_start_done(u32 device_id,
- struct hfi_msg_session_start_done_packet *pkt,
+ void *_pkt,
struct msm_vidc_cb_info *info)
{
+ struct hfi_msg_session_start_done_packet *pkt = _pkt;
struct msm_vidc_cb_cmd_done cmd_done = {0};
dprintk(VIDC_DBG, "RECEIVED: SESSION_START_DONE[%#x]\n",
@@ -1543,9 +1563,10 @@ static int hfi_process_session_start_done(u32 device_id,
}
static int hfi_process_session_stop_done(u32 device_id,
- struct hfi_msg_session_stop_done_packet *pkt,
+ void *_pkt,
struct msm_vidc_cb_info *info)
{
+ struct hfi_msg_session_stop_done_packet *pkt = _pkt;
struct msm_vidc_cb_cmd_done cmd_done = {0};
dprintk(VIDC_DBG, "RECEIVED: SESSION_STOP_DONE[%#x]\n",
@@ -1570,9 +1591,10 @@ static int hfi_process_session_stop_done(u32 device_id,
}
static int hfi_process_session_rel_res_done(u32 device_id,
- struct hfi_msg_session_release_resources_done_packet *pkt,
+ void *_pkt,
struct msm_vidc_cb_info *info)
{
+ struct hfi_msg_session_release_resources_done_packet *pkt = _pkt;
struct msm_vidc_cb_cmd_done cmd_done = {0};
dprintk(VIDC_DBG, "RECEIVED: SESSION_RELEASE_RESOURCES_DONE[%#x]\n",
@@ -1597,9 +1619,10 @@ static int hfi_process_session_rel_res_done(u32 device_id,
}
static int hfi_process_session_rel_buf_done(u32 device_id,
- struct hfi_msg_session_release_buffers_done_packet *pkt,
+ void *_pkt,
struct msm_vidc_cb_info *info)
{
+ struct hfi_msg_session_release_buffers_done_packet *pkt = _pkt;
struct msm_vidc_cb_cmd_done cmd_done = {0};
if (!pkt || pkt->size <
@@ -1630,9 +1653,10 @@ static int hfi_process_session_rel_buf_done(u32 device_id,
}
static int hfi_process_session_end_done(u32 device_id,
- struct hfi_msg_sys_session_end_done_packet *pkt,
+ void *_pkt,
struct msm_vidc_cb_info *info)
{
+ struct hfi_msg_sys_session_end_done_packet *pkt = _pkt;
struct msm_vidc_cb_cmd_done cmd_done = {0};
dprintk(VIDC_DBG, "RECEIVED: SESSION_END_DONE[%#x]\n", pkt->session_id);
@@ -1655,9 +1679,10 @@ static int hfi_process_session_end_done(u32 device_id,
}
static int hfi_process_session_abort_done(u32 device_id,
- struct hfi_msg_sys_session_abort_done_packet *pkt,
+ void *_pkt,
struct msm_vidc_cb_info *info)
{
+ struct hfi_msg_sys_session_abort_done_packet *pkt = _pkt;
struct msm_vidc_cb_cmd_done cmd_done = {0};
dprintk(VIDC_DBG, "RECEIVED: SESSION_ABORT_DONE[%#x]\n",
@@ -1725,9 +1750,10 @@ static void hfi_process_sys_get_prop_image_version(
}
static int hfi_process_sys_property_info(u32 device_id,
- struct hfi_msg_sys_property_info_packet *pkt,
+ void *_pkt,
struct msm_vidc_cb_info *info)
{
+ struct hfi_msg_sys_property_info_packet *pkt = _pkt;
if (!pkt) {
dprintk(VIDC_ERR, "%s: invalid param\n", __func__);
return -EINVAL;
@@ -1759,7 +1785,7 @@ static int hfi_process_sys_property_info(u32 device_id,
}
static int hfi_process_ignore(u32 device_id,
- struct vidc_hal_msg_pkt_hdr *msg_hdr,
+ void *_pkt,
struct msm_vidc_cb_info *info)
{
*info = (struct msm_vidc_cb_info) {
@@ -1840,5 +1866,6 @@ int hfi_process_msg_packet(u32 device_id, struct vidc_hal_msg_pkt_hdr *msg_hdr,
break;
}
- return pkt_func ? pkt_func(device_id, msg_hdr, info) : -ENOTSUPP;
+ return pkt_func ?
+ pkt_func(device_id, (void *)msg_hdr, info) : -ENOTSUPP;
}
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
index 4be087f286af..82cfe0bb3c80 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -885,6 +885,12 @@ int msm_comm_init_clocks_and_bus_data(struct msm_vidc_inst *inst)
break;
}
}
+
+ if (!inst->clk_data.entry) {
+ dprintk(VIDC_ERR, "%s No match found\n", __func__);
+ rc = -EINVAL;
+ }
+
return rc;
}
@@ -900,7 +906,7 @@ void msm_clock_data_reset(struct msm_vidc_inst *inst)
dprintk(VIDC_DBG, "Init DCVS Load\n");
- if (!inst || !inst->core) {
+ if (!inst || !inst->core || !inst->clk_data.entry) {
dprintk(VIDC_ERR, "%s Invalid args: Inst = %pK\n",
__func__, inst);
return;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index ed3815f1c829..6316b02cb4b9 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -3087,7 +3087,11 @@ static int msm_comm_session_init(int flipped_state,
return -EINVAL;
}
- msm_comm_init_clocks_and_bus_data(inst);
+ rc = msm_comm_init_clocks_and_bus_data(inst);
+ if (rc) {
+ dprintk(VIDC_ERR, "Failed to initialize clocks and bus data\n");
+ goto exit;
+ }
dprintk(VIDC_DBG, "%s: inst %pK\n", __func__, inst);
rc = call_hfi_op(hdev, session_init, hdev->hfi_device_data,
@@ -5096,6 +5100,14 @@ int msm_comm_flush(struct msm_vidc_inst *inst, u32 flags)
"Invalid params, inst %pK\n", inst);
return -EINVAL;
}
+
+ if (inst->state < MSM_VIDC_OPEN_DONE) {
+ dprintk(VIDC_ERR,
+ "Invalid state to call flush, inst %pK, state %#x\n",
+ inst, inst->state);
+ return -EINVAL;
+ }
+
core = inst->core;
hdev = core->device;
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index 1eae02e89326..9fa0dbb3858e 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -324,7 +324,7 @@ static int __write_queue(struct vidc_iface_q_info *qinfo, u8 *packet,
{
struct hfi_queue_header *queue;
u32 packet_size_in_words, new_write_idx;
- u32 empty_space, read_idx;
+ u32 empty_space, read_idx, write_idx;
u32 *write_ptr;
if (!qinfo || !packet) {
@@ -347,16 +347,18 @@ static int __write_queue(struct vidc_iface_q_info *qinfo, u8 *packet,
}
packet_size_in_words = (*(u32 *)packet) >> 2;
- if (!packet_size_in_words) {
- dprintk(VIDC_ERR, "Zero packet size\n");
+ if (!packet_size_in_words || packet_size_in_words >
+ qinfo->q_array.mem_size>>2) {
+ dprintk(VIDC_ERR, "Invalid packet size\n");
return -ENODATA;
}
read_idx = queue->qhdr_read_idx;
+ write_idx = queue->qhdr_write_idx;
- empty_space = (queue->qhdr_write_idx >= read_idx) ?
- (queue->qhdr_q_size - (queue->qhdr_write_idx - read_idx)) :
- (read_idx - queue->qhdr_write_idx);
+ empty_space = (write_idx >= read_idx) ?
+ ((qinfo->q_array.mem_size>>2) - (write_idx - read_idx)) :
+ (read_idx - write_idx);
if (empty_space <= packet_size_in_words) {
queue->qhdr_tx_req = 1;
dprintk(VIDC_ERR, "Insufficient size (%d) to write (%d)\n",
@@ -366,13 +368,20 @@ static int __write_queue(struct vidc_iface_q_info *qinfo, u8 *packet,
queue->qhdr_tx_req = 0;
- new_write_idx = (queue->qhdr_write_idx + packet_size_in_words);
+ new_write_idx = write_idx + packet_size_in_words;
write_ptr = (u32 *)((qinfo->q_array.align_virtual_addr) +
- (queue->qhdr_write_idx << 2));
- if (new_write_idx < queue->qhdr_q_size) {
+ (write_idx << 2));
+ if (write_ptr < (u32 *)qinfo->q_array.align_virtual_addr ||
+ write_ptr > (u32 *)(qinfo->q_array.align_virtual_addr +
+ qinfo->q_array.mem_size)) {
+ dprintk(VIDC_ERR, "Invalid write index");
+ return -ENODATA;
+ }
+
+ if (new_write_idx < (qinfo->q_array.mem_size >> 2)) {
memcpy(write_ptr, packet, packet_size_in_words << 2);
} else {
- new_write_idx -= queue->qhdr_q_size;
+ new_write_idx -= qinfo->q_array.mem_size >> 2;
memcpy(write_ptr, packet, (packet_size_in_words -
new_write_idx) << 2);
memcpy((void *)qinfo->q_array.align_virtual_addr,
@@ -468,7 +477,8 @@ static int __read_queue(struct vidc_iface_q_info *qinfo, u8 *packet,
u32 packet_size_in_words, new_read_idx;
u32 *read_ptr;
u32 receive_request = 0;
- int rc = 0;
+ u32 read_idx, write_idx;
+ int rc = 0;
if (!qinfo || !packet || !pb_tx_req_is_set) {
dprintk(VIDC_ERR, "Invalid Params\n");
@@ -501,7 +511,10 @@ static int __read_queue(struct vidc_iface_q_info *qinfo, u8 *packet,
if (queue->qhdr_type & HFI_Q_ID_CTRL_TO_HOST_MSG_Q)
receive_request = 1;
- if (queue->qhdr_read_idx == queue->qhdr_write_idx) {
+ read_idx = queue->qhdr_read_idx;
+ write_idx = queue->qhdr_write_idx;
+
+ if (read_idx == write_idx) {
queue->qhdr_rx_req = receive_request;
/*
* mb() to ensure qhdr is updated in main memory
@@ -518,21 +531,28 @@ static int __read_queue(struct vidc_iface_q_info *qinfo, u8 *packet,
}
read_ptr = (u32 *)((qinfo->q_array.align_virtual_addr) +
- (queue->qhdr_read_idx << 2));
+ (read_idx << 2));
+ if (read_ptr < (u32 *)qinfo->q_array.align_virtual_addr ||
+ read_ptr > (u32 *)(qinfo->q_array.align_virtual_addr +
+ qinfo->q_array.mem_size - sizeof(*read_ptr))) {
+ dprintk(VIDC_ERR, "Invalid read index\n");
+ return -ENODATA;
+ }
+
packet_size_in_words = (*read_ptr) >> 2;
if (!packet_size_in_words) {
dprintk(VIDC_ERR, "Zero packet size\n");
return -ENODATA;
}
- new_read_idx = queue->qhdr_read_idx + packet_size_in_words;
- if (((packet_size_in_words << 2) <= VIDC_IFACEQ_VAR_HUGE_PKT_SIZE)
- && queue->qhdr_read_idx <= queue->qhdr_q_size) {
- if (new_read_idx < queue->qhdr_q_size) {
+ new_read_idx = read_idx + packet_size_in_words;
+ if (((packet_size_in_words << 2) <= VIDC_IFACEQ_VAR_HUGE_PKT_SIZE) &&
+ read_idx <= (qinfo->q_array.mem_size >> 2)) {
+ if (new_read_idx < (qinfo->q_array.mem_size >> 2)) {
memcpy(packet, read_ptr,
packet_size_in_words << 2);
} else {
- new_read_idx -= queue->qhdr_q_size;
+ new_read_idx -= (qinfo->q_array.mem_size >> 2);
memcpy(packet, read_ptr,
(packet_size_in_words - new_read_idx) << 2);
memcpy(packet + ((packet_size_in_words -
@@ -543,18 +563,18 @@ static int __read_queue(struct vidc_iface_q_info *qinfo, u8 *packet,
} else {
dprintk(VIDC_WARN,
"BAD packet received, read_idx: %#x, pkt_size: %d\n",
- queue->qhdr_read_idx, packet_size_in_words << 2);
+ read_idx, packet_size_in_words << 2);
dprintk(VIDC_WARN, "Dropping this packet\n");
- new_read_idx = queue->qhdr_write_idx;
+ new_read_idx = write_idx;
rc = -ENODATA;
}
- queue->qhdr_read_idx = new_read_idx;
-
- if (queue->qhdr_read_idx != queue->qhdr_write_idx)
+ if (new_read_idx != write_idx)
queue->qhdr_rx_req = 0;
else
queue->qhdr_rx_req = receive_request;
+
+ queue->qhdr_read_idx = new_read_idx;
/*
* mb() to ensure qhdr is updated in main memory
* so that venus reads the updated header values
@@ -1172,7 +1192,7 @@ static int __iface_cmdq_write_relaxed(struct venus_hfi_device *device,
__strict_check(device);
if (!__core_in_valid_state(device)) {
- dprintk(VIDC_DBG, "%s - fw not in init state\n", __func__);
+ dprintk(VIDC_ERR, "%s - fw not in init state\n", __func__);
result = -EINVAL;
goto err_q_null;
}
@@ -2864,8 +2884,6 @@ static void __process_sys_error(struct venus_hfi_device *device)
{
struct hfi_sfr_struct *vsfr = NULL;
- __set_state(device, VENUS_STATE_DEINIT);
-
vsfr = (struct hfi_sfr_struct *)device->sfr.align_virtual_addr;
if (vsfr) {
void *p = memchr(vsfr->rg_data, '\0', vsfr->bufSize);
@@ -3120,6 +3138,10 @@ static int __response_handler(struct venus_hfi_device *device)
"Too many packets in message queue to handle at once, deferring read\n");
break;
}
+
+ /* do not read packets after sys error packet */
+ if (info->response_type == HAL_SYS_ERROR)
+ break;
}
if (requeue_pm_work && device->res->sw_power_collapsible) {
@@ -3182,8 +3204,13 @@ err_no_work:
for (i = 0; !IS_ERR_OR_NULL(device->response_pkt) &&
i < num_responses; ++i) {
struct msm_vidc_cb_info *r = &device->response_pkt[i];
- dprintk(VIDC_DBG, "Processing response %d of %d, type %d\n",
- (i + 1), num_responses, r->response_type);
+
+ if (!__core_in_valid_state(device)) {
+ dprintk(VIDC_ERR,
+ "Ignore responses from %d to %d as device is in invalid state",
+ (i + 1), num_responses);
+ break;
+ }
device->callback(r->response_type, &r->response);
}
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index 54cbdfc05193..d929d4db75f7 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -66,6 +66,9 @@
/* 16 encoder and 16 decoder sessions */
#define VIDC_MAX_SESSIONS 32
+#define VIDC_MAX_DECODE_SESSIONS 16
+#define VIDC_MAX_ENCODE_SESSIONS 16
+
enum vidc_status {
VIDC_ERR_NONE = 0x0,
diff --git a/drivers/media/platform/msm/vidc_3x/hfi_response_handler.c b/drivers/media/platform/msm/vidc_3x/hfi_response_handler.c
index ffcbdd9bc62a..f8b5d1c086aa 100644
--- a/drivers/media/platform/msm/vidc_3x/hfi_response_handler.c
+++ b/drivers/media/platform/msm/vidc_3x/hfi_response_handler.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -322,9 +322,10 @@ static int hfi_process_session_error(u32 device_id,
}
static int hfi_process_event_notify(u32 device_id,
- struct hfi_msg_event_notify_packet *pkt,
+ void *_pkt,
struct msm_vidc_cb_info *info)
{
+ struct hfi_msg_event_notify_packet *pkt = _pkt;
dprintk(VIDC_DBG, "Received: EVENT_NOTIFY\n");
if (pkt->size < sizeof(struct hfi_msg_event_notify_packet)) {
@@ -363,9 +364,10 @@ static int hfi_process_event_notify(u32 device_id,
}
static int hfi_process_sys_init_done(u32 device_id,
- struct hfi_msg_sys_init_done_packet *pkt,
+ void *_pkt,
struct msm_vidc_cb_info *info)
{
+ struct hfi_msg_sys_init_done_packet *pkt = _pkt;
struct msm_vidc_cb_cmd_done cmd_done = {0};
enum vidc_status status = VIDC_ERR_NONE;
@@ -402,9 +404,10 @@ err_no_prop:
}
static int hfi_process_sys_rel_resource_done(u32 device_id,
- struct hfi_msg_sys_release_resource_done_packet *pkt,
+ void *_pkt,
struct msm_vidc_cb_info *info)
{
+ struct hfi_msg_sys_release_resource_done_packet *pkt = _pkt;
struct msm_vidc_cb_cmd_done cmd_done = {0};
enum vidc_status status = VIDC_ERR_NONE;
u32 pkt_size;
@@ -608,6 +611,11 @@ static int hfi_fill_codec_info(u8 *data_ptr,
vidc_get_hal_codec((1 << i) & codecs);
capability->domain =
vidc_get_hal_domain(HFI_VIDEO_DOMAIN_DECODER);
+ if (codec_count == VIDC_MAX_DECODE_SESSIONS) {
+ dprintk(VIDC_ERR,
+ "Max supported decoder sessions reached\n");
+ break;
+ }
}
}
codecs = sys_init_done->enc_codec_supported;
@@ -619,6 +627,11 @@ static int hfi_fill_codec_info(u8 *data_ptr,
vidc_get_hal_codec((1 << i) & codecs);
capability->domain =
vidc_get_hal_domain(HFI_VIDEO_DOMAIN_ENCODER);
+ if (codec_count == VIDC_MAX_SESSIONS) {
+ dprintk(VIDC_ERR,
+ "Max supported sessions reached\n");
+ break;
+ }
}
}
sys_init_done->codec_count = codec_count;
@@ -1218,9 +1231,10 @@ static void hfi_process_sess_get_prop_buf_req(
}
static int hfi_process_session_prop_info(u32 device_id,
- struct hfi_msg_session_property_info_packet *pkt,
+ void *_pkt,
struct msm_vidc_cb_info *info)
{
+ struct hfi_msg_session_property_info_packet *pkt = _pkt;
struct msm_vidc_cb_cmd_done cmd_done = {0};
struct hfi_profile_level profile_level = {0};
enum hal_h264_entropy entropy = HAL_UNUSED_ENTROPY;
@@ -1293,9 +1307,10 @@ static int hfi_process_session_prop_info(u32 device_id,
}
static int hfi_process_session_init_done(u32 device_id,
- struct hfi_msg_sys_session_init_done_packet *pkt,
+ void *_pkt,
struct msm_vidc_cb_info *info)
{
+ struct hfi_msg_sys_session_init_done_packet *pkt = _pkt;
struct msm_vidc_cb_cmd_done cmd_done = {0};
struct vidc_hal_session_init_done session_init_done = { {0} };
@@ -1327,9 +1342,10 @@ static int hfi_process_session_init_done(u32 device_id,
}
static int hfi_process_session_load_res_done(u32 device_id,
- struct hfi_msg_session_load_resources_done_packet *pkt,
+ void *_pkt,
struct msm_vidc_cb_info *info)
{
+ struct hfi_msg_session_load_resources_done_packet *pkt = _pkt;
struct msm_vidc_cb_cmd_done cmd_done = {0};
dprintk(VIDC_DBG, "RECEIVED: SESSION_LOAD_RESOURCES_DONE[%#x]\n",
@@ -1357,9 +1373,10 @@ static int hfi_process_session_load_res_done(u32 device_id,
}
static int hfi_process_session_flush_done(u32 device_id,
- struct hfi_msg_session_flush_done_packet *pkt,
+ void *_pkt,
struct msm_vidc_cb_info *info)
{
+ struct hfi_msg_session_flush_done_packet *pkt = _pkt;
struct msm_vidc_cb_cmd_done cmd_done = {0};
dprintk(VIDC_DBG, "RECEIVED: SESSION_FLUSH_DONE[%#x]\n",
@@ -1402,9 +1419,10 @@ static int hfi_process_session_flush_done(u32 device_id,
}
static int hfi_process_session_etb_done(u32 device_id,
- struct hfi_msg_session_empty_buffer_done_packet *pkt,
+ void *_pkt,
struct msm_vidc_cb_info *info)
{
+ struct hfi_msg_session_empty_buffer_done_packet *pkt = _pkt;
struct msm_vidc_cb_data_done data_done = {0};
struct hfi_picture_type *hfi_picture_type = NULL;
@@ -1453,9 +1471,10 @@ static int hfi_process_session_etb_done(u32 device_id,
}
static int hfi_process_session_ftb_done(
- u32 device_id, struct vidc_hal_msg_pkt_hdr *msg_hdr,
+ u32 device_id, void *_pkt,
struct msm_vidc_cb_info *info)
{
+ struct vidc_hal_msg_pkt_hdr *msg_hdr = _pkt;
struct msm_vidc_cb_data_done data_done = {0};
bool is_decoder = false, is_encoder = false;
@@ -1580,9 +1599,10 @@ static int hfi_process_session_ftb_done(
}
static int hfi_process_session_start_done(u32 device_id,
- struct hfi_msg_session_start_done_packet *pkt,
+ void *_pkt,
struct msm_vidc_cb_info *info)
{
+ struct hfi_msg_session_start_done_packet *pkt = _pkt;
struct msm_vidc_cb_cmd_done cmd_done = {0};
dprintk(VIDC_DBG, "RECEIVED: SESSION_START_DONE[%#x]\n",
@@ -1608,9 +1628,10 @@ static int hfi_process_session_start_done(u32 device_id,
}
static int hfi_process_session_stop_done(u32 device_id,
- struct hfi_msg_session_stop_done_packet *pkt,
+ void *_pkt,
struct msm_vidc_cb_info *info)
{
+ struct hfi_msg_session_stop_done_packet *pkt = _pkt;
struct msm_vidc_cb_cmd_done cmd_done = {0};
dprintk(VIDC_DBG, "RECEIVED: SESSION_STOP_DONE[%#x]\n",
@@ -1637,9 +1658,10 @@ static int hfi_process_session_stop_done(u32 device_id,
}
static int hfi_process_session_rel_res_done(u32 device_id,
- struct hfi_msg_session_release_resources_done_packet *pkt,
+ void *_pkt,
struct msm_vidc_cb_info *info)
{
+ struct hfi_msg_session_release_resources_done_packet *pkt = _pkt;
struct msm_vidc_cb_cmd_done cmd_done = {0};
dprintk(VIDC_DBG, "RECEIVED: SESSION_RELEASE_RESOURCES_DONE[%#x]\n",
@@ -1666,9 +1688,10 @@ static int hfi_process_session_rel_res_done(u32 device_id,
}
static int hfi_process_session_rel_buf_done(u32 device_id,
- struct hfi_msg_session_release_buffers_done_packet *pkt,
+ void *_pkt,
struct msm_vidc_cb_info *info)
{
+ struct hfi_msg_session_release_buffers_done_packet *pkt = _pkt;
struct msm_vidc_cb_cmd_done cmd_done = {0};
if (!pkt || pkt->size <
@@ -1701,9 +1724,10 @@ static int hfi_process_session_rel_buf_done(u32 device_id,
}
static int hfi_process_session_end_done(u32 device_id,
- struct hfi_msg_sys_session_end_done_packet *pkt,
+ void *_pkt,
struct msm_vidc_cb_info *info)
{
+ struct hfi_msg_sys_session_end_done_packet *pkt = _pkt;
struct msm_vidc_cb_cmd_done cmd_done = {0};
dprintk(VIDC_DBG, "RECEIVED: SESSION_END_DONE[%#x]\n", pkt->session_id);
@@ -1728,9 +1752,10 @@ static int hfi_process_session_end_done(u32 device_id,
}
static int hfi_process_session_abort_done(u32 device_id,
- struct hfi_msg_sys_session_abort_done_packet *pkt,
+ void *_pkt,
struct msm_vidc_cb_info *info)
{
+ struct hfi_msg_sys_session_abort_done_packet *pkt = _pkt;
struct msm_vidc_cb_cmd_done cmd_done = {0};
dprintk(VIDC_DBG, "RECEIVED: SESSION_ABORT_DONE[%#x]\n",
@@ -1836,9 +1861,10 @@ static void hfi_process_sys_get_prop_image_version(
}
static int hfi_process_sys_property_info(u32 device_id,
- struct hfi_msg_sys_property_info_packet *pkt,
+ void *_pkt,
struct msm_vidc_cb_info *info)
{
+ struct hfi_msg_sys_property_info_packet *pkt = _pkt;
if (!pkt) {
dprintk(VIDC_ERR, "%s: invalid param\n", __func__);
return -EINVAL;
@@ -1870,7 +1896,7 @@ static int hfi_process_sys_property_info(u32 device_id,
}
static int hfi_process_ignore(u32 device_id,
- struct vidc_hal_msg_pkt_hdr *msg_hdr,
+ void *_pkt,
struct msm_vidc_cb_info *info)
{
*info = (struct msm_vidc_cb_info) {
@@ -1954,5 +1980,6 @@ int hfi_process_msg_packet(u32 device_id, struct vidc_hal_msg_pkt_hdr *msg_hdr,
break;
}
- return pkt_func ? pkt_func(device_id, msg_hdr, info) : -ENOTSUPP;
+ return pkt_func ?
+ pkt_func(device_id, (void *)msg_hdr, info) : -ENOTSUPP;
}
diff --git a/drivers/media/platform/msm/vidc_3x/venus_hfi.c b/drivers/media/platform/msm/vidc_3x/venus_hfi.c
index ec6f9b0b491e..e82b157cf37b 100644
--- a/drivers/media/platform/msm/vidc_3x/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc_3x/venus_hfi.c
@@ -1587,7 +1587,7 @@ static int __iface_cmdq_write_relaxed(struct venus_hfi_device *device,
__strict_check(device);
if (!__core_in_valid_state(device)) {
- dprintk(VIDC_DBG, "%s - fw not in init state\n", __func__);
+ dprintk(VIDC_ERR, "%s - fw not in init state\n", __func__);
result = -EINVAL;
goto err_q_null;
}
@@ -3337,8 +3337,6 @@ static void __process_sys_error(struct venus_hfi_device *device)
{
struct hfi_sfr_struct *vsfr = NULL;
- __set_state(device, VENUS_STATE_DEINIT);
-
/* Once SYS_ERROR received from HW, it is safe to halt the AXI.
* With SYS_ERROR, Venus FW may have crashed and HW might be
* active and causing unnecessary transactions. Hence it is
@@ -3581,6 +3579,9 @@ static int __response_handler(struct venus_hfi_device *device)
"Too many packets in message queue to handle at once, deferring read\n");
break;
}
+ /* do not read packets after sys error packet */
+ if (info->response_type == HAL_SYS_ERROR)
+ break;
}
if (requeue_pm_work && device->res->sw_power_collapsible) {
@@ -3644,6 +3645,12 @@ err_no_work:
i < num_responses; ++i) {
struct msm_vidc_cb_info *r = &device->response_pkt[i];
+ if (!__core_in_valid_state(device)) {
+ dprintk(VIDC_ERR,
+ "Ignore responses from %d to %d as device is in invalid state",
+ (i + 1), num_responses);
+ break;
+ }
device->callback(r->response_type, &r->response);
}
diff --git a/drivers/media/platform/msm/vidc_3x/vidc_hfi_api.h b/drivers/media/platform/msm/vidc_3x/vidc_hfi_api.h
index 04bf5a873ef6..2cdc2b4bec0c 100644
--- a/drivers/media/platform/msm/vidc_3x/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc_3x/vidc_hfi_api.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -66,6 +66,9 @@
/* 16 encoder and 16 decoder sessions */
#define VIDC_MAX_SESSIONS 32
+#define VIDC_MAX_DECODE_SESSIONS 16
+#define VIDC_MAX_ENCODE_SESSIONS 16
+
enum vidc_status {
VIDC_ERR_NONE = 0x0,
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index 8cedef0daae4..b0aea48907b7 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -2016,6 +2016,8 @@ static int em28xx_dvb_fini(struct em28xx *dev)
}
}
+ em28xx_unregister_dvb(dvb);
+
/* remove I2C SEC */
client = dvb->i2c_client_sec;
if (client) {
@@ -2037,7 +2039,6 @@ static int em28xx_dvb_fini(struct em28xx *dev)
i2c_unregister_device(client);
}
- em28xx_unregister_dvb(dvb);
kfree(dvb);
dev->dvb = NULL;
kref_put(&dev->ref, em28xx_free_device);
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index f405125fbbde..8b5696d25bf1 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -813,6 +813,21 @@ config QPNP_MISC
and the driver provides an API to check if this interrupt
is available on the current PMIC chip.
+config SCBUF_CLIENT
+ tristate "Client for Secure Camera buffer protocol"
+ depends on OKL4_GUEST
+ select ANON_INODES
+ default n
+ help
+ Say Y here to enable a driver that communicates with the hypervisor
+ resource manager to obtain mappings of Secure Camera buffers that
+ have been exported by a camera driver running in another VM. The
+ buffers are identified by handles that must be obtained through
+ another channel, typically a direct IPC channel from the exporting
+ VM.
+
+ If in doubt, say N.
+
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index d57d21f54df1..35cd16d1a632 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -61,6 +61,8 @@ obj-$(CONFIG_MEMORY_STATE_TIME) += memory_state_time.o
obj-$(CONFIG_UID_SYS_STATS) += uid_sys_stats.o
obj-$(CONFIG_MEMORY_STATE_TIME) += memory_state_time.o
+obj-$(CONFIG_SCBUF_CLIENT) += scbuf-client.o
+
lkdtm-$(CONFIG_LKDTM) += lkdtm_core.o
lkdtm-$(CONFIG_LKDTM) += lkdtm_bugs.o
lkdtm-$(CONFIG_LKDTM) += lkdtm_heap.o
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 50be365f00a2..f735ac8d32ba 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -8773,11 +8773,11 @@ static int qseecom_remove(struct platform_device *pdev)
&qseecom.registered_kclient_list_head, list) {
/* Break the loop if client handle is NULL */
- if (!kclient->handle)
- goto exit_free_kclient;
-
- if (list_empty(&kclient->list))
- goto exit_free_kc_handle;
+ if (!kclient->handle) {
+ list_del(&kclient->list);
+ kzfree(kclient);
+ break;
+ }
list_del(&kclient->list);
mutex_lock(&app_access_lock);
@@ -8790,11 +8790,6 @@ static int qseecom_remove(struct platform_device *pdev)
}
}
-exit_free_kc_handle:
- kzfree(kclient->handle);
-exit_free_kclient:
- kzfree(kclient);
-
spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
if (qseecom.qseos_version > QSEEE_VERSION_00)
diff --git a/drivers/misc/scbuf-client.c b/drivers/misc/scbuf-client.c
new file mode 100644
index 000000000000..0a326a1846cb
--- /dev/null
+++ b/drivers/misc/scbuf-client.c
@@ -0,0 +1,819 @@
+/*
+ * drivers/char/scbuf-client.c
+ *
+ * Copyright (c) 2018 Cog Systems Pty Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Secure Camera Buffer client driver.
+ *
+ * This driver communicates with a resource manager VM through an OKL4 Pipe,
+ * to request the physical address ranges for Secure Camera buffers identified
+ * by integer handles that have been obtained by userspace out-of-band
+ * (typically through a direct IPC link to the VM that exported the buffers).
+ * When a physical address range for a buffer is successfully obtained, the
+ * buffer is exported to userspace as an FD that supports mmap().
+ */
+
+#include <linux/kernel.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/platform_device.h>
+#include <linux/idr.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/ioctl.h>
+#include <linux/scbuf.h>
+#include <linux/anon_inodes.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqreturn.h>
+#include <linux/of.h>
+#include <linux/file.h>
+#include <linux/poll.h>
+
+#include <microvisor/microvisor.h>
+
+#include "scbuf-client.h"
+
+#define DRIVER_NAME "scbuf-client"
+#define DEVICE_PREFIX "scbuf"
+#define MAX_DEVICES 4
+
+#define MAX_REGIONS 32
+
+
+static struct class *scbuf_class;
+static struct ida minor_ida;
+static dev_t chr_dev;
+
+
+struct scbuf_dev {
+ atomic_t open;
+ struct file *filp;
+
+ okl4_kcap_t tx_pipe_kcap;
+ int tx_irq, tx_okl4_irq;
+ okl4_kcap_t rx_pipe_kcap;
+ int rx_irq, rx_okl4_irq;
+ size_t max_msg_size;
+
+ bool notify_start;
+ wait_queue_head_t notify_wq;
+
+ bool tx_avail;
+ wait_queue_head_t tx_wq;
+
+ /* List of struct scbuf_req, for expected replies */
+ spinlock_t req_lock;
+ struct list_head req_queue;
+ struct work_struct rx_work;
+ size_t rx_buf_size;
+ struct res_mgr_msg *rx_buf;
+
+ struct platform_device *plat;
+ struct cdev cdev;
+ int id;
+ struct kref kref;
+};
+
+static void
+scbuf_dev_free(struct kref *kref)
+{
+ struct scbuf_dev *d = container_of(kref, struct scbuf_dev, kref);
+
+ if (d->rx_buf)
+ kfree(d->rx_buf);
+
+ kfree(d);
+}
+
+static inline void
+scbuf_dev_put(struct scbuf_dev *d)
+{
+ kref_put(&d->kref, scbuf_dev_free);
+}
+
+struct scbuf_region {
+ phys_addr_t base;
+ size_t size;
+};
+
+struct scbuf {
+ struct kref kref;
+ struct scbuf_dev *dev;
+
+ unsigned int region_count;
+ struct scbuf_region regions[];
+};
+
+static void
+scbuf_free(struct kref *kref)
+{
+ struct scbuf *b = container_of(kref, struct scbuf, kref);
+
+ scbuf_dev_put(b->dev);
+ kfree(b);
+}
+
+static inline void
+scbuf_put(struct scbuf *b)
+{
+ kref_put(&b->kref, scbuf_free);
+}
+
+struct scbuf_req {
+ struct scbuf_dev *dev;
+ int ret;
+ struct scbuf *buffer;
+ struct completion complete;
+ struct list_head list;
+ struct kref kref;
+};
+
+static void
+scbuf_req_free(struct kref *kref)
+{
+ struct scbuf_req *r = container_of(kref, struct scbuf_req, kref);
+
+ if (r->buffer)
+ scbuf_put(r->buffer);
+ scbuf_dev_put(r->dev);
+ kfree(r);
+}
+
+static inline void
+scbuf_req_put(struct scbuf_req *r)
+{
+ kref_put(&r->kref, scbuf_req_free);
+}
+
+static okl4_error_t
+okl4_pipe_control(okl4_kcap_t kcap, uint8_t control)
+{
+ okl4_pipe_control_t x = 0;
+
+ okl4_pipe_control_setdoop(&x, true);
+ okl4_pipe_control_setoperation(&x, control);
+ return _okl4_sys_pipe_control(kcap, x);
+}
+
+static int
+scbuf_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct scbuf *b = (struct scbuf *)filp->private_data;
+ size_t size = vma->vm_end - vma->vm_start;
+ size_t mapped, remaining;
+ unsigned long first_pgoff;
+ unsigned long pgoff = vma->vm_pgoff;
+ unsigned int first_region, last_region;
+ unsigned int i;
+
+ /* Look for the region containing the first page */
+ for (i = 0; i < b->region_count; i++) {
+ unsigned long region_pages = b->regions[i].size >> PAGE_SHIFT;
+
+ if (pgoff < region_pages)
+ break;
+ pgoff -= region_pages;
+ }
+ first_region = i;
+ first_pgoff = pgoff;
+
+ /* Offset out of range? */
+ if (first_region >= b->region_count)
+ return -EINVAL;
+
+ /* Look for the region containing the last page */
+ remaining = size;
+ pgoff = first_pgoff;
+ for (i = first_region; i < b->region_count; i++) {
+ if ((pgoff << PAGE_SHIFT) + remaining <= b->regions[i].size)
+ break;
+ remaining -= b->regions[i].size - (pgoff << PAGE_SHIFT);
+ pgoff = 0;
+ }
+ last_region = i;
+
+ /* End out of range? */
+ if (last_region >= b->region_count)
+ return -EINVAL;
+
+ /* Now set up the mappings */
+ pgoff = first_pgoff;
+ mapped = 0;
+ remaining = size;
+ for (i = first_region; i <= last_region; i++) {
+ unsigned long pfn = (b->regions[i].base >> PAGE_SHIFT) + pgoff;
+ size_t this_size = min(remaining,
+ b->regions[i].size - (pgoff << PAGE_SHIFT));
+
+ if (remap_pfn_range(vma, vma->vm_start + mapped, pfn,
+ this_size, vma->vm_page_prot))
+ return -EAGAIN;
+ remaining -= this_size;
+ mapped += this_size;
+ pgoff = 0;
+ }
+
+ return 0;
+}
+
+static int
+scbuf_release(struct inode *inode, struct file *filp)
+{
+ struct scbuf *b = (struct scbuf *)filp->private_data;
+
+ /* Allow the device file to close */
+ fput(b->dev->filp);
+
+ filp->private_data = NULL;
+ scbuf_put(b);
+
+ return 0;
+}
+
+static const struct file_operations scbuf_fops = {
+ .owner = THIS_MODULE,
+ .mmap = scbuf_mmap,
+ .release = scbuf_release,
+};
+
+static int
+scbuf_lookup_handle(struct scbuf_dev *d, u32 handle)
+{
+ struct res_mgr_msg msg;
+ struct scbuf_req *req;
+ int ret;
+ okl4_error_t err;
+
+ memset(&msg, 0, sizeof(msg));
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ kref_init(&req->kref);
+ init_completion(&req->complete);
+
+ kref_get(&d->kref);
+ req->dev = d;
+
+ msg.msg_id = RES_MGR_LOOKUP_HANDLE;
+ msg.securecam.handle = handle;
+
+ do {
+ spin_lock_irq(&d->req_lock);
+ err = _okl4_sys_pipe_send(d->tx_pipe_kcap, sizeof(msg),
+ (const uint8_t *)&msg);
+ if (err == OKL4_OK) {
+ kref_get(&req->kref);
+ list_add_tail(&req->list, &d->req_queue);
+ }
+ if (err == OKL4_ERROR_PIPE_FULL)
+ d->tx_avail = false;
+ spin_unlock_irq(&d->req_lock);
+
+ if (err != OKL4_ERROR_PIPE_FULL)
+ break;
+ if (d->filp->f_flags & O_NONBLOCK)
+ break;
+
+ ret = wait_event_interruptible(d->tx_wq, d->tx_avail);
+ if (ret < 0)
+ goto fail_tx;
+ } while (true);
+
+ if (err == OKL4_ERROR_PIPE_FULL) {
+ ret = -EAGAIN;
+ goto fail_tx;
+ } else if (err != OKL4_OK) {
+ ret = -EIO;
+ goto fail_tx;
+ }
+
+ ret = wait_for_completion_interruptible(&req->complete);
+ if (ret < 0)
+ goto fail_rx;
+
+ ret = req->ret;
+ if (ret < 0)
+ goto fail_rx;
+
+ if (WARN_ON(req->buffer == NULL)) {
+ ret = -EIO;
+ goto fail_rx;
+ }
+
+ /* Obtain an fd for the buffer */
+ ret = anon_inode_getfd("[scbuf]", &scbuf_fops, req->buffer, O_RDWR);
+ if (ret < 0)
+ goto fail_fd;
+ req->buffer = NULL;
+
+ /*
+ * Hold a reference to the device file so it can't be released while
+ * the buffer fd is still open / mapped
+ */
+ get_file(d->filp);
+
+fail_fd:
+fail_rx:
+fail_tx:
+ scbuf_req_put(req);
+ return ret;
+}
+
+static void
+scbuf_lookup_handle_reply(struct scbuf_dev *d, struct res_mgr_msg *msg,
+ size_t msg_size)
+{
+ struct scbuf_req *req;
+ struct scbuf *b;
+ const unsigned int region_count = msg->securecam.sglist.region_count;
+ const size_t expected_size = offsetof(struct res_mgr_msg,
+ securecam.sglist.regions)
+ + (region_count * sizeof(msg->securecam.sglist.regions[0]));
+ unsigned int i;
+
+ spin_lock(&d->req_lock);
+ req = list_first_entry(&d->req_queue, struct scbuf_req, list);
+ list_del(&req->list);
+ spin_unlock(&d->req_lock);
+
+ if (region_count == 0) {
+ req->ret = -ENXIO;
+ goto out;
+ } else if (region_count > MAX_REGIONS) {
+ dev_err(&d->plat->dev, "too many regions in sglist: %u\n",
+ region_count);
+ req->ret = -EIO;
+ goto out;
+ }
+
+ if (msg_size < expected_size) {
+ dev_err(&d->plat->dev, "short reply: want %zd bytes, got %zd\n",
+ expected_size, msg_size);
+ req->ret = -EIO;
+ goto out;
+ }
+
+ b = kzalloc(sizeof(*b) + (region_count * sizeof(b->regions[0])),
+ GFP_KERNEL);
+ if (!b) {
+ req->ret = -ENOMEM;
+ goto out;
+ }
+ kref_init(&b->kref);
+ b->dev = req->dev;
+ kref_get(&b->dev->kref);
+
+ /* Copy the regions into the buffer object */
+ b->region_count = region_count;
+ for (i = 0; i < region_count; i++) {
+ phys_addr_t base = msg->securecam.sglist.regions[i].address_ipa;
+ phys_addr_t size = msg->securecam.sglist.regions[i].size;
+
+ if (!IS_ALIGNED(base, PAGE_SIZE) ||
+ !IS_ALIGNED(size, PAGE_SIZE)) {
+ dev_err(&d->plat->dev, "misaligned region %#zx + %#zx\n",
+ (size_t)base, (size_t)size);
+ req->ret = -EIO;
+ scbuf_put(b);
+ goto out;
+ }
+
+ b->regions[i].base = base;
+ b->regions[i].size = size;
+ }
+
+ req->buffer = b;
+ req->ret = 0;
+
+out:
+ complete(&req->complete);
+ scbuf_req_put(req);
+}
+
+static void
+scbuf_notify_start(struct scbuf_dev *d)
+{
+ const uint32_t msg_id = RES_MGR_SECURECAM_ACK_START;
+ _okl4_sys_pipe_send(d->tx_pipe_kcap, sizeof(msg_id),
+ (const uint8_t *)&msg_id);
+ d->notify_start = true;
+ wake_up_all(&d->notify_wq);
+}
+
+static void
+scbuf_rx_work(struct work_struct *work)
+{
+ struct scbuf_dev *d = container_of(work, struct scbuf_dev, rx_work);
+ struct _okl4_sys_pipe_recv_return recv_ret;
+ struct res_mgr_msg *msg = d->rx_buf;
+
+ /*
+ * Note that there's no need to take the lock before the receive call,
+ * because we only read in this work item which is serialised.
+ */
+ recv_ret = _okl4_sys_pipe_recv(d->rx_pipe_kcap, d->rx_buf_size,
+ (uint8_t *)msg);
+
+ /* If the pipe is empty, nothing to do */
+ if (recv_ret.error == OKL4_ERROR_PIPE_EMPTY)
+ return;
+
+ if (recv_ret.error != OKL4_OK) {
+ dev_err(&d->plat->dev, "pipe receive error: %d\n",
+ recv_ret.error);
+ return;
+ }
+ if (recv_ret.size < sizeof(u32)) {
+ dev_err(&d->plat->dev, "pipe short message, ignored\n");
+ return;
+ }
+
+ if (msg->msg_id == RES_MGR_LOOKUP_HANDLE_REPLY) {
+ scbuf_lookup_handle_reply(d, msg, recv_ret.size);
+ } else if (msg->msg_id == RES_MGR_SECURECAM_NOTIFY_START) {
+ scbuf_notify_start(d);
+ } else {
+ dev_err(&d->plat->dev, "pipe unknown message id: %#x\n",
+ (unsigned int)msg->msg_id);
+ }
+
+ /* Reschedule the work in case there's another message waiting */
+ schedule_work(work);
+}
+
+static long
+scbuf_ioctl(struct file *filp, unsigned int cmd, unsigned long data)
+{
+ int ret;
+ struct scbuf_dev *d = (struct scbuf_dev *)filp->private_data;
+
+ switch (cmd) {
+ case IOCTL_SCBUF_LOOKUP_HANDLE:
+ ret = scbuf_lookup_handle(d, (u32)data);
+ break;
+ default:
+ ret = -ENOTTY;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+scbuf_open(struct inode *inode, struct file *filp)
+{
+ int ret;
+ okl4_error_t okl4_err;
+ struct scbuf_dev *d = container_of(inode->i_cdev,
+ struct scbuf_dev, cdev);
+
+ if (atomic_inc_return(&d->open) > 1) {
+ atomic_dec(&d->open);
+ return -EBUSY;
+ }
+
+ kref_get(&d->kref);
+ filp->private_data = d;
+ d->filp = filp;
+
+ okl4_err = okl4_pipe_control(d->rx_pipe_kcap,
+ OKL4_PIPE_CONTROL_OP_SET_RX_READY);
+ if (okl4_err != OKL4_OK) {
+ ret = -EIO;
+ goto fail_rx_ready;
+ }
+
+ okl4_err = okl4_pipe_control(d->tx_pipe_kcap,
+ OKL4_PIPE_CONTROL_OP_SET_TX_READY);
+ if (okl4_err != OKL4_OK) {
+ ret = -EIO;
+ goto fail_tx_ready;
+ }
+
+ d->tx_avail = true;
+ schedule_work(&d->rx_work);
+
+ return 0;
+
+fail_tx_ready:
+ okl4_pipe_control(d->rx_pipe_kcap, OKL4_PIPE_CONTROL_OP_RESET);
+fail_rx_ready:
+ d->filp = NULL;
+ filp->private_data = NULL;
+ scbuf_dev_put(d);
+ atomic_dec(&d->open);
+ return ret;
+}
+
+static unsigned int
+scbuf_poll(struct file *filp, poll_table *wait)
+{
+ struct scbuf_dev *d = (struct scbuf_dev *)filp->private_data;
+ unsigned int mask = 0;
+
+ poll_wait(filp, &d->notify_wq, wait);
+ poll_wait(filp, &d->tx_wq, wait);
+
+ if (d->notify_start)
+ mask |= POLLPRI;
+ if (d->tx_avail)
+ mask |= POLLOUT | POLLWRNORM;
+
+ return mask;
+}
+
+static int
+scbuf_close(struct inode *inode, struct file *filp)
+{
+ struct scbuf_dev *d = container_of(inode->i_cdev,
+ struct scbuf_dev, cdev);
+
+ /* Prevent any buffer creation or notifications while closed */
+ okl4_pipe_control(d->rx_pipe_kcap, OKL4_PIPE_CONTROL_OP_RESET);
+ okl4_pipe_control(d->tx_pipe_kcap, OKL4_PIPE_CONTROL_OP_RESET);
+
+ /* Ensure that interrupt handlers have completed */
+ disable_irq(d->rx_irq);
+ disable_irq(d->tx_irq);
+
+ /* Abort any still-pending buffer lookups */
+ spin_lock(&d->req_lock);
+ while (!list_empty(&d->req_queue)) {
+ struct scbuf_req *req = list_first_entry(
+ &d->req_queue, struct scbuf_req, list);
+ req->ret = -ECANCELED;
+ complete(&req->complete);
+ list_del(&req->list);
+ scbuf_req_put(req);
+ }
+ spin_unlock(&d->req_lock);
+
+ /* Clean up the open device's state */
+ d->notify_start = false;
+ d->filp = NULL;
+ filp->private_data = NULL;
+ scbuf_dev_put(d);
+ atomic_dec(&d->open);
+
+ /* Re-enable interrupts */
+ enable_irq(d->rx_irq);
+ enable_irq(d->tx_irq);
+
+ return 0;
+}
+
+static const struct file_operations scbuf_dev_fops = {
+ .owner = THIS_MODULE,
+ .open = scbuf_open,
+ .release = scbuf_close,
+ .compat_ioctl = scbuf_ioctl,
+ .unlocked_ioctl = scbuf_ioctl,
+ .poll = scbuf_poll,
+};
+
+static irqreturn_t
+scbuf_tx_irq(int irq, void *dev)
+{
+ struct scbuf_dev *d = dev;
+ struct _okl4_sys_interrupt_get_payload_return ret;
+ okl4_pipe_state_t payload;
+
+ ret = _okl4_sys_interrupt_get_payload(d->tx_okl4_irq);
+ if (ret.error != OKL4_OK)
+ return IRQ_NONE;
+
+ payload = ret.payload;
+ if (okl4_pipe_state_gettxavailable(&payload)) {
+ d->tx_avail = true;
+ wake_up(&d->tx_wq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
+scbuf_rx_irq(int irq, void *dev)
+{
+ struct scbuf_dev *d = dev;
+ struct _okl4_sys_interrupt_get_payload_return ret;
+ okl4_pipe_state_t payload;
+
+ ret = _okl4_sys_interrupt_get_payload(d->rx_okl4_irq);
+ if (ret.error != OKL4_OK)
+ return IRQ_NONE;
+
+ payload = ret.payload;
+ if (okl4_pipe_state_getrxavailable(&payload))
+ schedule_work(&d->rx_work);
+
+ return IRQ_HANDLED;
+}
+
+static int
+scbuf_probe(struct platform_device *plat)
+{
+ int ret;
+ dev_t new_chr_dev;
+ struct device *device;
+ u32 reg[2];
+ u32 size_prop;
+ struct resource *irq;
+
+ struct scbuf_dev *priv = kzalloc(sizeof(struct scbuf_dev),
+ GFP_KERNEL);
+ if (priv == NULL) {
+ ret = -ENOMEM;
+ goto fail_alloc_priv;
+ }
+ priv->plat = plat;
+ kref_init(&priv->kref);
+ dev_set_drvdata(&plat->dev, priv);
+
+ INIT_WORK(&priv->rx_work, scbuf_rx_work);
+ INIT_LIST_HEAD(&priv->req_queue);
+ atomic_set(&priv->open, 0);
+ init_waitqueue_head(&priv->notify_wq);
+ init_waitqueue_head(&priv->tx_wq);
+
+ ret = ida_simple_get(&minor_ida, 0, MAX_DEVICES, GFP_KERNEL);
+ if (ret < 0)
+ goto fail_alloc_minor;
+ priv->id = ret;
+
+ if (of_property_read_u32_array(plat->dev.of_node, "reg", reg, 2)) {
+ dev_err(&plat->dev, "two OKL4 Pipe capabilities required\n");
+ ret = -ENODEV;
+ goto fail_pipe_kcaps;
+ }
+ priv->tx_pipe_kcap = reg[0];
+ priv->rx_pipe_kcap = reg[1];
+
+ if (of_property_read_u32(plat->dev.of_node, "okl,message-size",
+ &size_prop)) {
+ dev_warn(&plat->dev, "message size unknown, assuming 128\n");
+ size_prop = 128;
+ }
+ priv->rx_buf_size = (size_t)size_prop;
+ priv->rx_buf = kmalloc(priv->rx_buf_size, GFP_KERNEL);
+ if (!priv->rx_buf) {
+ ret = -ENOMEM;
+ goto fail_alloc_rx_buf;
+ }
+
+ irq = platform_get_resource(plat, IORESOURCE_IRQ, 0);
+ if (!irq) {
+ dev_err(&plat->dev, "no TX interrupt found");
+ ret = -ENODEV;
+ goto fail_tx_interrupt;
+ }
+ priv->tx_irq = irq->start;
+ ret = devm_request_irq(&plat->dev, priv->tx_irq, scbuf_tx_irq, 0,
+ dev_name(&plat->dev), priv);
+ if (ret) {
+ dev_err(&plat->dev, "can't register TX interrupt %d: %d\n",
+ priv->tx_irq, ret);
+ goto fail_tx_interrupt;
+ }
+ priv->tx_okl4_irq = irqd_to_hwirq(irq_get_irq_data(priv->tx_irq));
+
+ irq = platform_get_resource(plat, IORESOURCE_IRQ, 1);
+ if (!irq) {
+ dev_err(&plat->dev, "no RX interrupt found");
+ ret = -ENODEV;
+ goto fail_rx_interrupt;
+ }
+ priv->rx_irq = irq->start;
+ ret = devm_request_irq(&plat->dev, priv->rx_irq, scbuf_rx_irq, 0,
+ dev_name(&plat->dev), priv);
+ if (ret) {
+ dev_err(&plat->dev, "can't register RX interrupt %d: %d\n",
+ priv->rx_irq, ret);
+ goto fail_rx_interrupt;
+ }
+ priv->rx_okl4_irq = irqd_to_hwirq(irq_get_irq_data(priv->rx_irq));
+
+ new_chr_dev = MKDEV(MAJOR(chr_dev), MINOR(chr_dev) + priv->id);
+ cdev_init(&priv->cdev, &scbuf_dev_fops);
+ priv->cdev.owner = THIS_MODULE;
+ ret = cdev_add(&priv->cdev, new_chr_dev, 1);
+ if (ret)
+ goto fail_cdev_add;
+
+ device = device_create(scbuf_class, &plat->dev, new_chr_dev,
+ NULL, DEVICE_PREFIX "%d", priv->id);
+ if (IS_ERR(device)) {
+ ret = PTR_ERR(scbuf_class);
+ goto fail_device_create;
+ }
+
+ return 0;
+
+ device_destroy(scbuf_class, priv->cdev.dev);
+fail_device_create:
+ cdev_del(&priv->cdev);
+fail_cdev_add:
+ devm_free_irq(&plat->dev, priv->rx_irq, priv);
+fail_rx_interrupt:
+ devm_free_irq(&plat->dev, priv->tx_irq, priv);
+fail_tx_interrupt:
+fail_alloc_rx_buf:
+fail_pipe_kcaps:
+ ida_simple_remove(&minor_ida, priv->id);
+fail_alloc_minor:
+ priv->plat = NULL;
+
+ dev_set_drvdata(&plat->dev, NULL);
+ kref_put(&priv->kref, scbuf_free);
+fail_alloc_priv:
+ return ret;
+}
+
+static int
+scbuf_remove(struct platform_device *plat)
+{
+ struct scbuf_dev *priv = dev_get_drvdata(&plat->dev);
+
+ device_destroy(scbuf_class, priv->cdev.dev);
+ cdev_del(&priv->cdev);
+
+ devm_free_irq(&plat->dev, priv->rx_irq, priv);
+ devm_free_irq(&plat->dev, priv->tx_irq, priv);
+
+ ida_simple_remove(&minor_ida, priv->id);
+
+ priv->plat = NULL;
+ synchronize_rcu();
+
+ dev_set_drvdata(&plat->dev, NULL);
+ kref_put(&priv->kref, scbuf_free);
+
+ return 0;
+}
+
+static const struct of_device_id scbuf_match[] = {
+ { .compatible = "qcom,resource-manager-scbuf" },
+ {},
+};
+
+static struct platform_driver scbuf_driver = {
+ .probe = scbuf_probe,
+ .remove = scbuf_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = scbuf_match,
+ },
+};
+
+static int __init
+scbuf_init(void)
+{
+ int ret;
+
+ ida_init(&minor_ida);
+
+ ret = alloc_chrdev_region(&chr_dev, 0, MAX_DEVICES, DEVICE_PREFIX);
+ if (ret < 0)
+ goto fail_alloc_chrdev_region;
+
+ scbuf_class = class_create(THIS_MODULE, DRIVER_NAME);
+ if (IS_ERR(scbuf_class)) {
+ ret = PTR_ERR(scbuf_class);
+ goto fail_class_create;
+ }
+
+ ret = platform_driver_register(&scbuf_driver);
+ if (ret < 0)
+ goto fail_driver_register;
+
+ return 0;
+
+fail_driver_register:
+ class_destroy(scbuf_class);
+fail_class_create:
+ unregister_chrdev_region(chr_dev, MAX_DEVICES);
+fail_alloc_chrdev_region:
+ return ret;
+}
+
+static void __exit
+scbuf_exit(void)
+{
+ platform_driver_unregister(&scbuf_driver);
+ class_destroy(scbuf_class);
+ unregister_chrdev_region(chr_dev, MAX_DEVICES);
+}
+
+module_init(scbuf_init);
+module_exit(scbuf_exit);
+
+MODULE_DESCRIPTION("Secure Camera Buffer client");
diff --git a/drivers/misc/scbuf-client.h b/drivers/misc/scbuf-client.h
new file mode 100644
index 000000000000..ff465dacaaca
--- /dev/null
+++ b/drivers/misc/scbuf-client.h
@@ -0,0 +1,204 @@
+/*
+ * Copyright (c) 2018 Cog Systems Pty Ltd
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * Core API
+ */
+#define ERROR_REPLY 0x8000ffff
+
+/*
+ * Boot manager API
+ */
+#define BOOT_MGR_PROTOCOL_ID 'B'
+
+/* start_client: Unmap the client (ML VM) memory and start Linux */
+#define BOOT_MGR_START_CLIENT 0x00420001
+/* msg_payload: struct boot_mgr_start_params */
+
+struct boot_mgr_start_params {
+ uint64_t entry_addr; /* Physical load address / entry point of Linux */
+ uint64_t dtb_addr; /* Physical address of DTB */
+ bool is_64bit; /* True to reset VM to AArch64 mode, false for AArch32 */
+};
+
+/* start_client_reply: Response to BOOT_MGR_START_CLIENT */
+#define BOOT_MGR_START_CLIENT_REPLY 0x80420001
+/* msg_payload: bool success */
+
+/* start_self: Reset the caller and start the loaded HLOS image */
+#define BOOT_MGR_START_SELF 0x00420002
+/* msg_payload: struct boot_mgr_start_params */
+
+/*
+ * start_self_reply: Response to BOOT_MGR_START_CLIENT; sent only on
+ * failure as the caller will be reset if this call succeeds
+ */
+#define BOOT_MGR_START_SELF_REPLY 0x80420002
+/* msg_payload: bool success */
+
+
+/*
+ * Secure Camera Server API (for HLOS)
+ */
+#define RES_MGR_SECURECAM_SERVER_PROTOCOL_ID 'q'
+
+/*
+ * get_handle: Given a buffer sg list, return an SC handle.
+ *
+ * This is sent by the HLOS to the resource manager to obtain the SC handle
+ * to be used to refer to a specific camera buffer.
+ *
+ * The message payload is a list of IPA ranges in the HLOS VM's stage 2
+ * address space. These ranges must have previously been passed to a TZ secure
+ * camera map call that has been intercepted by the hypervisor and forwarded
+ * to both TZ and the resource manager.
+ *
+ * Payload: struct res_mgr_sglist securecam.sglist
+ * Note: The payload ends with a variable-length array.
+ */
+#define RES_MGR_SECURECAM_GET_HANDLE 0x00710001
+
+struct res_mgr_region {
+ uint64_t address_ipa;
+ uint32_t size;
+};
+
+struct res_mgr_sglist {
+ uint32_t region_count;
+ struct res_mgr_region regions[];
+};
+
+/*
+ * get_handle_reply: Response to a get_handle request.
+ *
+ * This is sent by the resource manager to the HLOS to return the SC handle to
+ * be used to refer to the specified buffer.
+ *
+ * If the specified sglist did not match a secure camera buffer known to the
+ * resource manager, the value 0xffffffff is returned. This value is never
+ * a valid SC handle.
+ *
+ * Payload: uint32_t securecam.handle
+ */
+#define RES_MGR_SECURECAM_GET_HANDLE_REPLY 0x80710001
+
+/*
+ * destroy_handles: Destroy all SC handles and unmap their buffers.
+ *
+ * This is sent by the HLOS to the resource manager to ask it to unmap all
+ * secure camera buffers from the ML VM and return the memory to the HLOS.
+ *
+ * Under normal operation, this message will be received by the resource
+ * manager after the ML VM has indicated that its application is complete by
+ * sending a DONE message. If this is not the case, the resource manager will
+ * wait until both this message and the DONE message have been received before
+ * destroying the buffers.
+ *
+ * Payload: void
+ */
+#define RES_MGR_SECURECAM_DESTROY_HANDLES 0x00710002
+
+/*
+ * destroy_handles_reply: Indicate that all SC handles have been destroyed.
+ *
+ * This is sent by the resource manager to the HLOS to inform it that all
+ * secure camera buffers have been unmapped from the ML VM and returned to the
+ * HLOS.
+ *
+ * Payload: void
+ */
+#define RES_MGR_SECURECAM_DESTROY_HANDLES_REPLY 0x80710002
+
+
+/*
+ * Secure Camera Client API (for ML VM)
+ */
+#define RES_MGR_SECURECAM_CLIENT_PROTOCOL_ID 'Q'
+
+/*
+ * notify_start: Tell the client that the first camera buffer has been mapped.
+ *
+ * This is sent by the resource manager to the ML VM after the first instance
+ * of a TZ map call for a secure camera buffer being intercepted.
+ *
+ * Payload: void
+ */
+#define RES_MGR_SECURECAM_NOTIFY_START 0x80510001
+
+/*
+ * ack_start: Acknowledge a notify_start message
+ *
+ * This is sent by the ML VM to the resource manager to acknowledge receipt
+ * of a notify_start message.
+ *
+ * Payload: void
+ */
+#define RES_MGR_SECURECAM_ACK_START 0x00510001
+
+/*
+ * done: Indicate that the secure camera application has terminated.
+ *
+ * This is sent by the ML VM when access to the secure camera buffers is no
+ * longer required. The resource manager will delay unmapping the buffers
+ * until this message is received.
+ *
+ * Payload: void
+ */
+#define RES_MGR_SECURECAM_DONE 0x00510002
+
+/*
+ * lookup_handle: Request physical addresses for a secure camera handle.
+ *
+ * This is sent by the ML VM when userspace code attempts to register a secure
+ * camera buffer handle.
+ *
+ * Payload: uint32_t securecam.handle
+ */
+#define RES_MGR_LOOKUP_HANDLE 0x00510003
+
+/*
+ * lookup_handle_reply: Response to lookup_handle.
+ *
+ * When the resource manager receives a lookup_handle message containing a
+ * handle that is valid and has already been mapped into the ML VM stage 2,
+ * this message is returned containing the list of IPA ranges that have been
+ * assigned to the buffer in the ML VM's address space.
+ *
+ * If the handle is unknown, or corresponds to a buffer that is not currently
+ * mapped into the ML VM stage 2, the region_count field of the result will be
+ * set to 0.
+ *
+ * Payload: struct res_mgr_sglist securecam.sglist
+ * Note: The payload ends with a variable-length array.
+ */
+#define RES_MGR_LOOKUP_HANDLE_REPLY 0x80510003
+
+/*
+ * Top-level message structure
+ */
+struct res_mgr_msg {
+ uint32_t msg_id;
+ union {
+ bool success;
+ struct {
+ struct boot_mgr_start_params start_params;
+ } boot_mgr;
+ struct {
+ uint32_t handle;
+ struct res_mgr_sglist sglist;
+ } securecam;
+ };
+};
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index ff4f84fdab38..86aa3b2a92ca 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -2973,15 +2973,20 @@ static int mmc_runtime_suspend(struct mmc_host *host)
*/
static int mmc_runtime_resume(struct mmc_host *host)
{
- int err;
+ int err = 0;
ktime_t start = ktime_get();
MMC_TRACE(host, "%s\n", __func__);
+
+ if (!(host->caps & MMC_CAP_AGGRESSIVE_PM))
+ goto out;
+
err = _mmc_resume(host);
if (err && err != -ENOMEDIUM)
pr_err("%s: error %d doing runtime resume\n",
mmc_hostname(host), err);
+out:
trace_mmc_runtime_resume(mmc_hostname(host), err,
ktime_to_us(ktime_sub(ktime_get(), start)));
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index ffcd00153ee2..ba561c5daf7a 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -746,11 +746,7 @@ static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
if (!event)
continue;
- /*
- * Check if an attempt was made to free this event during
- * the CPU went offline.
- */
- if (event->state == PERF_EVENT_STATE_ZOMBIE)
+ if (event->state != PERF_EVENT_STATE_ACTIVE)
continue;
switch (cmd) {
@@ -876,10 +872,8 @@ static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
if (!pmu || !cpumask_test_cpu(cpu, &pmu->supported_cpus))
return 0;
- data.cmd = CPU_PM_EXIT;
- cpu_pm_pmu_common(&data);
- if (data.ret == NOTIFY_DONE)
- return 0;
+ if (pmu->reset)
+ pmu->reset(pmu);
if (data.armpmu->pmu_state != ARM_PMU_STATE_OFF &&
data.armpmu->plat_device) {
@@ -905,8 +899,6 @@ static int arm_perf_stopping_cpu(unsigned int cpu, struct hlist_node *node)
if (!pmu || !cpumask_test_cpu(cpu, &pmu->supported_cpus))
return 0;
- data.cmd = CPU_PM_ENTER;
- cpu_pm_pmu_common(&data);
/* Disarm the PMU IRQ before disappearing. */
if (data.armpmu->pmu_state == ARM_PMU_STATE_RUNNING &&
data.armpmu->plat_device) {
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 1441678113aa..58b7eef22454 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2013, Sony Mobile Communications AB.
- * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -33,6 +33,7 @@
#include <linux/pm.h>
#include <linux/log2.h>
#include <linux/irq.h>
+#include <soc/qcom/scm.h>
#include "../core.h"
#include "../pinconf.h"
#include "pinctrl-msm.h"
@@ -71,6 +72,8 @@ struct msm_pinctrl {
const struct msm_pinctrl_soc_data *soc;
void __iomem *regs;
void __iomem *pdc_regs;
+ phys_addr_t spi_cfg_regs;
+ phys_addr_t spi_cfg_end;
};
static int msm_get_groups_count(struct pinctrl_dev *pctldev)
@@ -1125,8 +1128,12 @@ static void add_dirconn_tlmm(struct irq_data *d, irq_hw_number_t irq)
struct irq_desc *desc = irq_data_to_desc(d);
struct irq_data *parent_data = irq_get_irq_data(desc->parent_irq);
struct irq_data *dir_conn_data = NULL;
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
int offset = 0;
- unsigned int virt = 0;
+ unsigned int virt = 0, val = 0;
+ struct msm_pinctrl *pctrl;
+ phys_addr_t spi_cfg_reg = 0;
+ unsigned long flags;
offset = select_dir_conn_mux(d, &irq);
if (offset < 0 || !parent_data)
@@ -1142,6 +1149,29 @@ static void add_dirconn_tlmm(struct irq_data *d, irq_hw_number_t irq)
dir_conn_data = &(desc->irq_data);
if (dir_conn_data) {
+
+ pctrl = gpiochip_get_data(gc);
+ if (pctrl->spi_cfg_regs) {
+ spi_cfg_reg = pctrl->spi_cfg_regs +
+ ((dir_conn_data->hwirq - 32) / 32) * 4;
+ if (spi_cfg_reg < pctrl->spi_cfg_end) {
+ spin_lock_irqsave(&pctrl->lock, flags);
+ val = scm_io_read(spi_cfg_reg);
+ /*
+ * Clear the respective bit for edge type
+ * interrupt
+ */
+ val &= ~(1 << ((dir_conn_data->hwirq - 32)
+ % 32));
+ WARN_ON(scm_io_write(spi_cfg_reg, val));
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+ } else
+ pr_err("%s: type config failed for SPI: %lu\n",
+ __func__, irq);
+ } else
+ pr_debug("%s: type config for SPI is not supported\n",
+ __func__);
+
if (dir_conn_data->chip && dir_conn_data->chip->irq_set_type)
dir_conn_data->chip->irq_set_type(dir_conn_data,
IRQ_TYPE_EDGE_RISING);
@@ -1177,23 +1207,53 @@ static int msm_dirconn_irq_set_type(struct irq_data *d, unsigned int type)
{
struct irq_desc *desc = irq_data_to_desc(d);
struct irq_data *parent_data = irq_get_irq_data(desc->parent_irq);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
irq_hw_number_t irq = 0;
+ struct msm_pinctrl *pctrl;
+ phys_addr_t spi_cfg_reg = 0;
+ unsigned int config_val = 0;
+ unsigned int val = 0;
+ unsigned long flags;
if (!parent_data)
return 0;
- if (type == IRQ_TYPE_EDGE_BOTH) {
+ pctrl = gpiochip_get_data(gc);
+
+ if (type == IRQ_TYPE_EDGE_BOTH)
add_dirconn_tlmm(d, irq);
- } else {
+ else {
if (is_gpio_dual_edge(d, &irq))
remove_dirconn_tlmm(d, irq);
}
- if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
+ /*
+ * Shared SPI config for Edge is 0 and
+ * for Level interrupt is 1
+ */
+ if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) {
irq_set_handler_locked(d, handle_level_irq);
- else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
+ config_val = 1;
+ } else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
irq_set_handler_locked(d, handle_edge_irq);
+ if (pctrl->spi_cfg_regs && type != IRQ_TYPE_NONE) {
+ spi_cfg_reg = pctrl->spi_cfg_regs +
+ ((parent_data->hwirq - 32) / 32) * 4;
+ if (spi_cfg_reg < pctrl->spi_cfg_end) {
+ spin_lock_irqsave(&pctrl->lock, flags);
+ val = scm_io_read(spi_cfg_reg);
+ val &= ~(1 << ((parent_data->hwirq - 32) % 32));
+ if (config_val)
+ val |= (1 << ((parent_data->hwirq - 32) % 32));
+ WARN_ON(scm_io_write(spi_cfg_reg, val));
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+ } else
+ pr_err("%s: type config failed for SPI: %lu\n",
+ __func__, irq);
+ } else
+ pr_debug("%s: SPI type config is not supported\n", __func__);
+
if (parent_data->chip->irq_set_type)
return parent_data->chip->irq_set_type(parent_data, type);
@@ -1388,6 +1448,7 @@ int msm_pinctrl_probe(struct platform_device *pdev,
struct msm_pinctrl *pctrl;
struct resource *res;
int ret;
+ char *key;
pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
if (!pctrl) {
@@ -1400,14 +1461,23 @@ int msm_pinctrl_probe(struct platform_device *pdev,
spin_lock_init(&pctrl->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ key = "pinctrl_regs";
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
pctrl->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(pctrl->regs))
return PTR_ERR(pctrl->regs);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ key = "pdc_regs";
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
pctrl->pdc_regs = devm_ioremap_resource(&pdev->dev, res);
+ key = "spi_cfg_regs";
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
+ if (res) {
+ pctrl->spi_cfg_regs = res->start;
+ pctrl->spi_cfg_end = res->end;
+ }
+
msm_pinctrl_setup_pm_reset(pctrl);
pctrl->irq = platform_get_irq(pdev, 0);
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm670.c b/drivers/pinctrl/qcom/pinctrl-sdm670.c
index 6cce2eb6b420..e9d7dfc4acc8 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm670.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm670.c
@@ -1586,7 +1586,7 @@ static const struct msm_pingroup sdm670_groups[] = {
[154] = SDC_QDSD_PINGROUP(sdc2_clk, 0x9a000, 14, 6),
[155] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x9a000, 11, 3),
[156] = SDC_QDSD_PINGROUP(sdc2_data, 0x9a000, 9, 0),
- [157] = UFS_RESET(ufs_reset, 0x9f000),
+ [157] = UFS_RESET(ufs_reset, 0x99d000),
};
static struct msm_dir_conn sdm670_dir_conn[] = {
{1, 510},
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c
index 9e68843dcc12..d64a1c7bfecd 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -562,8 +562,14 @@ ssize_t ipa_read(struct file *filp, char __user *buf, size_t count,
IPADBG("msg=%pK\n", msg);
locked = 0;
mutex_unlock(&ipa_ctx->msg_lock);
+ if (count < sizeof(struct ipa_msg_meta)) {
+ kfree(msg);
+ msg = NULL;
+ ret = -EFAULT;
+ break;
+ }
if (copy_to_user(buf, &msg->meta,
- sizeof(struct ipa_msg_meta))) {
+ sizeof(struct ipa_msg_meta))) {
kfree(msg);
msg = NULL;
ret = -EFAULT;
@@ -572,8 +578,15 @@ ssize_t ipa_read(struct file *filp, char __user *buf, size_t count,
buf += sizeof(struct ipa_msg_meta);
count -= sizeof(struct ipa_msg_meta);
if (msg->buff) {
- if (copy_to_user(buf, msg->buff,
- msg->meta.msg_len)) {
+ if (count >= msg->meta.msg_len) {
+ if (copy_to_user(buf, msg->buff,
+ msg->meta.msg_len)) {
+ kfree(msg);
+ msg = NULL;
+ ret = -EFAULT;
+ break;
+ }
+ } else {
kfree(msg);
msg = NULL;
ret = -EFAULT;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
index 579ab1ce40d0..81e98f2c8697 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
@@ -35,6 +35,13 @@ enum nat_table_type {
#define NAT_TABLE_ENTRY_SIZE_BYTE 32
#define NAT_INTEX_TABLE_ENTRY_SIZE_BYTE 4
+/*
+ * Max NAT table entries is limited 1000 entries.
+ * Limit the memory size required by user to prevent kernel memory starvation
+ */
+#define IPA_TABLE_MAX_ENTRIES 1000
+#define MAX_ALLOC_NAT_SIZE (IPA_TABLE_MAX_ENTRIES * NAT_TABLE_ENTRY_SIZE_BYTE)
+
static int ipa_nat_vma_fault_remap(
struct vm_area_struct *vma, struct vm_fault *vmf)
{
@@ -270,6 +277,13 @@ int ipa2_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem)
goto bail;
}
+ if (mem->size > MAX_ALLOC_NAT_SIZE) {
+ IPAERR("Trying allocate more size = %zu, Max allowed = %d\n",
+ mem->size, MAX_ALLOC_NAT_SIZE);
+ result = -EPERM;
+ goto bail;
+ }
+
if (mem->size <= 0 ||
nat_ctx->is_dev_init == true) {
IPAERR_RL("Invalid Parameters or device is already init\n");
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
index 66a8d0b22e4f..88753d5a0600 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
@@ -1325,6 +1325,8 @@ int ipa2_reset_rt(enum ipa_ip_type ip)
struct ipa_rt_entry *rule_next;
struct ipa_rt_tbl_set *rset;
u32 apps_start_idx;
+ struct ipa_hdr_entry *hdr_entry;
+ struct ipa_hdr_proc_ctx_entry *hdr_proc_entry;
int id;
if (ip >= IPA_IP_MAX) {
@@ -1369,6 +1371,25 @@ int ipa2_reset_rt(enum ipa_ip_type ip)
continue;
list_del(&rule->link);
+ if (rule->hdr) {
+ hdr_entry = ipa_id_find(rule->rule.hdr_hdl);
+ if (!hdr_entry ||
+ hdr_entry->cookie != IPA_HDR_COOKIE) {
+ IPAERR_RL("Header already deleted\n");
+ return -EINVAL;
+ }
+ } else if (rule->proc_ctx) {
+ hdr_proc_entry =
+ ipa_id_find(
+ rule->rule.hdr_proc_ctx_hdl);
+ if (!hdr_proc_entry ||
+ hdr_proc_entry->cookie !=
+ IPA_PROC_HDR_COOKIE) {
+ IPAERR_RL(
+ "Proc entry already deleted\n");
+ return -EINVAL;
+ }
+ }
tbl->rule_cnt--;
if (rule->hdr)
__ipa_release_hdr(rule->hdr->id);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
index 40ef59aae5f2..5e98bcfe57d6 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -570,6 +570,12 @@ ssize_t ipa3_read(struct file *filp, char __user *buf, size_t count,
if (msg) {
locked = 0;
mutex_unlock(&ipa3_ctx->msg_lock);
+ if (count < sizeof(struct ipa_msg_meta)) {
+ kfree(msg);
+ msg = NULL;
+ ret = -EFAULT;
+ break;
+ }
if (copy_to_user(buf, &msg->meta,
sizeof(struct ipa_msg_meta))) {
ret = -EFAULT;
@@ -580,8 +586,15 @@ ssize_t ipa3_read(struct file *filp, char __user *buf, size_t count,
buf += sizeof(struct ipa_msg_meta);
count -= sizeof(struct ipa_msg_meta);
if (msg->buff) {
- if (copy_to_user(buf, msg->buff,
- msg->meta.msg_len)) {
+ if (count >= msg->meta.msg_len) {
+ if (copy_to_user(buf, msg->buff,
+ msg->meta.msg_len)) {
+ ret = -EFAULT;
+ kfree(msg);
+ msg = NULL;
+ break;
+ }
+ } else {
ret = -EFAULT;
kfree(msg);
msg = NULL;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
index 7fd8e50009f0..c3229ebec0d9 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1523,7 +1523,6 @@ int ipa3_del_nat_table(struct ipa_ioc_nat_ipv6ct_table_del *del)
ipa3_ctx->nat_mem.pdn_mem.base,
ipa3_ctx->nat_mem.pdn_mem.phys_base);
ipa3_ctx->nat_mem.pdn_mem.base = NULL;
- ipa3_ctx->nat_mem.dev.is_mem_allocated = false;
}
ipa3_nat_ipv6ct_free_mem(&ipa3_ctx->nat_mem.dev);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
index dacccbc4d033..d86cdd194639 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
@@ -1046,13 +1046,12 @@ static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name,
goto error;
}
/*
- * do not allow any rules to be added at end of the "default" routing
- * tables
+ * do not allow any rule to be added at "default" routing
+ * table
*/
if (!strcmp(tbl->name, IPA_DFLT_RT_TBL_NAME) &&
- (tbl->rule_cnt > 0) && (at_rear != 0)) {
- IPAERR_RL("cannot add rule at end of tbl rule_cnt=%d at_rear=%d"
- , tbl->rule_cnt, at_rear);
+ (tbl->rule_cnt > 0)) {
+ IPAERR_RL("cannot add rules to default rt table\n");
goto error;
}
@@ -1258,13 +1257,12 @@ int ipa3_add_rt_rule_after(struct ipa_ioc_add_rt_rule_after *rules)
}
/*
- * do not allow any rules to be added at end of the "default" routing
- * tables
+ * do not allow any rule to be added at "default" routing
+ * table
*/
if (!strcmp(tbl->name, IPA_DFLT_RT_TBL_NAME) &&
- (&entry->link == tbl->head_rt_rule_list.prev)) {
- IPAERR_RL("cannot add rule at end of tbl rule_cnt=%d\n",
- tbl->rule_cnt);
+ (tbl->rule_cnt > 0)) {
+ IPAERR_RL("cannot add rules to default rt table\n");
ret = -EINVAL;
goto bail;
}
@@ -1470,6 +1468,8 @@ int ipa3_reset_rt(enum ipa_ip_type ip)
struct ipa3_rt_entry *rule;
struct ipa3_rt_entry *rule_next;
struct ipa3_rt_tbl_set *rset;
+ struct ipa3_hdr_entry *hdr_entry;
+ struct ipa3_hdr_proc_ctx_entry *hdr_proc_entry;
u32 apps_start_idx;
int id;
@@ -1513,13 +1513,35 @@ int ipa3_reset_rt(enum ipa_ip_type ip)
continue;
list_del(&rule->link);
+ if (rule->hdr) {
+ hdr_entry = ipa3_id_find(
+ rule->rule.hdr_hdl);
+ if (!hdr_entry ||
+ hdr_entry->cookie != IPA_HDR_COOKIE) {
+ IPAERR_RL(
+ "Header already deleted\n");
+ return -EINVAL;
+ }
+ } else if (rule->proc_ctx) {
+ hdr_proc_entry =
+ ipa3_id_find(
+ rule->rule.hdr_proc_ctx_hdl);
+ if (!hdr_proc_entry ||
+ hdr_proc_entry->cookie !=
+ IPA_PROC_HDR_COOKIE) {
+ IPAERR_RL(
+ "Proc entry already deleted\n");
+ return -EINVAL;
+ }
+ }
tbl->rule_cnt--;
if (rule->hdr)
__ipa3_release_hdr(rule->hdr->id);
else if (rule->proc_ctx)
__ipa3_release_hdr_proc_ctx(rule->proc_ctx->id);
rule->cookie = 0;
- idr_remove(tbl->rule_ids, rule->rule_id);
+ if (!rule->rule_id_valid)
+ idr_remove(tbl->rule_ids, rule->rule_id);
id = rule->id;
kmem_cache_free(ipa3_ctx->rt_rule_cache, rule);
@@ -1697,6 +1719,10 @@ static int __ipa_mdfy_rt_rule(struct ipa_rt_rule_mdfy *rtrule)
goto error;
}
+ if (!strcmp(entry->tbl->name, IPA_DFLT_RT_TBL_NAME)) {
+ IPAERR_RL("Default tbl rule cannot be modified\n");
+ return -EINVAL;
+ }
/* Adding check to confirm still
* header entry present in header table or not
*/
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index e81c562b3b6d..491f57087e1d 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -4338,7 +4338,7 @@ static int ipa3_is_vlan_mode(enum ipa_vlan_ifaces iface, bool *res)
return -EINVAL;
}
- if (iface < 0 || iface > IPA_VLAN_IF_MAX) {
+ if (iface < 0 || iface >= IPA_VLAN_IF_MAX) {
IPAERR("invalid iface %d\n", iface);
return -EINVAL;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index 23dafe15ca78..f5c4a1cf437a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -1073,8 +1073,12 @@ static int __ipa_wwan_close(struct net_device *dev)
*/
static int ipa3_wwan_stop(struct net_device *dev)
{
+ struct ipa3_wwan_private *wwan_ptr = netdev_priv(dev);
+
IPAWANDBG("[%s] ipa3_wwan_stop()\n", dev->name);
__ipa_wwan_close(dev);
+ if (ipa3_rmnet_res.ipa_napi_enable)
+ napi_disable(&(wwan_ptr->napi));
netif_stop_queue(dev);
return 0;
}
@@ -3757,6 +3761,15 @@ int rmnet_ipa3_send_lan_client_msg(
IPAWANERR("Can't allocate memory for tether_info\n");
return -ENOMEM;
}
+
+ if (data->client_event != IPA_PER_CLIENT_STATS_CONNECT_EVENT &&
+ data->client_event != IPA_PER_CLIENT_STATS_DISCONNECT_EVENT) {
+ IPAWANERR("Wrong event given. Event:- %d\n",
+ data->client_event);
+ kfree(lan_client);
+ return -EINVAL;
+ }
+ data->lan_client.lanIface[IPA_RESOURCE_NAME_MAX-1] = '\0';
memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
memcpy(lan_client, &data->lan_client,
sizeof(struct ipa_lan_client_msg));
diff --git a/drivers/platform/msm/qcom-geni-se.c b/drivers/platform/msm/qcom-geni-se.c
index 6ce21613d5ce..ed4b837017fe 100644
--- a/drivers/platform/msm/qcom-geni-se.c
+++ b/drivers/platform/msm/qcom-geni-se.c
@@ -317,7 +317,9 @@ static int geni_se_select_fifo_mode(void __iomem *base)
static int geni_se_select_dma_mode(void __iomem *base)
{
+ int proto = get_se_proto(base);
unsigned int geni_dma_mode = 0;
+ unsigned int common_geni_m_irq_en;
geni_write_reg(0, base, SE_GSI_EVENT_EN);
geni_write_reg(0xFFFFFFFF, base, SE_GENI_M_IRQ_CLEAR);
@@ -326,6 +328,12 @@ static int geni_se_select_dma_mode(void __iomem *base)
geni_write_reg(0xFFFFFFFF, base, SE_DMA_RX_IRQ_CLR);
geni_write_reg(0xFFFFFFFF, base, SE_IRQ_EN);
+ common_geni_m_irq_en = geni_read_reg(base, SE_GENI_M_IRQ_EN);
+ if (proto != UART)
+ common_geni_m_irq_en &=
+ ~(M_TX_FIFO_WATERMARK_EN | M_RX_FIFO_WATERMARK_EN);
+
+ geni_write_reg(common_geni_m_irq_en, base, SE_GENI_M_IRQ_EN);
geni_dma_mode = geni_read_reg(base, SE_GENI_DMA_MODE_EN);
geni_dma_mode |= GENI_DMA_MODE_EN;
geni_write_reg(geni_dma_mode, base, SE_GENI_DMA_MODE_EN);
diff --git a/drivers/platform/msm/sps/sps.c b/drivers/platform/msm/sps/sps.c
index 8ac7a3b3c719..fe34d881fc2e 100644
--- a/drivers/platform/msm/sps/sps.c
+++ b/drivers/platform/msm/sps/sps.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -675,7 +675,8 @@ int sps_get_bam_debug_info(unsigned long dev, u32 option, u32 para,
/* Search for the target BAM device */
bam = sps_h2bam(dev);
if (bam == NULL) {
- pr_err("sps:Can't find any BAM with handle 0x%lx.", dev);
+ pr_err("sps:Can't find any BAM with handle 0x%pK.",
+ (void *)dev);
mutex_unlock(&sps->lock);
return SPS_ERROR;
}
@@ -1226,7 +1227,7 @@ struct sps_bam *sps_h2bam(unsigned long h)
{
struct sps_bam *bam;
- SPS_DBG1(sps, "sps:%s: BAM handle:0x%lx.", __func__, h);
+ SPS_DBG1(sps, "sps:%s: BAM handle:0x%pK.", __func__, (void *)h);
if (h == SPS_DEV_HANDLE_MEM || h == SPS_DEV_HANDLE_INVALID)
return NULL;
@@ -1236,7 +1237,7 @@ struct sps_bam *sps_h2bam(unsigned long h)
return bam;
}
- SPS_ERR(sps, "sps:Can't find BAM device for handle 0x%lx.", h);
+ SPS_ERR(sps, "sps:Can't find BAM device for handle 0x%pK.", (void *)h);
return NULL;
}
@@ -1341,16 +1342,17 @@ int sps_connect(struct sps_pipe *h, struct sps_connect *connect)
bam = sps_h2bam(dev);
if (bam == NULL) {
- SPS_ERR(sps, "sps:Invalid BAM device handle: 0x%lx", dev);
+ SPS_ERR(sps, "sps:Invalid BAM device handle: 0x%pK",
+ (void *)dev);
result = SPS_ERROR;
goto exit_err;
}
mutex_lock(&bam->lock);
- SPS_DBG2(bam, "sps:sps_connect: bam %pa src 0x%lx dest 0x%lx mode %s",
+ SPS_DBG2(bam, "sps:sps_connect: bam %pa src 0x%pK dest 0x%pK mode %s",
BAM_ID(bam),
- connect->source,
- connect->destination,
+ (void *)connect->source,
+ (void *)connect->destination,
connect->mode == SPS_MODE_SRC ? "SRC" : "DEST");
/* Allocate resources for the specified connection */
@@ -1414,10 +1416,10 @@ int sps_disconnect(struct sps_pipe *h)
}
SPS_DBG2(bam,
- "sps:sps_disconnect: bam %pa src 0x%lx dest 0x%lx mode %s",
+ "sps:sps_disconnect: bam %pa src 0x%pK dest 0x%pK mode %s",
BAM_ID(bam),
- pipe->connect.source,
- pipe->connect.destination,
+ (void *)pipe->connect.source,
+ (void *)pipe->connect.destination,
pipe->connect.mode == SPS_MODE_SRC ? "SRC" : "DEST");
result = SPS_ERROR;
@@ -1813,7 +1815,8 @@ int sps_device_reset(unsigned long dev)
/* Search for the target BAM device */
bam = sps_h2bam(dev);
if (bam == NULL) {
- SPS_ERR(sps, "sps:Invalid BAM device handle: 0x%lx", dev);
+ SPS_ERR(sps, "sps:Invalid BAM device handle: 0x%pK",
+ (void *)dev);
result = SPS_ERROR;
goto exit_err;
}
@@ -1824,7 +1827,8 @@ int sps_device_reset(unsigned long dev)
result = sps_bam_reset(bam);
mutex_unlock(&bam->lock);
if (result) {
- SPS_ERR(sps, "sps:Fail to reset BAM device: 0x%lx", dev);
+ SPS_ERR(sps, "sps:Fail to reset BAM device: 0x%pK",
+ (void *)dev);
goto exit_err;
}
diff --git a/drivers/platform/msm/sps/sps_bam.c b/drivers/platform/msm/sps/sps_bam.c
index c1ab20ca9be1..91b5daf58465 100644
--- a/drivers/platform/msm/sps/sps_bam.c
+++ b/drivers/platform/msm/sps/sps_bam.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2017, 2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -888,8 +888,8 @@ int sps_bam_pipe_connect(struct sps_pipe *bam_pipe,
else
iova = bam_pipe->connect.source_iova;
SPS_DBG2(dev,
- "sps:BAM %pa pipe %d uses IOVA 0x%lx.\n",
- BAM_ID(dev), pipe_index, iova);
+ "sps:BAM %pa pipe %d uses IOVA 0x%pK.\n",
+ BAM_ID(dev), pipe_index, (void *)iova);
hw_params.peer_phys_addr = (u32)iova;
} else {
hw_params.peer_phys_addr = peer_bam->props.phys_addr;
@@ -911,9 +911,9 @@ int sps_bam_pipe_connect(struct sps_pipe *bam_pipe,
hw_params.data_base =
(phys_addr_t)bam_pipe->connect.data.iova;
SPS_DBG2(dev,
- "sps:BAM %pa pipe %d uses IOVA 0x%lx for data FIFO.\n",
+ "sps:BAM %pa pipe %d uses IOVA 0x%pK for data FIFO.\n",
BAM_ID(dev), pipe_index,
- bam_pipe->connect.data.iova);
+ (void *)(bam_pipe->connect.data.iova));
} else {
hw_params.data_base = map->data.phys_base;
}
@@ -964,9 +964,9 @@ int sps_bam_pipe_connect(struct sps_pipe *bam_pipe,
hw_params.desc_base =
(phys_addr_t)bam_pipe->connect.desc.iova;
SPS_DBG2(dev,
- "sps:BAM %pa pipe %d uses IOVA 0x%lx for desc FIFO.\n",
+ "sps:BAM %pa pipe %d uses IOVA 0x%pK for desc FIFO.\n",
BAM_ID(dev), pipe_index,
- bam_pipe->connect.desc.iova);
+ (void *)(bam_pipe->connect.desc.iova));
} else {
hw_params.desc_base = map->desc.phys_base;
}
@@ -1417,8 +1417,9 @@ int sps_bam_pipe_transfer_one(struct sps_bam *dev,
u32 next_write;
static int show_recom;
- SPS_DBG(dev, "sps:BAM %pa pipe %d addr 0x%x size 0x%x flags 0x%x\n",
- BAM_ID(dev), pipe_index, addr, size, flags);
+ SPS_DBG(dev, "sps:BAM %pa pipe %d addr 0x%pK size 0x%x flags 0x%x\n",
+ BAM_ID(dev), pipe_index,
+ (void *)(long)addr, size, flags);
/* Is this a BAM-to-BAM or satellite connection? */
if ((pipe->state & (BAM_STATE_BAM2BAM | BAM_STATE_REMOTE))) {
@@ -1944,8 +1945,8 @@ static void pipe_handler_eot(struct sps_bam *dev, struct sps_pipe *pipe)
user = &pipe->sys.user_ptrs[offset / sizeof(struct sps_iovec)];
for (;;) {
SPS_DBG(dev,
- "sps:%s; pipe index:%d; iovec addr:0x%x; size:0x%x; flags:0x%x; enabled:0x%x; *user is %s NULL.\n",
- __func__, pipe->pipe_index, cache->addr,
+ "sps:%s; pipe index:%d; iovec addr:0x%pK; size:0x%x; flags:0x%x; enabled:0x%x; *user is %s NULL.\n",
+ __func__, pipe->pipe_index, (void *)(long)cache->addr,
cache->size, cache->flags, enabled,
(*user == NULL) ? "" : "not");
@@ -2233,8 +2234,8 @@ int sps_bam_pipe_get_iovec(struct sps_bam *dev, u32 pipe_index,
pipe->sys.acked_offset = 0;
SPS_DBG(dev,
- "sps:%s; pipe index:%d; iovec addr:0x%x; size:0x%x; flags:0x%x; acked_offset:0x%x.\n",
- __func__, pipe->pipe_index, desc->addr,
+ "sps:%s; pipe index:%d; iovec addr:0x%pK; size:0x%x; flags:0x%x; acked_offset:0x%x.\n",
+ __func__, pipe->pipe_index, (void *)(long)desc->addr,
desc->size, desc->flags, pipe->sys.acked_offset);
return 0;
diff --git a/drivers/platform/msm/sps/sps_dma.c b/drivers/platform/msm/sps/sps_dma.c
index abdcabc8cddd..0cc428399ecf 100644
--- a/drivers/platform/msm/sps/sps_dma.c
+++ b/drivers/platform/msm/sps/sps_dma.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2013, 2015, 2017, The Linux Foundation. All rights
+/* Copyright (c) 2011-2013, 2015, 2017, 2019, The Linux Foundation. All rights
* reserved.
*
* This program is free software; you can redistribute it and/or modify
@@ -381,7 +381,7 @@ int sps_dma_device_de_init(unsigned long h)
dev = sps_dma_find_device(h);
if (dev == NULL) {
- SPS_ERR(sps, "sps:BAM-DMA: not registered: %lx", h);
+ SPS_ERR(sps, "sps:BAM-DMA: not registered: %pK", (void *)h);
result = SPS_ERROR;
goto exit_err;
}
@@ -547,8 +547,8 @@ int sps_alloc_dma_chan(const struct sps_alloc_dma_chan *alloc,
dev = sps_dma_find_device(alloc->dev);
if (dev == NULL) {
- SPS_ERR(sps, "sps:BAM-DMA: invalid BAM handle: %lx",
- alloc->dev);
+ SPS_ERR(sps, "sps:BAM-DMA: invalid BAM handle: %pK",
+ (void *)alloc->dev);
goto exit_err;
}
@@ -621,7 +621,8 @@ int sps_free_dma_chan(struct sps_dma_chan *chan)
dev = sps_dma_find_device(chan->dev);
if (dev == NULL) {
- SPS_ERR(sps, "sps:BAM-DMA: invalid BAM handle: %lx", chan->dev);
+ SPS_ERR(sps, "sps:BAM-DMA: invalid BAM handle: %pK",
+ (void *)chan->dev);
result = SPS_ERROR;
goto exit_err;
}
diff --git a/drivers/platform/msm/sps/sps_mem.c b/drivers/platform/msm/sps/sps_mem.c
index 105135a0e022..f5e026ba62ed 100644
--- a/drivers/platform/msm/sps/sps_mem.c
+++ b/drivers/platform/msm/sps/sps_mem.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2013, 2015, 2017, The Linux Foundation.
+/* Copyright (c) 2011-2013, 2015, 2017, 2019, The Linux Foundation.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
@@ -75,8 +75,8 @@ phys_addr_t sps_mem_alloc_io(u32 bytes)
return SPS_ADDR_INVALID;
}
- SPS_DBG3(sps, "sps:sps_mem_alloc_io.phys=%pa.virt=0x%lx.size=0x%x.",
- &phys_addr, virt_addr, bytes);
+ SPS_DBG3(sps, "sps:sps_mem_alloc_io.phys=%pa.virt=0x%pK.size=0x%x.",
+ &phys_addr, (void *)virt_addr, bytes);
return phys_addr;
}
@@ -92,8 +92,8 @@ void sps_mem_free_io(phys_addr_t phys_addr, u32 bytes)
iomem_offset = phys_addr - iomem_phys;
virt_addr = (uintptr_t) iomem_virt + iomem_offset;
- SPS_DBG3(sps, "sps:sps_mem_free_io.phys=%pa.virt=0x%lx.size=0x%x.",
- &phys_addr, virt_addr, bytes);
+ SPS_DBG3(sps, "sps:sps_mem_free_io.phys=%pa.virt=0x%pK.size=0x%x.",
+ &phys_addr, (void *)virt_addr, bytes);
gen_pool_free(pool, virt_addr, bytes);
total_free += bytes;
diff --git a/drivers/platform/msm/sps/sps_rm.c b/drivers/platform/msm/sps/sps_rm.c
index 276b847979e1..58e365bc2239 100644
--- a/drivers/platform/msm/sps/sps_rm.c
+++ b/drivers/platform/msm/sps/sps_rm.c
@@ -1,4 +1,5 @@
-/* Copyright (c) 2011-2015, 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2015, 2017, 2019, The Linux Foundation.
+ * All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -380,8 +381,8 @@ static struct sps_connection *sps_rm_create(struct sps_pipe *pipe)
map->src.bam = sps_h2bam(map->src.dev);
if (map->src.bam == NULL) {
if (map->src.dev != SPS_DEV_HANDLE_MEM) {
- SPS_ERR(sps, "sps:Invalid BAM handle: %pa",
- &map->src.dev);
+ SPS_ERR(sps, "sps:Invalid BAM handle: %pK",
+ (void *)(&map->src.dev));
goto exit_err;
}
map->src.pipe_index = SPS_BAM_PIPE_INVALID;
@@ -389,8 +390,8 @@ static struct sps_connection *sps_rm_create(struct sps_pipe *pipe)
map->dest.bam = sps_h2bam(map->dest.dev);
if (map->dest.bam == NULL) {
if (map->dest.dev != SPS_DEV_HANDLE_MEM) {
- SPS_ERR(sps, "sps:Invalid BAM handle: %pa",
- &map->dest.dev);
+ SPS_ERR(sps, "sps:Invalid BAM handle: %pK",
+ (void *)(&map->dest.dev));
goto exit_err;
}
map->dest.pipe_index = SPS_BAM_PIPE_INVALID;
@@ -399,8 +400,8 @@ static struct sps_connection *sps_rm_create(struct sps_pipe *pipe)
/* Check the BAM device for the pipe */
if ((dir == SPS_MODE_SRC && map->src.bam == NULL) ||
(dir != SPS_MODE_SRC && map->dest.bam == NULL)) {
- SPS_ERR(sps, "sps:Invalid BAM endpt: dir %d src %pa dest %pa",
- dir, &map->src.dev, &map->dest.dev);
+ SPS_ERR(sps, "sps:Invalid BAM endpt: dir %d src %pK dest %pK",
+ dir, (void *)(&map->src.dev), (void *)(&map->dest.dev));
goto exit_err;
}
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 07bad4991efc..bfd117469ffe 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -790,16 +790,9 @@ source "drivers/soc/qcom/wcnss/Kconfig"
config QCOM_HYP_CORE_CTL
bool "CPU reservation scheme for Hypervisor"
+ depends on OKL4_GUEST
help
This driver reserve the specified CPUS by isolating them. The reserved
CPUs can be assigned to the other guest OS by the hypervisor.
An offline CPU is considered as a reserved CPU since this OS can't use
it.
-
-config QCOM_HYP_CORE_CTL_RESERVE_CPUS
- string "Reserve CPUs for HYP_CORE_CTL"
- depends on QCOM_HYP_CORE_CTL
- default "4-5" if ARCH_SDM670
- help
- A compile time knob for specifying the cpumask that contains the CPUs
- to be reserved by the QCOM_HYP_CORE_CTL driver.
diff --git a/drivers/soc/qcom/glink_smem_native_xprt.c b/drivers/soc/qcom/glink_smem_native_xprt.c
index da122fc82e73..28f1ba73547e 100644
--- a/drivers/soc/qcom/glink_smem_native_xprt.c
+++ b/drivers/soc/qcom/glink_smem_native_xprt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -459,6 +459,9 @@ static int fifo_read(struct edge_info *einfo, void *_data, int len)
uint32_t fifo_size = einfo->rx_fifo_size;
uint32_t n;
+ if (read_index >= fifo_size || write_index >= fifo_size)
+ return 0;
+
while (len) {
ptr = einfo->rx_fifo + read_index;
if (read_index <= write_index)
@@ -505,6 +508,9 @@ static int fifo_write_body(struct edge_info *einfo, const void *_data,
uint32_t fifo_size = einfo->tx_fifo_size;
uint32_t n;
+ if (read_index >= fifo_size || *write_index >= fifo_size)
+ return 0;
+
while (len) {
ptr = einfo->tx_fifo + *write_index;
if (*write_index < read_index) {
diff --git a/drivers/soc/qcom/glink_spi_xprt.c b/drivers/soc/qcom/glink_spi_xprt.c
index a08c4bfde4a2..efcd94cdda01 100644
--- a/drivers/soc/qcom/glink_spi_xprt.c
+++ b/drivers/soc/qcom/glink_spi_xprt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -484,6 +484,13 @@ static int glink_spi_xprt_rx_cmd(struct edge_info *einfo, void *dst,
int ret;
read_id = einfo->rx_fifo_read;
+ if ((read_id > (einfo->rx_fifo_start + einfo->fifo_size)) ||
+ (read_id < einfo->rx_fifo_start)) {
+ pr_err("%s: Invalid rx_fifo_read: %d, start: %d, size: %d\n",
+ __func__, read_id, einfo->rx_fifo_start,
+ einfo->fifo_size);
+ return -EINVAL;
+ }
do {
if ((read_id + size_to_read) >=
(einfo->rx_fifo_start + einfo->fifo_size))
@@ -722,11 +729,11 @@ static void process_rx_cmd(struct edge_info *einfo,
struct rx_short_data_desc {
unsigned char data[SHORT_PKT_SIZE];
};
- struct command *cmd;
+ struct command *cmd = NULL;
struct intent_desc *intents;
struct rx_desc *rx_descp;
struct rx_short_data_desc *rx_sd_descp;
- int offset = 0;
+ uint64_t offset = 0;
int rcu_id;
uint16_t rcid;
uint16_t name_len;
@@ -742,6 +749,8 @@ static void process_rx_cmd(struct edge_info *einfo,
}
while (offset < rx_size) {
+ if (offset + sizeof(*cmd) > rx_size)
+ goto err;
cmd = (struct command *)(rx_data + offset);
offset += sizeof(*cmd);
switch (cmd->id) {
@@ -760,7 +769,12 @@ static void process_rx_cmd(struct edge_info *einfo,
case OPEN_CMD:
rcid = cmd->param1;
name_len = (uint16_t)(cmd->param2 & 0xFFFF);
+ if (name_len > GLINK_NAME_SIZE)
+ goto err;
prio = (uint16_t)((cmd->param2 & 0xFFFF0000) >> 16);
+ if (offset + ALIGN(name_len, FIFO_ALIGNMENT) >
+ rx_size)
+ goto err;
name = (char *)(rx_data + offset);
offset += ALIGN(name_len, FIFO_ALIGNMENT);
einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_remote_open(
@@ -786,6 +800,8 @@ static void process_rx_cmd(struct edge_info *einfo,
case RX_INTENT_CMD:
for (i = 0; i < cmd->param2; i++) {
+ if (offset + sizeof(*intents) > rx_size)
+ goto err;
intents = (struct intent_desc *)
(rx_data + offset);
offset += sizeof(*intents);
@@ -821,6 +837,8 @@ static void process_rx_cmd(struct edge_info *einfo,
case TX_DATA_CONT_CMD:
case TRACER_PKT_CMD:
case TRACER_PKT_CONT_CMD:
+ if (offset + sizeof(*rx_descp) > rx_size)
+ goto err;
rx_descp = (struct rx_desc *)(rx_data + offset);
offset += sizeof(*rx_descp);
process_rx_data(einfo, cmd->id, cmd->param1,
@@ -830,6 +848,8 @@ static void process_rx_cmd(struct edge_info *einfo,
break;
case TX_SHORT_DATA_CMD:
+ if (offset + sizeof(*rx_sd_descp) > rx_size)
+ goto err;
rx_sd_descp = (struct rx_short_data_desc *)
(rx_data + offset);
offset += sizeof(*rx_sd_descp);
@@ -858,6 +878,13 @@ static void process_rx_cmd(struct edge_info *einfo,
}
}
srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return;
+err:
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ if (cmd)
+ pr_err("%s: invalid size of rx_data: %d, cmd : %d\n",
+ __func__, rx_size, cmd->id);
+ return;
}
/**
diff --git a/drivers/soc/qcom/hyp_core_ctl.c b/drivers/soc/qcom/hyp_core_ctl.c
index 3ed89fefe856..3b1259c6a433 100644
--- a/drivers/soc/qcom/hyp_core_ctl.c
+++ b/drivers/soc/qcom/hyp_core_ctl.c
@@ -19,6 +19,31 @@
#include <linux/sched/rt.h>
#include <linux/slab.h>
#include <linux/cpuhotplug.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/cpu_cooling.h>
+#include <linux/mutex.h>
+#include <linux/debugfs.h>
+
+#include <microvisor/microvisor.h>
+
+#define MAX_RESERVE_CPUS (num_possible_cpus()/2)
+
+/**
+ * struct hyp_core_ctl_cpumap - vcpu to pcpu mapping for the other guest
+ * @sid: System call id to be used while referring to this vcpu
+ * @pcpu: The physical CPU number corresponding to this vcpu
+ * @curr_pcpu: The current physical CPU number corresponding to this vcpu.
+ * The curr_pcu is set to another CPU when the original assigned
+ * CPU i.e pcpu can't be used due to thermal condition.
+ *
+ */
+struct hyp_core_ctl_cpu_map {
+ okl4_kcap_t sid;
+ okl4_cpu_id_t pcpu;
+ okl4_cpu_id_t curr_pcpu;
+};
/**
* struct hyp_core_ctl_data - The private data structure of this driver
@@ -26,20 +51,28 @@
* @task: task_struct pointer to the thread running the state machine
* @pending: state machine work pending status
* @reservation_enabled: status of the reservation
- *
+ * @reservation_mutex: synchronization between thermal handling and
+ * reservation. The physical CPUs are re-assigned
+ * during thermal conditions while reservation is
+ * not enabled. So this synchronization is needed.
* @reserve_cpus: The CPUs to be reserved. input.
* @our_isolated_cpus: The CPUs isolated by hyp_core_ctl driver. output.
* @final_reserved_cpus: The CPUs reserved for the Hypervisor. output.
*
+ * @syscall_id: The system call id for manipulating vcpu to pcpu mappings.
+ * @cpumap: The vcpu to pcpu mapping table
*/
struct hyp_core_ctl_data {
spinlock_t lock;
struct task_struct *task;
bool pending;
bool reservation_enabled;
+ struct mutex reservation_mutex;
cpumask_t reserve_cpus;
cpumask_t our_isolated_cpus;
cpumask_t final_reserved_cpus;
+ okl4_kcap_t syscall_id;
+ struct hyp_core_ctl_cpu_map cpumap[NR_CPUS];
};
#define CREATE_TRACE_POINTS
@@ -51,12 +84,13 @@ static inline void hyp_core_ctl_print_status(char *msg)
{
trace_hyp_core_ctl_status(the_hcd, msg);
- pr_debug("%s: reserve=%*pbl reserved=%*pbl our_isolated=%*pbl online=%*pbl isolated=%*pbl\n",
+ pr_debug("%s: reserve=%*pbl reserved=%*pbl our_isolated=%*pbl online=%*pbl isolated=%*pbl thermal=%*pbl\n",
msg, cpumask_pr_args(&the_hcd->reserve_cpus),
cpumask_pr_args(&the_hcd->final_reserved_cpus),
cpumask_pr_args(&the_hcd->our_isolated_cpus),
cpumask_pr_args(cpu_online_mask),
- cpumask_pr_args(cpu_isolated_mask));
+ cpumask_pr_args(cpu_isolated_mask),
+ cpumask_pr_args(cpu_cooling_get_max_level_cpumask()));
}
static void hyp_core_ctl_undo_reservation(struct hyp_core_ctl_data *hcd)
@@ -79,16 +113,128 @@ static void hyp_core_ctl_undo_reservation(struct hyp_core_ctl_data *hcd)
static void finalize_reservation(struct hyp_core_ctl_data *hcd, cpumask_t *temp)
{
+ cpumask_t vcpu_adjust_mask;
+ int i, orig_cpu, curr_cpu, replacement_cpu;
+ okl4_error_t err;
+
+ /*
+ * When thermal conditions are not present, we return
+ * from here.
+ */
if (cpumask_equal(temp, &hcd->final_reserved_cpus))
return;
+ /*
+ * When we can't match with the original reserve CPUs request,
+ * don't change the existing scheme. We can't assign the
+ * same physical CPU to multiple virtual CPUs.
+ *
+ * This may only happen when thermal isolate more CPUs.
+ */
+ if (cpumask_weight(temp) < cpumask_weight(&hcd->reserve_cpus)) {
+ pr_debug("Fail to reserve some CPUs\n");
+ return;
+ }
+
cpumask_copy(&hcd->final_reserved_cpus, temp);
+ cpumask_clear(&vcpu_adjust_mask);
+
+ /*
+ * In the first pass, we traverse all virtual CPUs and try
+ * to assign their original physical CPUs if they are
+ * reserved. if the original physical CPU is not reserved,
+ * then check the current physical CPU is reserved or not.
+ * so that we continue to use the current physical CPU.
+ *
+ * If both original CPU and the current CPU are not reserved,
+ * we have to find a replacement. These virtual CPUs are
+ * maintained in vcpu_adjust_mask and processed in the 2nd pass.
+ */
+ for (i = 0; i < MAX_RESERVE_CPUS; i++) {
+ if (hcd->cpumap[i].sid == 0)
+ break;
+
+ orig_cpu = hcd->cpumap[i].pcpu;
+ curr_cpu = hcd->cpumap[i].curr_pcpu;
+
+ if (cpumask_test_cpu(orig_cpu, &hcd->final_reserved_cpus)) {
+ cpumask_clear_cpu(orig_cpu, temp);
+
+ if (orig_cpu == curr_cpu)
+ continue;
+
+ /*
+ * The original pcpu corresponding to this vcpu i.e i
+ * is available in final_reserved_cpus. so restore
+ * the assignment.
+ */
+ err = _okl4_sys_scheduler_affinity_set(hcd->syscall_id,
+ hcd->cpumap[i].sid, orig_cpu);
+ if (err != OKL4_ERROR_OK) {
+ pr_err("fail to assign pcpu for vcpu#%d\n", i);
+ continue;
+ }
+
+ hcd->cpumap[i].curr_pcpu = orig_cpu;
+ pr_debug("err=%u vcpu=%d pcpu=%u curr_cpu=%u\n",
+ err, i, hcd->cpumap[i].pcpu,
+ hcd->cpumap[i].curr_pcpu);
+ continue;
+ }
+
+ /*
+ * The original CPU is not available but the previously
+ * assigned CPU i.e curr_cpu is still available. so keep
+ * using it.
+ */
+ if (cpumask_test_cpu(curr_cpu, &hcd->final_reserved_cpus)) {
+ cpumask_clear_cpu(curr_cpu, temp);
+ continue;
+ }
+
+ /*
+ * A replacement CPU is found in the 2nd pass below. Make
+ * a note of this virtual CPU for which both original and
+ * current physical CPUs are not available in the
+ * final_reserved_cpus.
+ */
+ cpumask_set_cpu(i, &vcpu_adjust_mask);
+ }
+
+ /*
+ * The vcpu_adjust_mask contain the virtual CPUs that needs
+ * re-assignment. The temp CPU mask contains the remaining
+ * reserved CPUs. so we pick one by one from the remaining
+ * reserved CPUs and assign them to the pending virtual
+ * CPUs.
+ */
+ for_each_cpu(i, &vcpu_adjust_mask) {
+ replacement_cpu = cpumask_any(temp);
+ cpumask_clear_cpu(replacement_cpu, temp);
+
+ err = _okl4_sys_scheduler_affinity_set(hcd->syscall_id,
+ hcd->cpumap[i].sid, replacement_cpu);
+ if (err != OKL4_ERROR_OK) {
+ pr_err("fail to assign pcpu for vcpu#%d\n", i);
+ continue;
+ }
+
+ hcd->cpumap[i].curr_pcpu = replacement_cpu;
+ pr_debug("adjust err=%u vcpu=%d pcpu=%u curr_cpu=%u\n",
+ err, i, hcd->cpumap[i].pcpu,
+ hcd->cpumap[i].curr_pcpu);
+
+ }
+
+ /* Did we reserve more CPUs than needed? */
+ WARN_ON(!cpumask_empty(temp));
}
static void hyp_core_ctl_do_reservation(struct hyp_core_ctl_data *hcd)
{
cpumask_t offline_cpus, iter_cpus, temp_reserved_cpus;
- int i, ret;
+ int i, ret, iso_required, iso_done;
+ const cpumask_t *thermal_cpus = cpu_cooling_get_max_level_cpumask();
cpumask_clear(&offline_cpus);
cpumask_clear(&temp_reserved_cpus);
@@ -102,6 +248,7 @@ static void hyp_core_ctl_do_reservation(struct hyp_core_ctl_data *hcd)
* will be isolated to honor the reservation.
*/
cpumask_andnot(&iter_cpus, &hcd->reserve_cpus, &hcd->our_isolated_cpus);
+ cpumask_andnot(&iter_cpus, &iter_cpus, thermal_cpus);
for_each_cpu(i, &iter_cpus) {
if (!cpu_online(i)) {
@@ -111,12 +258,102 @@ static void hyp_core_ctl_do_reservation(struct hyp_core_ctl_data *hcd)
ret = sched_isolate_cpu(i);
if (ret < 0) {
- pr_err("fail to isolate CPU%d. ret=%d\n", i, ret);
+ pr_debug("fail to isolate CPU%d. ret=%d\n", i, ret);
continue;
}
cpumask_set_cpu(i, &hcd->our_isolated_cpus);
}
+ cpumask_andnot(&iter_cpus, &hcd->reserve_cpus, &offline_cpus);
+ iso_required = cpumask_weight(&iter_cpus);
+ iso_done = cpumask_weight(&hcd->our_isolated_cpus);
+
+ if (iso_done < iso_required) {
+ int isolate_need;
+
+ /*
+ * We have isolated fewer CPUs than required. This happens
+ * when some of the CPUs from the reserved_cpus mask
+ * are managed by thermal. Find the replacement CPUs and
+ * isolate them.
+ */
+ isolate_need = iso_required - iso_done;
+
+ /*
+ * Create a cpumask from which replacement CPUs can be
+ * picked. Exclude our isolated CPUs, thermal managed
+ * CPUs and offline CPUs, which are already considered
+ * as reserved.
+ */
+ cpumask_andnot(&iter_cpus, cpu_possible_mask,
+ &hcd->our_isolated_cpus);
+ cpumask_andnot(&iter_cpus, &iter_cpus, thermal_cpus);
+ cpumask_andnot(&iter_cpus, &iter_cpus, &offline_cpus);
+
+ /*
+ * Keep the replacement policy simple. The offline CPUs
+ * comes for free. so pick them first.
+ */
+ for_each_cpu(i, &iter_cpus) {
+ if (!cpu_online(i)) {
+ cpumask_set_cpu(i, &offline_cpus);
+ if (--isolate_need == 0)
+ goto done;
+ }
+ }
+
+ cpumask_andnot(&iter_cpus, &iter_cpus, &offline_cpus);
+
+ for_each_cpu(i, &iter_cpus) {
+ ret = sched_isolate_cpu(i);
+ if (ret < 0) {
+ pr_debug("fail to isolate CPU%d. ret=%d\n",
+ i, ret);
+ continue;
+ }
+ cpumask_set_cpu(i, &hcd->our_isolated_cpus);
+
+ if (--isolate_need == 0)
+ break;
+ }
+ } else if (iso_done > iso_required) {
+ int unisolate_need;
+
+ /*
+ * We have isolated more CPUs than required. Un-isolate
+ * the additional CPUs which are not part of the
+ * reserve_cpus mask.
+ *
+ * This happens in the following scenario.
+ *
+ * - Lets say reserve CPUs are CPU4 and CPU5. They are
+ * isolated.
+ * - CPU4 is isolated by thermal. We found CPU0 as the
+ * replacement CPU. Now CPU0 and CPU5 are isolated by
+ * us.
+ * - CPU4 is un-isolated by thermal. We first isolate CPU4
+ * since it is part of our reserve CPUs. Now CPU0, CPU4
+ * and CPU5 are isolated by us.
+ * - Since iso_done (3) > iso_required (2), un-isolate
+ * a CPU which is not part of the reserve CPU. i.e CPU0.
+ */
+ unisolate_need = iso_done - iso_required;
+ cpumask_andnot(&iter_cpus, &hcd->our_isolated_cpus,
+ &hcd->reserve_cpus);
+ for_each_cpu(i, &iter_cpus) {
+ ret = sched_unisolate_cpu(i);
+ if (ret < 0) {
+ pr_err("fail to unisolate CPU%d. ret=%d\n",
+ i, ret);
+ continue;
+ }
+ cpumask_clear_cpu(i, &hcd->our_isolated_cpus);
+ if (--unisolate_need == 0)
+ break;
+ }
+ }
+
+done:
cpumask_or(&temp_reserved_cpus, &hcd->our_isolated_cpus, &offline_cpus);
finalize_reservation(hcd, &temp_reserved_cpus);
@@ -144,15 +381,153 @@ static int hyp_core_ctl_thread(void *data)
if (kthread_should_stop())
break;
+ /*
+ * The reservation mutex synchronize the reservation
+ * happens in this thread against the thermal handling.
+ * The CPU re-assignment happens directly from the
+ * thermal callback context when the reservation is
+ * not enabled, since there is no need for isolating.
+ */
+ mutex_lock(&hcd->reservation_mutex);
if (hcd->reservation_enabled)
hyp_core_ctl_do_reservation(hcd);
else
hyp_core_ctl_undo_reservation(hcd);
+ mutex_unlock(&hcd->reservation_mutex);
}
return 0;
}
+static void hyp_core_ctl_handle_thermal(struct hyp_core_ctl_data *hcd,
+ int cpu, bool throttled)
+{
+ cpumask_t temp_mask, iter_cpus;
+ const cpumask_t *thermal_cpus = cpu_cooling_get_max_level_cpumask();
+ bool notify = false;
+ int replacement_cpu;
+
+ hyp_core_ctl_print_status("handle_thermal_start");
+
+ /*
+ * Take a copy of the final_reserved_cpus and adjust the mask
+ * based on the notified CPU's thermal state.
+ */
+ cpumask_copy(&temp_mask, &hcd->final_reserved_cpus);
+
+ if (throttled) {
+ /*
+ * Find a replacement CPU for this throttled CPU. Select
+ * any CPU that is not managed by thermal and not already
+ * part of the assigned CPUs.
+ */
+ cpumask_andnot(&iter_cpus, cpu_possible_mask, thermal_cpus);
+ cpumask_andnot(&iter_cpus, &iter_cpus,
+ &hcd->final_reserved_cpus);
+ replacement_cpu = cpumask_any(&iter_cpus);
+
+ if (replacement_cpu < nr_cpu_ids) {
+ cpumask_clear_cpu(cpu, &temp_mask);
+ cpumask_set_cpu(replacement_cpu, &temp_mask);
+ notify = true;
+ }
+ } else {
+ /*
+ * One of the original assigned CPU is unthrottled by thermal.
+ * Swap this CPU with any one of the replacement CPUs.
+ */
+ cpumask_andnot(&iter_cpus, &hcd->final_reserved_cpus,
+ &hcd->reserve_cpus);
+ replacement_cpu = cpumask_any(&iter_cpus);
+
+ if (replacement_cpu < nr_cpu_ids) {
+ cpumask_clear_cpu(replacement_cpu, &temp_mask);
+ cpumask_set_cpu(cpu, &temp_mask);
+ notify = true;
+ }
+ }
+
+ if (notify)
+ finalize_reservation(hcd, &temp_mask);
+
+ hyp_core_ctl_print_status("handle_thermal_end");
+}
+
+static int hyp_core_ctl_cpu_cooling_cb(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ int cpu = (long) data;
+ const cpumask_t *thermal_cpus = cpu_cooling_get_max_level_cpumask();
+
+ if (!the_hcd)
+ return NOTIFY_DONE;
+
+ mutex_lock(&the_hcd->reservation_mutex);
+
+ pr_debug("CPU%d is %s by thermal\n", cpu,
+ val ? "throttled" : "unthrottled");
+
+ if (val) {
+ /*
+ * The thermal mitigated CPU is not part of our reserved
+ * CPUs. So nothing to do.
+ */
+ if (!cpumask_test_cpu(cpu, &the_hcd->final_reserved_cpus))
+ goto out;
+
+ /*
+ * The thermal mitigated CPU is part of our reserved CPUs.
+ *
+ * If it is isolated by us, unisolate it. If it is not
+ * isolated, probably it is offline. In both cases, kick
+ * the state machine to find a replacement CPU.
+ */
+ if (cpumask_test_cpu(cpu, &the_hcd->our_isolated_cpus)) {
+ sched_unisolate_cpu(cpu);
+ cpumask_clear_cpu(cpu, &the_hcd->our_isolated_cpus);
+ }
+ } else {
+ /*
+ * A CPU is unblocked by thermal. We are interested if
+ *
+ * (1) This CPU is part of the original reservation request
+ * In this case, this CPU should be swapped with one of
+ * the replacement CPU that is currently reserved.
+ * (2) When some of the thermal mitigated CPUs are currently
+ * reserved due to unavailability of CPUs. Now that
+ * thermal unblocked a CPU, swap this with one of the
+ * thermal mitigated CPU that is currently reserved.
+ */
+ if (!cpumask_test_cpu(cpu, &the_hcd->reserve_cpus) &&
+ !cpumask_intersects(&the_hcd->final_reserved_cpus,
+ thermal_cpus))
+ goto out;
+ }
+
+ if (the_hcd->reservation_enabled) {
+ spin_lock(&the_hcd->lock);
+ the_hcd->pending = true;
+ wake_up_process(the_hcd->task);
+ spin_unlock(&the_hcd->lock);
+ } else {
+ /*
+ * When the reservation is enabled, the state machine
+ * takes care of finding the new replacement CPU or
+ * isolating the unthrottled CPU. However when the
+ * reservation is not enabled, we still want to
+ * re-assign another CPU for a throttled CPU.
+ */
+ hyp_core_ctl_handle_thermal(the_hcd, cpu, val);
+ }
+out:
+ mutex_unlock(&the_hcd->reservation_mutex);
+ return NOTIFY_OK;
+}
+
+static struct notifier_block hyp_core_ctl_nb = {
+ .notifier_call = hyp_core_ctl_cpu_cooling_cb,
+};
+
static int hyp_core_ctl_hp_offline(unsigned int cpu)
{
if (!the_hcd || !the_hcd->reservation_enabled)
@@ -189,6 +564,78 @@ static int hyp_core_ctl_hp_online(unsigned int cpu)
return 0;
}
+/*
+ * hyp_core_ctl_init_reserve_cpus() - seed the reservation masks from the
+ * hypervisor's current vcpu->pcpu affinity.
+ *
+ * Walks hcd->cpumap[] (terminated by the first zero SID) and queries the
+ * OKL4 scheduler for each vcpu's physical CPU. Each pcpu found is added
+ * to hcd->reserve_cpus, and final_reserved_cpus starts out as a copy of
+ * that mask.
+ *
+ * Returns 0 on success, -EPERM if any affinity query fails (the loop
+ * stops at the first failure; CPUs discovered before it remain set, but
+ * the caller treats a negative return as fatal).
+ */
+static int hyp_core_ctl_init_reserve_cpus(struct hyp_core_ctl_data *hcd)
+{
+ struct _okl4_sys_scheduler_affinity_get_return result;
+ int i, ret = 0;
+
+ cpumask_clear(&hcd->reserve_cpus);
+
+ for (i = 0; i < MAX_RESERVE_CPUS; i++) {
+ /* A zero SID marks the end of the configured vcpu list */
+ if (hcd->cpumap[i].sid == 0)
+ break;
+
+ result = _okl4_sys_scheduler_affinity_get(hcd->syscall_id,
+ hcd->cpumap[i].sid);
+ if (result.error != OKL4_ERROR_OK) {
+ pr_err("fail to get pcpu for vcpu%d. err=%u\n",
+ i, result.error);
+ ret = -EPERM;
+ break;
+ }
+ /* pcpu is the requested mapping; curr_pcpu tracks where it is now */
+ hcd->cpumap[i].pcpu = result.cpu_index;
+ hcd->cpumap[i].curr_pcpu = result.cpu_index;
+ cpumask_set_cpu(hcd->cpumap[i].pcpu, &hcd->reserve_cpus);
+ pr_debug("vcpu%u map to pcpu%u\n", i, result.cpu_index);
+ }
+
+ cpumask_copy(&hcd->final_reserved_cpus, &hcd->reserve_cpus);
+ pr_info("reserve_cpus=%*pbl ret=%d\n",
+ cpumask_pr_args(&hcd->reserve_cpus), ret);
+
+ return ret;
+}
+
+/*
+ * hyp_core_ctl_parse_dt() - parse the "reg" devicetree property.
+ *
+ * reg[0] is the OKL4 scheduler-control syscall id; reg[1..] are the
+ * SIDs of the vcpus to manage (at most MAX_RESERVE_CPUS, all non-zero).
+ *
+ * Returns 0 on success, -EINVAL on a malformed property, -ENOMEM on
+ * allocation failure.
+ */
+static int hyp_core_ctl_parse_dt(struct platform_device *pdev,
+ struct hyp_core_ctl_data *hcd)
+{
+ struct device_node *np = pdev->dev.of_node;
+ int len, ret, i;
+ u32 *reg_values;
+
+ len = of_property_count_u32_elems(np, "reg");
+ if (len < 2 || len > MAX_RESERVE_CPUS + 1) {
+ pr_err("incorrect reg dt param. err=%d\n", len);
+ return -EINVAL;
+ }
+
+ reg_values = kmalloc_array(len, sizeof(*reg_values), GFP_KERNEL);
+ if (!reg_values)
+ return -ENOMEM;
+
+ ret = of_property_read_u32_array(np, "reg", reg_values, len);
+ if (ret < 0) {
+ pr_err("fail to read reg dt param. err=%d\n", ret);
+ /* Fix: the original returned here directly, leaking reg_values */
+ ret = -EINVAL;
+ goto free_reg;
+ }
+
+ hcd->syscall_id = reg_values[0];
+
+ ret = 0;
+ for (i = 1; i < len; i++) {
+ /* A zero SID is invalid; keep scanning so every bad entry is logged */
+ if (reg_values[i] == 0) {
+ ret = -EINVAL;
+ pr_err("incorrect sid for vcpu%d\n", i);
+ }
+
+ hcd->cpumap[i-1].sid = reg_values[i];
+ pr_debug("vcpu=%d sid=%u\n", i-1, hcd->cpumap[i-1].sid);
+ }
+
+free_reg:
+ kfree(reg_values);
+ return ret;
+}
+
static void hyp_core_ctl_enable(bool enable)
{
spin_lock(&the_hcd->lock);
@@ -223,16 +670,78 @@ static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- if (!the_hcd)
- return -EPERM;
-
return scnprintf(buf, PAGE_SIZE, "%u\n", the_hcd->reservation_enabled);
}
static DEVICE_ATTR_RW(enable);
+/*
+ * status_show() - sysfs read handler dumping the full reservation state.
+ *
+ * Emits, one per line: the enable flag, the requested/actual reservation
+ * masks, the CPUs this driver isolated, the online/isolated/thermal
+ * masks, and the live vcpu->pcpu mappings queried from the hypervisor.
+ * Every write is bounded by scnprintf() against the remaining PAGE_SIZE,
+ * so the output cannot overrun @buf.
+ *
+ * Holds reservation_mutex for a consistent snapshot.
+ * Returns the number of bytes written to @buf.
+ */
+static ssize_t status_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct hyp_core_ctl_data *hcd = the_hcd;
+ ssize_t count;
+ int i;
+
+ mutex_lock(&hcd->reservation_mutex);
+
+ count = scnprintf(buf, PAGE_SIZE, "enabled=%d\n",
+ hcd->reservation_enabled);
+
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "reserve_cpus=%*pbl\n",
+ cpumask_pr_args(&hcd->reserve_cpus));
+
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "reserved_cpus=%*pbl\n",
+ cpumask_pr_args(&hcd->final_reserved_cpus));
+
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "our_isolated_cpus=%*pbl\n",
+ cpumask_pr_args(&hcd->our_isolated_cpus));
+
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "online_cpus=%*pbl\n",
+ cpumask_pr_args(cpu_online_mask));
+
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "isolated_cpus=%*pbl\n",
+ cpumask_pr_args(cpu_isolated_mask));
+
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "thermal_cpus=%*pbl\n",
+ cpumask_pr_args(cpu_cooling_get_max_level_cpumask()));
+
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "Vcpu to Pcpu mappings:\n");
+
+ /* Query live affinity per vcpu; silently skip entries the hyp rejects */
+ for (i = 0; i < MAX_RESERVE_CPUS; i++) {
+ struct _okl4_sys_scheduler_affinity_get_return result;
+
+ if (hcd->cpumap[i].sid == 0)
+ break;
+
+ result = _okl4_sys_scheduler_affinity_get(hcd->syscall_id,
+ hcd->cpumap[i].sid);
+ if (result.error != OKL4_ERROR_OK)
+ continue;
+
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "vcpu=%d pcpu=%u curr_pcpu=%u hyp_pcpu=%u\n",
+ i, hcd->cpumap[i].pcpu, hcd->cpumap[i].curr_pcpu,
+ result.cpu_index);
+
+ }
+
+ mutex_unlock(&hcd->reservation_mutex);
+
+ return count;
+}
+
+static DEVICE_ATTR_RO(status);
+
static struct attribute *hyp_core_ctl_attrs[] = {
&dev_attr_enable.attr,
+ &dev_attr_status.attr,
NULL
};
@@ -241,7 +750,74 @@ static struct attribute_group hyp_core_ctl_attr_group = {
.name = "hyp_core_ctl",
};
-static int __init hyp_core_ctl_init(void)
+#define CPULIST_SZ 32
+/* debugfs read: report the requested reservation mask as a cpulist. */
+static ssize_t read_reserve_cpus(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char kbuf[CPULIST_SZ];
+ int len;
+
+ len = scnprintf(kbuf, sizeof(kbuf), "%*pbl\n",
+ cpumask_pr_args(&the_hcd->reserve_cpus));
+
+ return simple_read_from_buffer(ubuf, count, ppos, kbuf, len);
+}
+
+/*
+ * write_reserve_cpus() - debugfs write: replace the requested reservation
+ * mask with a user-supplied cpulist.
+ *
+ * The new mask must contain exactly as many CPUs as the current one, and
+ * the reservation must be disabled while it is changed.
+ *
+ * Returns @count on success, or a negative errno.
+ */
+static ssize_t write_reserve_cpus(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char kbuf[CPULIST_SZ];
+ ssize_t ret;
+ cpumask_t temp_mask;
+
+ ret = simple_write_to_buffer(kbuf, CPULIST_SZ - 1, ppos, ubuf, count);
+ if (ret < 0)
+ return ret;
+
+ kbuf[ret] = '\0';
+ ret = cpulist_parse(kbuf, &temp_mask);
+ if (ret < 0)
+ return ret;
+
+ if (cpumask_weight(&temp_mask) !=
+ cpumask_weight(&the_hcd->reserve_cpus)) {
+ pr_err("incorrect reserve CPU count. expected=%u\n",
+ cpumask_weight(&the_hcd->reserve_cpus));
+ return -EINVAL;
+ }
+
+ /*
+ * Fix: the original stored -EPERM in the unsigned size_t @count and
+ * returned it, relying on wraparound to produce a negative ssize_t.
+ * Use a signed result instead.
+ */
+ ret = count;
+ spin_lock(&the_hcd->lock);
+ if (the_hcd->reservation_enabled) {
+ ret = -EPERM;
+ pr_err("reservation is enabled, can't change reserve_cpus\n");
+ } else {
+ cpumask_copy(&the_hcd->reserve_cpus, &temp_mask);
+ }
+ spin_unlock(&the_hcd->lock);
+
+ return ret;
+}
+
+static const struct file_operations debugfs_reserve_cpus_ops = {
+ .read = read_reserve_cpus,
+ .write = write_reserve_cpus,
+};
+
+/*
+ * hyp_core_ctl_debugfs_init() - expose "reserve_cpus" under
+ * /sys/kernel/debug/hyp_core_ctl/.
+ *
+ * Best effort: on failure the partially created directory is removed
+ * and the driver continues without the debugfs interface.
+ */
+static void hyp_core_ctl_debugfs_init(void)
+{
+ struct dentry *dir, *file;
+
+ dir = debugfs_create_dir("hyp_core_ctl", NULL);
+ if (IS_ERR_OR_NULL(dir))
+ return;
+
+ file = debugfs_create_file("reserve_cpus", 0644, dir, NULL,
+ &debugfs_reserve_cpus_ops);
+ /*
+ * Fix: debugfs_create_file() can return an ERR_PTR as well as NULL,
+ * so check it the same way as the directory above (the original only
+ * tested for NULL).
+ */
+ if (IS_ERR_OR_NULL(file))
+ debugfs_remove(dir);
+}
+
+static int hyp_core_ctl_probe(struct platform_device *pdev)
{
int ret;
struct hyp_core_ctl_data *hcd;
@@ -253,14 +829,20 @@ static int __init hyp_core_ctl_init(void)
goto out;
}
- ret = cpulist_parse(CONFIG_QCOM_HYP_CORE_CTL_RESERVE_CPUS,
- &hcd->reserve_cpus);
+ ret = hyp_core_ctl_parse_dt(pdev, hcd);
if (ret < 0) {
- pr_err("Incorrect default reserve CPUs. ret=%d\n", ret);
+ pr_err("Fail to parse dt. ret=%d\n", ret);
+ goto free_hcd;
+ }
+
+ ret = hyp_core_ctl_init_reserve_cpus(hcd);
+ if (ret < 0) {
+ pr_err("Fail to get reserve CPUs from Hyp. ret=%d\n", ret);
goto free_hcd;
}
spin_lock_init(&hcd->lock);
+ mutex_init(&hcd->reservation_mutex);
hcd->task = kthread_run(hyp_core_ctl_thread, (void *) hcd,
"hyp_core_ctl");
@@ -286,6 +868,9 @@ static int __init hyp_core_ctl_init(void)
"qcom/hyp_core_ctl:dead",
NULL, hyp_core_ctl_hp_offline);
+ cpu_cooling_max_level_notifier_register(&hyp_core_ctl_nb);
+ hyp_core_ctl_debugfs_init();
+
the_hcd = hcd;
return 0;
@@ -296,4 +881,21 @@ free_hcd:
out:
return ret;
}
-late_initcall(hyp_core_ctl_init);
+
+static const struct of_device_id hyp_core_ctl_match_table[] = {
+ { .compatible = "qcom,hyp-core-ctl" },
+ {},
+};
+
+static struct platform_driver hyp_core_ctl_driver = {
+ .probe = hyp_core_ctl_probe,
+ .driver = {
+ .name = "hyp_core_ctl",
+ .owner = THIS_MODULE,
+ .of_match_table = hyp_core_ctl_match_table,
+ },
+};
+
+builtin_platform_driver(hyp_core_ctl_driver);
+MODULE_DESCRIPTION("Core Control for Hypervisor");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index 32e69332e646..fd25e134c6ac 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -76,6 +76,8 @@ module_param(qmi_timeout, ulong, 0600);
#define ICNSS_MAX_PROBE_CNT 2
+#define PROBE_TIMEOUT 5000
+
#define icnss_ipc_log_string(_x...) do { \
if (icnss_ipc_log_context) \
ipc_log_string(icnss_ipc_log_context, _x); \
@@ -284,6 +286,8 @@ enum icnss_driver_state {
ICNSS_HOST_TRIGGERED_PDR,
ICNSS_FW_DOWN,
ICNSS_DRIVER_UNLOADING,
+ ICNSS_REJUVENATE,
+ ICNSS_BLOCK_SHUTDOWN,
};
struct ce_irq_list {
@@ -471,6 +475,7 @@ static struct icnss_priv {
u16 line_number;
char function_name[QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1];
struct mutex dev_lock;
+ struct completion unblock_shutdown;
} *penv;
#ifdef CONFIG_ICNSS_DEBUG
@@ -1160,16 +1165,40 @@ bool icnss_is_fw_ready(void)
}
EXPORT_SYMBOL(icnss_is_fw_ready);
+/*
+ * icnss_block_shutdown() - gate modem shutdown around WLAN driver probe.
+ *
+ * @status true:  set ICNSS_BLOCK_SHUTDOWN and re-arm the completion so
+ *                the modem SSR notifier will wait for probe to finish.
+ * @status false: clear the bit and complete unblock_shutdown, releasing
+ *                any waiter.
+ *
+ * NOTE(review): no locking here; assumes the probe paths that call this
+ * are already serialized with each other -- confirm against the driver
+ * event handlers before relying on concurrent use.
+ */
+void icnss_block_shutdown(bool status)
+{
+ if (!penv)
+ return;
+
+ if (status) {
+ set_bit(ICNSS_BLOCK_SHUTDOWN, &penv->state);
+ reinit_completion(&penv->unblock_shutdown);
+ } else {
+ clear_bit(ICNSS_BLOCK_SHUTDOWN, &penv->state);
+ complete(&penv->unblock_shutdown);
+ }
+}
+EXPORT_SYMBOL(icnss_block_shutdown);
+
bool icnss_is_fw_down(void)
{
if (!penv)
return false;
return test_bit(ICNSS_FW_DOWN, &penv->state) ||
- test_bit(ICNSS_PD_RESTART, &penv->state);
+ test_bit(ICNSS_PD_RESTART, &penv->state) ||
+ test_bit(ICNSS_REJUVENATE, &penv->state);
}
EXPORT_SYMBOL(icnss_is_fw_down);
+/* Report whether the firmware rejuvenate state bit is currently set. */
+bool icnss_is_rejuvenate(void)
+{
+ if (penv)
+ return test_bit(ICNSS_REJUVENATE, &penv->state);
+
+ return false;
+}
+EXPORT_SYMBOL(icnss_is_rejuvenate);
int icnss_power_off(struct device *dev)
{
@@ -1280,6 +1309,7 @@ static int wlfw_msa_mem_info_send_sync_msg(void)
struct wlfw_msa_info_req_msg_v01 req;
struct wlfw_msa_info_resp_msg_v01 resp;
struct msg_desc req_desc, resp_desc;
+ uint64_t max_mapped_addr;
if (!penv || !penv->wlfw_clnt)
return -ENODEV;
@@ -1326,9 +1356,23 @@ static int wlfw_msa_mem_info_send_sync_msg(void)
goto out;
}
+ max_mapped_addr = penv->msa_pa + penv->msa_mem_size;
penv->stats.msa_info_resp++;
penv->nr_mem_region = resp.mem_region_info_len;
for (i = 0; i < resp.mem_region_info_len; i++) {
+
+ if (resp.mem_region_info[i].size > penv->msa_mem_size ||
+ resp.mem_region_info[i].region_addr > max_mapped_addr ||
+ resp.mem_region_info[i].region_addr < penv->msa_pa ||
+ resp.mem_region_info[i].size +
+ resp.mem_region_info[i].region_addr > max_mapped_addr) {
+ icnss_pr_dbg("Received out of range Addr: 0x%llx Size: 0x%x\n",
+ resp.mem_region_info[i].region_addr,
+ resp.mem_region_info[i].size);
+ ret = -EINVAL;
+ goto fail_unwind;
+ }
+
penv->mem_region[i].reg_addr =
resp.mem_region_info[i].region_addr;
penv->mem_region[i].size =
@@ -1343,6 +1387,8 @@ static int wlfw_msa_mem_info_send_sync_msg(void)
return 0;
+fail_unwind:
+ memset(&penv->mem_region[0], 0, sizeof(penv->mem_region[0]) * i);
out:
penv->stats.msa_info_err++;
ICNSS_QMI_ASSERT();
@@ -2076,6 +2122,7 @@ static void icnss_qmi_wlfw_clnt_ind(struct qmi_handle *handle,
event_data->crashed = true;
event_data->fw_rejuvenate = true;
fw_down_data.crashed = true;
+ set_bit(ICNSS_REJUVENATE, &penv->state);
icnss_call_driver_uevent(penv, ICNSS_UEVENT_FW_DOWN,
&fw_down_data);
icnss_driver_event_post(ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN,
@@ -2212,6 +2259,7 @@ static int icnss_call_driver_probe(struct icnss_priv *priv)
icnss_hw_power_on(priv);
+ icnss_block_shutdown(true);
while (probe_cnt < ICNSS_MAX_PROBE_CNT) {
ret = priv->ops->probe(&priv->pdev->dev);
probe_cnt++;
@@ -2221,9 +2269,11 @@ static int icnss_call_driver_probe(struct icnss_priv *priv)
if (ret < 0) {
icnss_pr_err("Driver probe failed: %d, state: 0x%lx, probe_cnt: %d\n",
ret, priv->state, probe_cnt);
+ icnss_block_shutdown(false);
goto out;
}
+ icnss_block_shutdown(false);
set_bit(ICNSS_DRIVER_PROBED, &priv->state);
return 0;
@@ -2261,6 +2311,7 @@ static int icnss_pd_restart_complete(struct icnss_priv *priv)
icnss_call_driver_shutdown(priv);
+ clear_bit(ICNSS_REJUVENATE, &penv->state);
clear_bit(ICNSS_PD_RESTART, &priv->state);
priv->early_crash_ind = false;
@@ -2360,6 +2411,7 @@ static int icnss_driver_event_register_driver(void *data)
if (ret)
goto out;
+ icnss_block_shutdown(true);
while (probe_cnt < ICNSS_MAX_PROBE_CNT) {
ret = penv->ops->probe(&penv->pdev->dev);
probe_cnt++;
@@ -2369,9 +2421,11 @@ static int icnss_driver_event_register_driver(void *data)
if (ret) {
icnss_pr_err("Driver probe failed: %d, state: 0x%lx, probe_cnt: %d\n",
ret, penv->state, probe_cnt);
+ icnss_block_shutdown(false);
goto power_off;
}
+ icnss_block_shutdown(false);
set_bit(ICNSS_DRIVER_PROBED, &penv->state);
return 0;
@@ -2629,6 +2683,13 @@ static int icnss_modem_notifier_nb(struct notifier_block *nb,
if (code != SUBSYS_BEFORE_SHUTDOWN)
return NOTIFY_OK;
+ if (code == SUBSYS_BEFORE_SHUTDOWN && !notif->crashed &&
+ test_bit(ICNSS_BLOCK_SHUTDOWN, &priv->state)) {
+ if (!wait_for_completion_timeout(&priv->unblock_shutdown,
+ PROBE_TIMEOUT))
+ icnss_pr_err("wlan driver probe timeout\n");
+ }
+
if (test_bit(ICNSS_PDR_REGISTERED, &priv->state)) {
set_bit(ICNSS_FW_DOWN, &priv->state);
icnss_ignore_qmi_timeout(true);
@@ -3130,6 +3191,8 @@ EXPORT_SYMBOL(icnss_disable_irq);
int icnss_get_soc_info(struct device *dev, struct icnss_soc_info *info)
{
+ char *fw_build_timestamp = NULL;
+
if (!penv || !dev) {
icnss_pr_err("Platform driver not initialized\n");
return -EINVAL;
@@ -3142,6 +3205,8 @@ int icnss_get_soc_info(struct device *dev, struct icnss_soc_info *info)
info->board_id = penv->board_info.board_id;
info->soc_id = penv->soc_info.soc_id;
info->fw_version = penv->fw_version_info.fw_version;
+ fw_build_timestamp = penv->fw_version_info.fw_build_timestamp;
+ fw_build_timestamp[QMI_WLFW_MAX_TIMESTAMP_LEN_V01] = '\0';
strlcpy(info->fw_build_timestamp,
penv->fw_version_info.fw_build_timestamp,
QMI_WLFW_MAX_TIMESTAMP_LEN_V01 + 1);
@@ -3963,8 +4028,14 @@ static int icnss_stats_show_state(struct seq_file *s, struct icnss_priv *priv)
case ICNSS_FW_DOWN:
seq_puts(s, "FW DOWN");
continue;
+ case ICNSS_REJUVENATE:
+ seq_puts(s, "FW REJUVENATE");
+ continue;
case ICNSS_DRIVER_UNLOADING:
seq_puts(s, "DRIVER UNLOADING");
+ continue;
+ case ICNSS_BLOCK_SHUTDOWN:
+ seq_puts(s, "BLOCK SHUTDOWN");
}
seq_printf(s, "UNKNOWN-%d", i);
@@ -4636,6 +4707,8 @@ static int icnss_probe(struct platform_device *pdev)
penv = priv;
+ init_completion(&priv->unblock_shutdown);
+
icnss_pr_info("Platform driver probed successfully\n");
return 0;
@@ -4658,6 +4731,8 @@ static int icnss_remove(struct platform_device *pdev)
icnss_debugfs_destroy(penv);
+ complete_all(&penv->unblock_shutdown);
+
icnss_modem_ssr_unregister_notifier(penv);
destroy_ramdump_device(penv->msa0_dump_dev);
diff --git a/drivers/soc/qcom/memshare/msm_memshare.c b/drivers/soc/qcom/memshare/msm_memshare.c
index 696c043d8de4..6542861a2f48 100644
--- a/drivers/soc/qcom/memshare/msm_memshare.c
+++ b/drivers/soc/qcom/memshare/msm_memshare.c
@@ -406,6 +406,7 @@ static int modem_notifier_cb(struct notifier_block *this, unsigned long code,
memblock[i].peripheral ==
DHMS_MEM_PROC_MPSS_V01 &&
!memblock[i].guarantee &&
+ !memblock[i].client_request &&
memblock[i].allotted &&
!memblock[i].alloc_request) {
pr_debug("memshare: hypervisor unmapping for client id: %d\n",
@@ -665,9 +666,10 @@ static int handle_free_generic_req(void *req_h, void *req, void *conn_h)
__func__);
flag = 1;
} else if (!memblock[client_id].guarantee &&
- memblock[client_id].allotted) {
- pr_debug("memshare: %s: size: %d",
- __func__, memblock[client_id].size);
+ !memblock[client_id].client_request &&
+ memblock[client_id].allotted) {
+ pr_debug("memshare: %s:client_id:%d - size: %d",
+ __func__, client_id, memblock[client_id].size);
ret = hyp_assign_phys(memblock[client_id].phy_addr,
memblock[client_id].size, source_vmlist, 1,
dest_vmids, dest_perms, 1);
@@ -676,8 +678,8 @@ static int handle_free_generic_req(void *req_h, void *req, void *conn_h)
* This is an error case as hyp mapping was successful
* earlier but during unmap it lead to failure.
*/
- pr_err("memshare: %s, failed to unmap the region\n",
- __func__);
+ pr_err("memshare: %s, failed to unmap the region for client id:%d\n",
+ __func__, client_id);
}
size = memblock[client_id].size;
if (memblock[client_id].client_id == 1) {
@@ -696,8 +698,8 @@ static int handle_free_generic_req(void *req_h, void *req, void *conn_h)
attrs);
free_client(client_id);
} else {
- pr_err("memshare: %s, Request came for a guaranteed client cannot free up the memory\n",
- __func__);
+ pr_err("memshare: %s, Request came for a guaranteed client (client_id: %d) cannot free up the memory\n",
+ __func__, client_id);
}
if (flag) {
@@ -992,6 +994,10 @@ static int memshare_child_probe(struct platform_device *pdev)
pdev->dev.of_node,
"qcom,allocate-boot-time");
+ memblock[num_clients].client_request = of_property_read_bool(
+ pdev->dev.of_node,
+ "qcom,allocate-on-request");
+
rc = of_property_read_string(pdev->dev.of_node, "label",
&name);
if (rc) {
diff --git a/drivers/soc/qcom/memshare/msm_memshare.h b/drivers/soc/qcom/memshare/msm_memshare.h
index 6b546528404c..908f091c86eb 100644
--- a/drivers/soc/qcom/memshare/msm_memshare.h
+++ b/drivers/soc/qcom/memshare/msm_memshare.h
@@ -41,6 +41,8 @@ struct mem_blocks {
uint32_t allotted;
/* Memory allocation request received or not */
uint32_t alloc_request;
+ /* Allocation on request from a client*/
+ uint32_t client_request;
/* Size required for client */
uint32_t size;
/*
diff --git a/drivers/soc/qcom/msm_smem.c b/drivers/soc/qcom/msm_smem.c
index 959aab998ee1..e4b40347490b 100644
--- a/drivers/soc/qcom/msm_smem.c
+++ b/drivers/soc/qcom/msm_smem.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -182,6 +182,20 @@ static struct restart_notifier_block restart_notifiers[] = {
static int init_smem_remote_spinlock(void);
/**
+ * smem_get_toc() - Used for getting partitions TOC
+ *
+ * @return - Base address off partitions TOC
+ *
+ * Helper function to get base address of partition TOC,
+ * that is present in top 4K of first smem region.
+ */
+static struct smem_toc __iomem *smem_get_toc(void)
+{
+ /* The partition TOC occupies the last 4 KiB of the first SMEM region */
+ return smem_areas[0].virt_addr +
+ smem_areas[0].size - 4 * 1024;
+}
+
+/**
* is_probe_done() - Did the probe function successfully complete
*
* @return - true if probe successfully completed, false if otherwise
@@ -315,6 +329,7 @@ static void *__smem_get_entry_nonsecure(unsigned int id, unsigned int *size,
int use_spinlocks = spinlocks_initialized && use_rspinlock;
void *ret = 0;
unsigned long flags = 0;
+ uint32_t e_size;
int rc;
if (!skip_init_check && !smem_initialized_check())
@@ -333,7 +348,11 @@ static void *__smem_get_entry_nonsecure(unsigned int id, unsigned int *size,
if (toc[id].allocated) {
phys_addr_t phys_base;
- *size = toc[id].size;
+ e_size = toc[id].size;
+ if (e_size > smem_ram_size)
+ return ret;
+ *size = e_size;
+
barrier();
phys_base = toc[id].reserved & BASE_ADDR_MASK;
@@ -368,12 +387,19 @@ static void *__smem_get_entry_secure(unsigned int id,
bool skip_init_check,
bool use_rspinlock)
{
+ struct smem_partition_allocation_header *alloc_hdr;
struct smem_partition_header *hdr;
+ uint32_t offset_free_uncached;
+ struct smem_toc __iomem *toc;
+ uint32_t offset_free_cached;
unsigned long lflags = 0;
- void *item = NULL;
- struct smem_partition_allocation_header *alloc_hdr;
+ uint32_t partition_size;
uint32_t partition_num;
+ uint32_t padding_data;
+ uint32_t padding_hdr;
uint32_t a_hdr_size;
+ uint32_t item_size;
+ void *item = NULL;
int rc;
SMEM_DBG("%s(%u, %u, %u, %d, %d)\n", __func__, id, to_proc,
@@ -393,9 +419,13 @@ static void *__smem_get_entry_secure(unsigned int id,
return NULL;
}
+ toc = smem_get_toc();
+
if (flags & SMEM_ANY_HOST_FLAG || !partitions[to_proc].offset) {
if (use_comm_partition) {
partition_num = comm_partition.partition_num;
+ partition_size =
+ readl_relaxed(&toc->entry[partition_num].size);
hdr = smem_areas[0].virt_addr + comm_partition.offset;
} else {
return __smem_get_entry_nonsecure(id, size,
@@ -403,6 +433,7 @@ static void *__smem_get_entry_secure(unsigned int id,
}
} else {
partition_num = partitions[to_proc].partition_num;
+ partition_size = readl_relaxed(&toc->entry[partition_num].size);
hdr = smem_areas[0].virt_addr + partitions[to_proc].offset;
}
if (unlikely(!spinlocks_initialized)) {
@@ -433,11 +464,20 @@ static void *__smem_get_entry_secure(unsigned int id,
if (flags & SMEM_ITEM_CACHED_FLAG) {
a_hdr_size = ALIGN(sizeof(*alloc_hdr),
partitions[to_proc].size_cacheline);
- for (alloc_hdr = (void *)(hdr) + hdr->size - a_hdr_size;
+ offset_free_cached = hdr->offset_free_cached;
+ if (WARN_ON(offset_free_cached > partition_size))
+ return NULL;
+
+ for (alloc_hdr = (void *)(hdr) + partition_size - a_hdr_size;
(void *)(alloc_hdr) > (void *)(hdr) +
- hdr->offset_free_cached;
+ offset_free_cached;
alloc_hdr = (void *)(alloc_hdr) -
- alloc_hdr->size - a_hdr_size) {
+ item_size - a_hdr_size) {
+ item_size = alloc_hdr->size;
+ padding_data = alloc_hdr->padding_data;
+ if (WARN_ON(padding_data > item_size
+ || item_size > partition_size))
+ return NULL;
if (alloc_hdr->canary != SMEM_ALLOCATION_CANARY) {
LOG_ERR(
"%s: SMEM corruption detected. Partition %d to %d at %p\n",
@@ -450,20 +490,30 @@ static void *__smem_get_entry_secure(unsigned int id,
}
if (alloc_hdr->smem_type == id) {
/* 8 byte alignment to match legacy */
- *size = ALIGN(alloc_hdr->size -
- alloc_hdr->padding_data, 8);
- item = (void *)(alloc_hdr) - alloc_hdr->size;
+ *size = ALIGN(item_size - padding_data, 8);
+ item = (void *)(alloc_hdr) - item_size;
break;
}
}
} else {
+ offset_free_uncached = hdr->offset_free_uncached;
+ if (WARN_ON(offset_free_uncached > partition_size))
+ return NULL;
+
for (alloc_hdr = (void *)(hdr) + sizeof(*hdr);
(void *)(alloc_hdr) < (void *)(hdr) +
- hdr->offset_free_uncached;
+ offset_free_uncached;
alloc_hdr = (void *)(alloc_hdr) +
sizeof(*alloc_hdr) +
- alloc_hdr->padding_hdr +
- alloc_hdr->size) {
+ padding_hdr +
+ item_size) {
+ padding_hdr = alloc_hdr->padding_hdr;
+ padding_data = alloc_hdr->padding_data;
+ item_size = alloc_hdr->size;
+ if (WARN_ON(padding_hdr > partition_size
+ || item_size > partition_size
+ || padding_data > item_size))
+ return NULL;
if (alloc_hdr->canary != SMEM_ALLOCATION_CANARY) {
LOG_ERR(
"%s: SMEM corruption detected. Partition %d to %d at %p\n",
@@ -476,11 +526,10 @@ static void *__smem_get_entry_secure(unsigned int id,
}
if (alloc_hdr->smem_type == id) {
/* 8 byte alignment to match legacy */
- *size = ALIGN(alloc_hdr->size -
- alloc_hdr->padding_data, 8);
+ *size = ALIGN(item_size - padding_data, 8);
item = (void *)(alloc_hdr) +
sizeof(*alloc_hdr) +
- alloc_hdr->padding_hdr;
+ padding_hdr;
break;
}
}
@@ -571,10 +620,17 @@ static void *alloc_item_nonsecure(unsigned int id, unsigned int size_in)
void *smem_base = smem_ram_base;
struct smem_shared *shared = smem_base;
struct smem_heap_entry *toc = shared->heap_toc;
+ uint32_t free_offset, heap_remaining;
void *ret = NULL;
- if (shared->heap_info.heap_remaining >= size_in) {
- toc[id].offset = shared->heap_info.free_offset;
+ heap_remaining = shared->heap_info.heap_remaining;
+ free_offset = shared->heap_info.free_offset;
+ if (WARN_ON(heap_remaining > smem_ram_size
+ || free_offset > smem_ram_size))
+ return NULL;
+
+ if (heap_remaining >= size_in) {
+ toc[id].offset = free_offset;
toc[id].size = size_in;
/*
* wmb() is necessary to ensure the allocation data is
@@ -586,7 +642,7 @@ static void *alloc_item_nonsecure(unsigned int id, unsigned int size_in)
shared->heap_info.free_offset += size_in;
shared->heap_info.heap_remaining -= size_in;
- ret = smem_base + toc[id].offset;
+ ret = smem_base + free_offset;
/*
* wmb() is necessary to ensure the heap data is consistent
* before continuing to prevent race conditions with remote
@@ -622,11 +678,15 @@ static void *alloc_item_secure(unsigned int id, unsigned int size_in,
void *smem_base = smem_ram_base;
struct smem_partition_header *hdr;
struct smem_partition_allocation_header *alloc_hdr;
+ uint32_t offset_free_uncached;
+ struct smem_toc __iomem *toc;
+ uint32_t offset_free_cached;
+ uint32_t partition_size;
+ uint32_t partition_num;
uint32_t a_hdr_size;
uint32_t a_data_size;
uint32_t size_cacheline;
uint32_t free_space;
- uint32_t partition_num;
void *ret = NULL;
if (to_proc == SMEM_COMM_HOST) {
@@ -653,27 +713,35 @@ static void *alloc_item_secure(unsigned int id, unsigned int size_in,
BUG();
}
- free_space = hdr->offset_free_cached -
- hdr->offset_free_uncached;
+ toc = smem_get_toc();
+ partition_size = readl_relaxed(&toc->entry[partition_num].size);
+
+ offset_free_cached = hdr->offset_free_cached;
+ offset_free_uncached = hdr->offset_free_uncached;
+ if (WARN_ON(offset_free_uncached > offset_free_cached
+ || offset_free_cached > partition_size))
+ return NULL;
+
+ free_space = offset_free_cached - offset_free_uncached;
if (flags & SMEM_ITEM_CACHED_FLAG) {
a_hdr_size = ALIGN(sizeof(*alloc_hdr), size_cacheline);
a_data_size = ALIGN(size_in, size_cacheline);
- if (free_space < a_hdr_size + a_data_size) {
+ if (free_space < a_hdr_size + a_data_size
+ || free_space < size_in) {
SMEM_INFO(
- "%s: id %u not enough memory %u (required %u)\n",
- __func__, id, free_space,
- a_hdr_size + a_data_size);
+ "%s: id %u not enough memory %u (required %u), (size_in %u)\n",
+ __func__, id, free_space,
+ a_hdr_size + a_data_size, size_in);
return ret;
}
- alloc_hdr = (void *)(hdr) + hdr->offset_free_cached -
- a_hdr_size;
+ alloc_hdr = (void *)(hdr) + offset_free_cached - a_hdr_size;
alloc_hdr->canary = SMEM_ALLOCATION_CANARY;
alloc_hdr->smem_type = id;
alloc_hdr->size = a_data_size;
alloc_hdr->padding_data = a_data_size - size_in;
alloc_hdr->padding_hdr = a_hdr_size - sizeof(*alloc_hdr);
- hdr->offset_free_cached = hdr->offset_free_cached -
+ hdr->offset_free_cached = offset_free_cached -
a_hdr_size - a_data_size;
ret = (void *)(alloc_hdr) - a_data_size;
/*
@@ -688,20 +756,21 @@ static void *alloc_item_secure(unsigned int id, unsigned int size_in,
} else {
a_hdr_size = sizeof(*alloc_hdr);
a_data_size = ALIGN(size_in, 8);
- if (free_space < a_hdr_size + a_data_size) {
+ if (free_space < a_hdr_size + a_data_size
+ || free_space < size_in) {
SMEM_INFO(
- "%s: id %u not enough memory %u (required %u)\n",
- __func__, id, free_space,
- a_hdr_size + a_data_size);
+ "%s: id %u not enough memory %u (required %u) (size_in %u)\n",
+ __func__, id, free_space,
+ a_hdr_size + a_data_size, size_in);
return ret;
}
- alloc_hdr = (void *)(hdr) + hdr->offset_free_uncached;
+ alloc_hdr = (void *)(hdr) + offset_free_uncached;
alloc_hdr->canary = SMEM_ALLOCATION_CANARY;
alloc_hdr->smem_type = id;
alloc_hdr->size = a_data_size;
alloc_hdr->padding_data = a_data_size - size_in;
alloc_hdr->padding_hdr = a_hdr_size - sizeof(*alloc_hdr);
- hdr->offset_free_uncached = hdr->offset_free_uncached +
+ hdr->offset_free_uncached = offset_free_uncached +
a_hdr_size + a_data_size;
ret = alloc_hdr + 1;
}
@@ -892,6 +961,12 @@ unsigned int smem_get_free_space(unsigned int to_proc)
{
struct smem_partition_header *hdr;
struct smem_shared *shared;
+ uint32_t offset_free_uncached;
+ struct smem_toc __iomem *toc;
+ uint32_t offset_free_cached;
+ uint32_t heap_remaining;
+ uint32_t p_size;
+ uint32_t p_num;
if (to_proc >= NUM_SMEM_SUBSYSTEMS) {
pr_err("%s: invalid to_proc:%d\n", __func__, to_proc);
@@ -906,10 +981,24 @@ unsigned int smem_get_free_space(unsigned int to_proc)
return UINT_MAX;
}
hdr = smem_areas[0].virt_addr + partitions[to_proc].offset;
- return hdr->offset_free_cached - hdr->offset_free_uncached;
+ offset_free_cached = hdr->offset_free_cached;
+ offset_free_uncached = hdr->offset_free_uncached;
+
+ toc = smem_get_toc();
+ p_num = partitions[to_proc].partition_num;
+ p_size = readl_relaxed(&toc->entry[p_num].size);
+ if (WARN_ON(offset_free_uncached > offset_free_cached
+ || offset_free_cached > p_size))
+ return -EINVAL;
+
+ return offset_free_cached - offset_free_uncached;
}
shared = smem_ram_base;
- return shared->heap_info.heap_remaining;
+ heap_remaining = shared->heap_info.heap_remaining;
+ if (WARN_ON(heap_remaining > smem_ram_size))
+ return -EINVAL;
+
+ return heap_remaining;
}
EXPORT_SYMBOL(smem_get_free_space);
@@ -1216,8 +1305,8 @@ static void smem_init_security_partition(struct smem_toc_entry *entry,
LOG_ERR("Smem partition %d hdr magic is bad\n", num);
BUG();
}
- if (!hdr->size) {
- LOG_ERR("Smem partition %d size is 0\n", num);
+ if (hdr->size != entry->size) {
+ LOG_ERR("Smem partition %d size is invalid\n", num);
BUG();
}
if (hdr->offset_free_uncached > hdr->size) {
diff --git a/drivers/soc/qcom/secure_buffer.c b/drivers/soc/qcom/secure_buffer.c
index 1c8bc5112256..b691df6c9218 100644
--- a/drivers/soc/qcom/secure_buffer.c
+++ b/drivers/soc/qcom/secure_buffer.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2011 Google, Inc
- * Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2017,2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -55,8 +55,8 @@ struct dest_vm_and_perm_info {
u32 ctx_size;
};
-static void *qcom_secure_mem;
-#define QCOM_SECURE_MEM_SIZE (512*1024)
+#define BATCH_MAX_SIZE SZ_2M
+#define BATCH_MAX_SECTIONS 32
static int secure_buffer_change_chunk(u32 chunks,
u32 nchunks,
@@ -215,43 +215,68 @@ populate_dest_info(int *dest_vmids, int nelements, int *dest_perms,
}
/* Must hold secure_buffer_mutex while allocated buffer is in use */
-static struct mem_prot_info *get_info_list_from_table(struct sg_table *table,
- size_t *size_in_bytes)
+static unsigned int get_batches_from_sgl(struct mem_prot_info *sg_table_copy,
+ struct scatterlist *sgl,
+ struct scatterlist **next_sgl)
{
- int i;
- struct scatterlist *sg;
- struct mem_prot_info *info;
- size_t size;
+ u64 batch_size = 0;
+ unsigned int i = 0;
+ struct scatterlist *curr_sgl = sgl;
+
+ /* Ensure no zero size batches */
+ do {
+ sg_table_copy[i].addr = page_to_phys(sg_page(curr_sgl));
+ sg_table_copy[i].size = curr_sgl->length;
+ batch_size += sg_table_copy[i].size;
+ curr_sgl = sg_next(curr_sgl);
+ i++;
+ } while (curr_sgl && i < BATCH_MAX_SECTIONS &&
+ curr_sgl->length + batch_size < BATCH_MAX_SIZE);
+
+ *next_sgl = curr_sgl;
+ return i;
+}
- size = table->nents * sizeof(*info);
+static int batched_hyp_assign(struct sg_table *table, struct scm_desc *desc)
+{
+ unsigned int entries_size;
+ unsigned int batch_start = 0;
+ unsigned int batches_processed;
+ struct scatterlist *curr_sgl = table->sgl;
+ struct scatterlist *next_sgl;
+ int ret = 0;
+ struct mem_prot_info *sg_table_copy = kcalloc(BATCH_MAX_SECTIONS,
+ sizeof(*sg_table_copy),
+ GFP_KERNEL);
- if (size >= QCOM_SECURE_MEM_SIZE) {
- pr_err("%s: Not enough memory allocated. Required size %zd\n",
- __func__, size);
- return NULL;
- }
+ if (!sg_table_copy)
+ return -ENOMEM;
- if (!qcom_secure_mem) {
- pr_err("%s is not functional as qcom_secure_mem is not allocated.\n",
- __func__);
- return NULL;
- }
+ while (batch_start < table->nents) {
+ batches_processed = get_batches_from_sgl(sg_table_copy,
+ curr_sgl, &next_sgl);
+ curr_sgl = next_sgl;
+ entries_size = batches_processed * sizeof(*sg_table_copy);
+ dmac_flush_range(sg_table_copy,
+ (void *)sg_table_copy + entries_size);
+ desc->args[0] = virt_to_phys(sg_table_copy);
+ desc->args[1] = entries_size;
- /* "Allocate" it */
- info = qcom_secure_mem;
+ ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
+ MEM_PROT_ASSIGN_ID), desc);
+ if (ret) {
+ pr_info("%s: Failed to assign memory protection, ret = %d\n",
+ __func__, ret);
+ break;
+ }
- for_each_sg(table->sgl, sg, table->nents, i) {
- info[i].addr = page_to_phys(sg_page(sg));
- info[i].size = sg->length;
+ batch_start += batches_processed;
}
- *size_in_bytes = size;
- return info;
+ kfree(sg_table_copy);
+ return ret;
}
-#define BATCH_MAX_SIZE SZ_2M
-#define BATCH_MAX_SECTIONS 32
-
int hyp_assign_table(struct sg_table *table,
u32 *source_vm_list, int source_nelems,
int *dest_vmids, int *dest_perms,
@@ -263,11 +288,10 @@ int hyp_assign_table(struct sg_table *table,
size_t source_vm_copy_size;
struct dest_vm_and_perm_info *dest_vm_copy;
size_t dest_vm_copy_size;
- struct mem_prot_info *sg_table_copy;
- size_t sg_table_copy_size;
- int batch_start, batch_end;
- u64 batch_size;
+ if (!table || !table->sgl || !source_vm_list || !source_nelems ||
+ !dest_vmids || !dest_perms || !dest_nelems)
+ return -EINVAL;
/*
* We can only pass cache-aligned sizes to hypervisor, so we need
@@ -285,19 +309,11 @@ int hyp_assign_table(struct sg_table *table,
&dest_vm_copy_size);
if (!dest_vm_copy) {
ret = -ENOMEM;
- goto out_free;
+ goto out_free_source;
}
mutex_lock(&secure_buffer_mutex);
- sg_table_copy = get_info_list_from_table(table, &sg_table_copy_size);
- if (!sg_table_copy) {
- ret = -ENOMEM;
- goto out_unlock;
- }
-
- desc.args[0] = virt_to_phys(sg_table_copy);
- desc.args[1] = sg_table_copy_size;
desc.args[2] = virt_to_phys(source_vm_copy);
desc.args[3] = source_vm_copy_size;
desc.args[4] = virt_to_phys(dest_vm_copy);
@@ -309,50 +325,14 @@ int hyp_assign_table(struct sg_table *table,
dmac_flush_range(source_vm_copy,
(void *)source_vm_copy + source_vm_copy_size);
- dmac_flush_range(sg_table_copy,
- (void *)sg_table_copy + sg_table_copy_size);
dmac_flush_range(dest_vm_copy,
(void *)dest_vm_copy + dest_vm_copy_size);
- batch_start = 0;
- while (batch_start < table->nents) {
- /* Ensure no size zero batches */
- batch_size = sg_table_copy[batch_start].size;
- batch_end = batch_start + 1;
- while (1) {
- u64 size;
-
- if (batch_end >= table->nents)
- break;
- if (batch_end - batch_start >= BATCH_MAX_SECTIONS)
- break;
-
- size = sg_table_copy[batch_end].size;
- if (size + batch_size >= BATCH_MAX_SIZE)
- break;
-
- batch_size += size;
- batch_end++;
- }
-
- desc.args[0] = virt_to_phys(&sg_table_copy[batch_start]);
- desc.args[1] = (batch_end - batch_start) *
- sizeof(sg_table_copy[0]);
-
- ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
- MEM_PROT_ASSIGN_ID), &desc);
- if (ret) {
- pr_info("%s: Failed to assign memory protection, ret = %d\n",
- __func__, ret);
- break;
- }
- batch_start = batch_end;
- }
+ ret = batched_hyp_assign(table, &desc);
-out_unlock:
mutex_unlock(&secure_buffer_mutex);
kfree(dest_vm_copy);
-out_free:
+out_free_source:
kfree(source_vm_copy);
return ret;
}
@@ -435,23 +415,3 @@ bool msm_secure_v2_is_supported(void)
return (scm_get_feat_version(FEATURE_ID_CP) >=
MAKE_CP_VERSION(1, 1, 0));
}
-
-static int __init alloc_secure_shared_memory(void)
-{
- int ret = 0;
- dma_addr_t dma_handle;
-
- qcom_secure_mem = kzalloc(QCOM_SECURE_MEM_SIZE, GFP_KERNEL);
- if (!qcom_secure_mem) {
- /* Fallback to CMA-DMA memory */
- qcom_secure_mem = dma_alloc_coherent(NULL, QCOM_SECURE_MEM_SIZE,
- &dma_handle, GFP_KERNEL);
- if (!qcom_secure_mem) {
- pr_err("Couldn't allocate memory for secure use-cases. hyp_assign_table will not work\n");
- return -ENOMEM;
- }
- }
-
- return ret;
-}
-pure_initcall(alloc_secure_shared_memory);
diff --git a/drivers/soc/qcom/service-notifier.c b/drivers/soc/qcom/service-notifier.c
index eb3a3b9ed565..eca6253fb70f 100644
--- a/drivers/soc/qcom/service-notifier.c
+++ b/drivers/soc/qcom/service-notifier.c
@@ -59,7 +59,7 @@
#define QMI_STATE_MIN_VAL QMI_SERVREG_NOTIF_SERVICE_STATE_ENUM_TYPE_MIN_VAL_V01
#define QMI_STATE_MAX_VAL QMI_SERVREG_NOTIF_SERVICE_STATE_ENUM_TYPE_MAX_VAL_V01
-#define SERVER_TIMEOUT 500
+#define SERVER_TIMEOUT 3000
#define MAX_STRING_LEN 100
/*
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index a074763baaf2..969eeadc3ea5 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -1174,12 +1174,12 @@ static void geni_spi_handle_rx(struct spi_geni_master *mas)
mas->rx_rem_bytes -= rx_bytes;
}
-static irqreturn_t geni_spi_irq(int irq, void *dev)
+static irqreturn_t geni_spi_irq(int irq, void *data)
{
- struct spi_geni_master *mas = dev;
+ struct spi_geni_master *mas = data;
u32 m_irq = 0;
- if (pm_runtime_status_suspended(dev)) {
+ if (pm_runtime_status_suspended(mas->dev)) {
GENI_SE_DBG(mas->ipc, false, mas->dev,
"%s: device is suspended\n", __func__);
goto exit_geni_spi_irq;
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index 72f2b6a1b9ed..3f0ebe861e2f 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -4,7 +4,7 @@
* Copyright (C) Linaro 2012
* Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -390,14 +390,37 @@ out:
return ret;
}
+static void *ion_secure_cma_map_kernel(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ if (!is_buffer_hlos_assigned(buffer)) {
+ pr_info("%s: Mapping non-HLOS accessible buffer disallowed\n",
+ __func__);
+ return NULL;
+ }
+ return ion_cma_map_kernel(heap, buffer);
+}
+
+static int ion_secure_cma_map_user(struct ion_heap *mapper,
+ struct ion_buffer *buffer,
+ struct vm_area_struct *vma)
+{
+ if (!is_buffer_hlos_assigned(buffer)) {
+ pr_info("%s: Mapping non-HLOS accessible buffer disallowed\n",
+ __func__);
+ return -EINVAL;
+ }
+ return ion_cma_mmap(mapper, buffer, vma);
+}
+
static struct ion_heap_ops ion_secure_cma_ops = {
.allocate = ion_secure_cma_allocate,
.free = ion_secure_cma_free,
.map_dma = ion_cma_heap_map_dma,
.unmap_dma = ion_cma_heap_unmap_dma,
.phys = ion_cma_phys,
- .map_user = ion_cma_mmap,
- .map_kernel = ion_cma_map_kernel,
+ .map_user = ion_secure_cma_map_user,
+ .map_kernel = ion_secure_cma_map_kernel,
.unmap_kernel = ion_cma_unmap_kernel,
.print_debug = ion_cma_print_debug,
};
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index 072e3ec72b57..6edd50761b04 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -2,7 +2,7 @@
* drivers/staging/android/ion/ion_system_heap.c
*
* Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2019, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -98,6 +98,11 @@ size_t ion_system_heap_secure_page_pool_total(struct ion_heap *heap,
return total << PAGE_SHIFT;
}
+static int ion_heap_is_system_heap_type(enum ion_heap_type type)
+{
+ return type == ((enum ion_heap_type)ION_HEAP_TYPE_SYSTEM);
+}
+
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
struct ion_buffer *buffer,
unsigned long order,
@@ -266,6 +271,9 @@ static struct page_info *alloc_from_pool_preferred(
struct page_info *info;
int i;
+ if (buffer->flags & ION_FLAG_POOL_FORCE_ALLOC)
+ goto force_alloc;
+
info = kmalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return NULL;
@@ -297,6 +305,7 @@ static struct page_info *alloc_from_pool_preferred(
}
kfree(info);
+force_alloc:
return alloc_largest_available(heap, buffer, size, max_order);
}
@@ -353,6 +362,13 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
int vmid = get_secure_vmid(buffer->flags);
struct device *dev = heap->priv;
+ if (ion_heap_is_system_heap_type(buffer->heap->type) &&
+ is_secure_vmid_valid(vmid)) {
+ pr_info("%s: System heap doesn't support secure allocations\n",
+ __func__);
+ return -EINVAL;
+ }
+
if (align > PAGE_SIZE)
return -EINVAL;
diff --git a/drivers/staging/android/ion/ion_system_secure_heap.c b/drivers/staging/android/ion/ion_system_secure_heap.c
index 5bf484beed96..cf86ea21838a 100644
--- a/drivers/staging/android/ion/ion_system_secure_heap.c
+++ b/drivers/staging/android/ion/ion_system_secure_heap.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2016,2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -171,14 +171,15 @@ out:
sys_heap->ops->free(&buffer);
}
-static void process_one_shrink(struct ion_heap *sys_heap,
+static void process_one_shrink(struct ion_system_secure_heap *secure_heap,
+ struct ion_heap *sys_heap,
struct prefetch_info *info)
{
struct ion_buffer buffer;
size_t pool_size, size;
int ret;
- buffer.heap = sys_heap;
+ buffer.heap = &secure_heap->heap;
buffer.flags = info->vmid;
pool_size = ion_system_heap_secure_page_pool_total(sys_heap,
@@ -193,6 +194,7 @@ static void process_one_shrink(struct ion_heap *sys_heap,
}
buffer.private_flags = ION_PRIV_FLAG_SHRINKER_FREE;
+ buffer.heap = sys_heap;
sys_heap->ops->free(&buffer);
}
@@ -212,7 +214,7 @@ static void ion_system_secure_heap_prefetch_work(struct work_struct *work)
spin_unlock_irqrestore(&secure_heap->work_lock, flags);
if (info->shrink)
- process_one_shrink(sys_heap, info);
+ process_one_shrink(secure_heap, sys_heap, info);
else
process_one_prefetch(sys_heap, info);
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 02f93f4a1c4b..5debcefe9e91 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -129,6 +129,24 @@ static DEFINE_MUTEX(core_isolate_lock);
static DEFINE_MUTEX(cooling_list_lock);
static LIST_HEAD(cpufreq_dev_list);
+static struct cpumask cpus_in_max_cooling_level;
+static BLOCKING_NOTIFIER_HEAD(cpu_max_cooling_level_notifer);
+
+void cpu_cooling_max_level_notifier_register(struct notifier_block *n)
+{
+ blocking_notifier_chain_register(&cpu_max_cooling_level_notifer, n);
+}
+
+void cpu_cooling_max_level_notifier_unregister(struct notifier_block *n)
+{
+ blocking_notifier_chain_unregister(&cpu_max_cooling_level_notifer, n);
+}
+
+const struct cpumask *cpu_cooling_get_max_level_cpumask(void)
+{
+ return &cpus_in_max_cooling_level;
+}
+
/**
* get_idr - function to get a unique id.
* @idr: struct idr * handle used to create a id.
@@ -743,6 +761,9 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
cpumask_clear_cpu(cpu,
&cpus_isolated_by_thermal);
}
+ cpumask_set_cpu(cpu, &cpus_in_max_cooling_level);
+ blocking_notifier_call_chain(&cpu_max_cooling_level_notifer,
+ 1, (void *)(long)cpu);
return ret;
} else if ((prev_state == cpufreq_device->max_level)
&& (state < cpufreq_device->max_level)) {
@@ -756,6 +777,9 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
&cpus_isolated_by_thermal)) {
sched_unisolate_cpu(cpu);
}
+ cpumask_clear_cpu(cpu, &cpus_in_max_cooling_level);
+ blocking_notifier_call_chain(&cpu_max_cooling_level_notifer,
+ 0, (void *)(long)cpu);
}
update_frequency:
clip_freq = cpufreq_device->freq_table[state];
@@ -1171,6 +1195,7 @@ __cpufreq_cooling_register(struct device_node *np,
register_pm_notifier(&cpufreq_cooling_pm_nb);
cpumask_clear(&cpus_pending_online);
cpumask_clear(&cpus_isolated_by_thermal);
+ cpumask_clear(&cpus_in_max_cooling_level);
INIT_WORK(&cpuhp_register_work, register_cdev);
queue_work(system_wq, &cpuhp_register_work);
}
diff --git a/drivers/thermal/qpnp-adc-tm.c b/drivers/thermal/qpnp-adc-tm.c
index bec3dea8f49e..12f41db422fe 100644
--- a/drivers/thermal/qpnp-adc-tm.c
+++ b/drivers/thermal/qpnp-adc-tm.c
@@ -100,6 +100,8 @@
#define QPNP_BTM_Mn_DATA1(n) ((n * 2) + 0xa1)
#define QPNP_BTM_CHANNELS 8
+#define QPNP_ADC_WAKEUP_SRC_TIMEOUT_MS 2000
+
/* QPNP ADC TM HC end */
struct qpnp_adc_thr_info {
@@ -152,7 +154,6 @@ struct qpnp_adc_tm_chip {
bool adc_tm_initialized;
bool adc_tm_recalib_check;
int max_channels_available;
- atomic_t wq_cnt;
struct qpnp_vadc_chip *vadc_dev;
struct workqueue_struct *high_thr_wq;
struct workqueue_struct *low_thr_wq;
@@ -1207,7 +1208,6 @@ static void notify_adc_tm_fn(struct work_struct *work)
{
struct qpnp_adc_tm_sensor *adc_tm = container_of(work,
struct qpnp_adc_tm_sensor, work);
- struct qpnp_adc_tm_chip *chip = adc_tm->chip;
if (adc_tm->thermal_node) {
pr_debug("notifying uspace client\n");
@@ -1218,8 +1218,6 @@ static void notify_adc_tm_fn(struct work_struct *work)
else
notify_clients(adc_tm);
}
-
- atomic_dec(&chip->wq_cnt);
}
static int qpnp_adc_tm_disable_rearm_high_thresholds(
@@ -1305,11 +1303,8 @@ static int qpnp_adc_tm_disable_rearm_high_thresholds(
return rc;
}
- if (!queue_work(chip->sensor[sensor_num].req_wq,
- &chip->sensor[sensor_num].work)) {
- /* The item is already queued, reduce the count */
- atomic_dec(&chip->wq_cnt);
- }
+ queue_work(chip->sensor[sensor_num].req_wq,
+ &chip->sensor[sensor_num].work);
return rc;
}
@@ -1396,11 +1391,8 @@ static int qpnp_adc_tm_disable_rearm_low_thresholds(
return rc;
}
- if (!queue_work(chip->sensor[sensor_num].req_wq,
- &chip->sensor[sensor_num].work)) {
- /* The item is already queued, reduce the count */
- atomic_dec(&chip->wq_cnt);
- }
+ queue_work(chip->sensor[sensor_num].req_wq,
+ &chip->sensor[sensor_num].work);
return rc;
}
@@ -1446,10 +1438,6 @@ static int qpnp_adc_tm_read_status(struct qpnp_adc_tm_chip *chip)
fail:
mutex_unlock(&chip->adc->adc_lock);
- if (rc < 0 || (!chip->th_info.adc_tm_high_enable &&
- !chip->th_info.adc_tm_low_enable))
- atomic_dec(&chip->wq_cnt);
-
return rc;
}
@@ -1616,14 +1604,16 @@ static irqreturn_t qpnp_adc_tm_rc_thr_isr(int irq, void *data)
}
if (sensor_low_notify_num) {
- if (queue_work(chip->low_thr_wq, &chip->trigger_low_thr_work))
- atomic_inc(&chip->wq_cnt);
+ pm_wakeup_event(chip->dev,
+ QPNP_ADC_WAKEUP_SRC_TIMEOUT_MS);
+ queue_work(chip->low_thr_wq, &chip->trigger_low_thr_work);
}
if (sensor_high_notify_num) {
- if (queue_work(chip->high_thr_wq,
- &chip->trigger_high_thr_work))
- atomic_inc(&chip->wq_cnt);
+ pm_wakeup_event(chip->dev,
+ QPNP_ADC_WAKEUP_SRC_TIMEOUT_MS);
+ queue_work(chip->high_thr_wq,
+ &chip->trigger_high_thr_work);
}
return IRQ_HANDLED;
@@ -2014,7 +2004,6 @@ static int qpnp_adc_tm_probe(struct platform_device *pdev)
INIT_WORK(&chip->trigger_high_thr_work, qpnp_adc_tm_high_thr_work);
INIT_WORK(&chip->trigger_low_thr_work, qpnp_adc_tm_low_thr_work);
- atomic_set(&chip->wq_cnt, 0);
rc = devm_request_irq(&pdev->dev, chip->adc->adc_irq_eoc,
qpnp_adc_tm_rc_thr_isr,
@@ -2093,11 +2082,18 @@ static void qpnp_adc_tm_shutdown(struct platform_device *pdev)
static int qpnp_adc_tm_suspend_noirq(struct device *dev)
{
struct qpnp_adc_tm_chip *chip = dev_get_drvdata(dev);
+ struct device_node *node = dev->of_node, *child;
+ int i = 0;
+
+ flush_workqueue(chip->high_thr_wq);
+ flush_workqueue(chip->low_thr_wq);
- if (atomic_read(&chip->wq_cnt) != 0) {
- pr_err(
- "Aborting suspend, adc_tm notification running while suspending\n");
- return -EBUSY;
+ for_each_child_of_node(node, child) {
+ if (chip->sensor[i].req_wq) {
+ pr_debug("flushing queue for sensor %d\n", i);
+ flush_workqueue(chip->sensor[i].req_wq);
+ }
+ i++;
}
return 0;
}
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 8d9f9a803b42..31a536eb4361 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1649,6 +1649,10 @@ static void release_one_tty(struct work_struct *work)
put_pid(tty->pgrp);
put_pid(tty->session);
+#if defined(CONFIG_TTY_FLUSH_LOCAL_ECHO)
+ if (tty->echo_delayed_work.work.func)
+ cancel_delayed_work_sync(&tty->echo_delayed_work);
+#endif
free_tty_struct(tty);
}
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index 6111d399aecc..14c0c4640cc0 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -1971,7 +1971,8 @@ static void usbpd_sm(struct work_struct *w)
/* Disconnect? */
if (pd->current_pr == PR_NONE) {
- if (pd->current_state == PE_UNKNOWN)
+ if (pd->current_state == PE_UNKNOWN &&
+ pd->current_dr == DR_NONE)
goto sm_done;
if (pd->vconn_enabled) {
diff --git a/drivers/video/fbdev/msm/mdss_debug.c b/drivers/video/fbdev/msm/mdss_debug.c
index f38d40c8aa18..a9aaa07eb3d7 100644
--- a/drivers/video/fbdev/msm/mdss_debug.c
+++ b/drivers/video/fbdev/msm/mdss_debug.c
@@ -420,6 +420,39 @@ static int mdss_debug_base_release(struct inode *inode, struct file *file)
return 0;
}
+/**
+ * mdss_debug_base_is_valid_range - verify if requested memory range is valid
+ * @off: address offset in bytes
+ * @cnt: memory size in bytes
+ * Return: true if valid; false otherwise
+ */
+static bool mdss_debug_base_is_valid_range(u32 off, u32 cnt)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ struct mdss_debug_data *mdd = mdata->debug_inf.debug_data;
+ struct range_dump_node *node;
+ struct mdss_debug_base *base;
+
+ pr_debug("check offset=0x%x cnt=0x%x\n", off, cnt);
+
+ list_for_each_entry(base, &mdd->base_list, head) {
+ list_for_each_entry(node, &base->dump_list, head) {
+ pr_debug("%s: start=0x%x end=0x%x\n", node->range_name,
+ node->offset.start, node->offset.end);
+
+ if (node->offset.start <= off
+ && off <= node->offset.end
+ && off + cnt <= node->offset.end) {
+ pr_debug("valid range requested\n");
+ return true;
+ }
+ }
+ }
+
+ pr_err("invalid range requested\n");
+ return false;
+}
+
static ssize_t mdss_debug_base_offset_write(struct file *file,
const char __user *user_buf, size_t count, loff_t *ppos)
{
@@ -451,6 +484,9 @@ static ssize_t mdss_debug_base_offset_write(struct file *file,
if (cnt > (dbg->max_offset - off))
cnt = dbg->max_offset - off;
+ if (!mdss_debug_base_is_valid_range(off, cnt))
+ return -EINVAL;
+
mutex_lock(&mdss_debug_lock);
dbg->off = off;
dbg->cnt = cnt;
diff --git a/fs/crypto/fscrypt_ice.h b/fs/crypto/fscrypt_ice.h
index 352aea4b2a03..b7a480772d2b 100644
--- a/fs/crypto/fscrypt_ice.h
+++ b/fs/crypto/fscrypt_ice.h
@@ -21,7 +21,7 @@ static inline int fscrypt_should_be_processed_by_ice(const struct inode *inode)
{
if (!inode->i_sb->s_cop)
return 0;
- if (!inode->i_sb->s_cop->is_encrypted((struct inode *)inode))
+ if (!IS_ENCRYPTED((struct inode *)inode))
return 0;
return fscrypt_using_hardware_encryption(inode);
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index 0d1048e92bd3..1f9540faa853 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -247,7 +247,7 @@ void __exit fscrypt_essiv_cleanup(void)
static int fscrypt_data_encryption_mode(struct inode *inode)
{
- return fscrypt_should_be_processed_by_ice(inode) ?
+ return fscrypt_is_ice_capable(inode->i_sb) ?
FS_ENCRYPTION_MODE_PRIVATE : FS_ENCRYPTION_MODE_AES_256_XTS;
}
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index e04ec868e37e..efe8d35d46fb 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -242,8 +242,6 @@ static int ext4_init_block_bitmap(struct super_block *sb,
*/
ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
sb->s_blocksize * 8, bh->b_data);
- ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
- ext4_group_desc_csum_set(sb, block_group, gdp);
return 0;
}
@@ -443,10 +441,20 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
goto verify;
}
ext4_lock_group(sb, block_group);
- if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+ if (ext4_has_group_desc_csum(sb) &&
+ (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
+ if (block_group == 0) {
+ ext4_unlock_group(sb, block_group);
+ unlock_buffer(bh);
+ ext4_error(sb, "Block bitmap for bg 0 marked "
+ "uninitialized");
+ err = -EFSCORRUPTED;
+ goto out;
+ }
err = ext4_init_block_bitmap(sb, bh, block_group, desc);
set_bitmap_uptodate(bh);
set_buffer_uptodate(bh);
+ set_buffer_verified(bh);
ext4_unlock_group(sb, block_group);
unlock_buffer(bh);
if (err) {
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index c17539153099..5f3d2e2bdbdf 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1533,11 +1533,6 @@ static inline struct timespec ext4_current_time(struct inode *inode)
static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
{
return ino == EXT4_ROOT_INO ||
- ino == EXT4_USR_QUOTA_INO ||
- ino == EXT4_GRP_QUOTA_INO ||
- ino == EXT4_BOOT_LOADER_INO ||
- ino == EXT4_JOURNAL_INO ||
- ino == EXT4_RESIZE_INO ||
(ino >= EXT4_FIRST_INO(sb) &&
ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count));
}
@@ -3007,9 +3002,6 @@ extern struct buffer_head *ext4_get_first_inline_block(struct inode *inode,
extern int ext4_inline_data_fiemap(struct inode *inode,
struct fiemap_extent_info *fieinfo,
int *has_inline, __u64 start, __u64 len);
-extern int ext4_try_to_evict_inline_data(handle_t *handle,
- struct inode *inode,
- int needed);
extern void ext4_inline_data_truncate(struct inode *inode, int *has_inline);
extern int ext4_convert_inline_data(struct inode *inode);
diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h
index 8ecf84b8f5a1..a284fb28944b 100644
--- a/fs/ext4/ext4_extents.h
+++ b/fs/ext4/ext4_extents.h
@@ -103,6 +103,7 @@ struct ext4_extent_header {
};
#define EXT4_EXT_MAGIC cpu_to_le16(0xf30a)
+#define EXT4_MAX_EXTENT_DEPTH 5
#define EXT4_EXTENT_TAIL_OFFSET(hdr) \
(sizeof(struct ext4_extent_header) + \
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index a8573faa491a..cf4f1063fda2 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -881,6 +881,12 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block,
eh = ext_inode_hdr(inode);
depth = ext_depth(inode);
+ if (depth < 0 || depth > EXT4_MAX_EXTENT_DEPTH) {
+ EXT4_ERROR_INODE(inode, "inode has invalid extent depth: %d",
+ depth);
+ ret = -EFSCORRUPTED;
+ goto err;
+ }
if (path) {
ext4_ext_drop_refs(path);
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 2cec6059e886..0864f0b59f82 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -63,44 +63,6 @@ void ext4_mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}
-/* Initializes an uninitialized inode bitmap */
-static int ext4_init_inode_bitmap(struct super_block *sb,
- struct buffer_head *bh,
- ext4_group_t block_group,
- struct ext4_group_desc *gdp)
-{
- struct ext4_group_info *grp;
- struct ext4_sb_info *sbi = EXT4_SB(sb);
- J_ASSERT_BH(bh, buffer_locked(bh));
-
- /* If checksum is bad mark all blocks and inodes use to prevent
- * allocation, essentially implementing a per-group read-only flag. */
- if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
- grp = ext4_get_group_info(sb, block_group);
- if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
- percpu_counter_sub(&sbi->s_freeclusters_counter,
- grp->bb_free);
- set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
- if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
- int count;
- count = ext4_free_inodes_count(sb, gdp);
- percpu_counter_sub(&sbi->s_freeinodes_counter,
- count);
- }
- set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
- return -EFSBADCRC;
- }
-
- memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
- ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
- bh->b_data);
- ext4_inode_bitmap_csum_set(sb, block_group, gdp, bh,
- EXT4_INODES_PER_GROUP(sb) / 8);
- ext4_group_desc_csum_set(sb, block_group, gdp);
-
- return 0;
-}
-
void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate)
{
if (uptodate) {
@@ -183,18 +145,24 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
}
ext4_lock_group(sb, block_group);
- if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
- err = ext4_init_inode_bitmap(sb, bh, block_group, desc);
+ if (ext4_has_group_desc_csum(sb) &&
+ (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) {
+ if (block_group == 0) {
+ ext4_unlock_group(sb, block_group);
+ unlock_buffer(bh);
+ ext4_error(sb, "Inode bitmap for bg 0 marked "
+ "uninitialized");
+ err = -EFSCORRUPTED;
+ goto out;
+ }
+ memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
+ ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
+ sb->s_blocksize * 8, bh->b_data);
set_bitmap_uptodate(bh);
set_buffer_uptodate(bh);
set_buffer_verified(bh);
ext4_unlock_group(sb, block_group);
unlock_buffer(bh);
- if (err) {
- ext4_error(sb, "Failed to init inode bitmap for group "
- "%u: %d", block_group, err);
- goto out;
- }
return bh;
}
ext4_unlock_group(sb, block_group);
@@ -960,7 +928,8 @@ got:
/* recheck and clear flag under lock if we still need to */
ext4_lock_group(sb, group);
- if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+ if (ext4_has_group_desc_csum(sb) &&
+ (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
ext4_free_group_clusters_set(sb, gdp,
ext4_free_clusters_after_init(sb, group, gdp));
@@ -1341,7 +1310,10 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
ext4_itable_unused_count(sb, gdp)),
sbi->s_inodes_per_block);
- if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group)) {
+ if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group) ||
+ ((group == 0) && ((EXT4_INODES_PER_GROUP(sb) -
+ ext4_itable_unused_count(sb, gdp)) <
+ EXT4_FIRST_INO(sb)))) {
ext4_error(sb, "Something is wrong with group %u: "
"used itable blocks: %d; "
"itable unused count: %u",
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 4d78b932c320..b7217977e4f8 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -897,11 +897,11 @@ retry_journal:
flags |= AOP_FLAG_NOFS;
if (ret == -ENOSPC) {
+ ext4_journal_stop(handle);
ret = ext4_da_convert_inline_data_to_extent(mapping,
inode,
flags,
fsdata);
- ext4_journal_stop(handle);
if (ret == -ENOSPC &&
ext4_should_retry_alloc(inode->i_sb, &retries))
goto retry_journal;
@@ -1873,42 +1873,6 @@ out:
return (error < 0 ? error : 0);
}
-/*
- * Called during xattr set, and if we can sparse space 'needed',
- * just create the extent tree evict the data to the outer block.
- *
- * We use jbd2 instead of page cache to move data to the 1st block
- * so that the whole transaction can be committed as a whole and
- * the data isn't lost because of the delayed page cache write.
- */
-int ext4_try_to_evict_inline_data(handle_t *handle,
- struct inode *inode,
- int needed)
-{
- int error;
- struct ext4_xattr_entry *entry;
- struct ext4_inode *raw_inode;
- struct ext4_iloc iloc;
-
- error = ext4_get_inode_loc(inode, &iloc);
- if (error)
- return error;
-
- raw_inode = ext4_raw_inode(&iloc);
- entry = (struct ext4_xattr_entry *)((void *)raw_inode +
- EXT4_I(inode)->i_inline_off);
- if (EXT4_XATTR_LEN(entry->e_name_len) +
- EXT4_XATTR_SIZE(le32_to_cpu(entry->e_value_size)) < needed) {
- error = -ENOSPC;
- goto out;
- }
-
- error = ext4_convert_inline_data_nolock(handle, inode, &iloc);
-out:
- brelse(iloc.bh);
- return error;
-}
-
void ext4_inline_data_truncate(struct inode *inode, int *has_inline)
{
handle_t *handle;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index fd38b2c5eee7..cb511fa31a22 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -4311,7 +4311,8 @@ static int __ext4_get_inode_loc(struct inode *inode,
int inodes_per_block, inode_offset;
iloc->bh = NULL;
- if (!ext4_valid_inum(sb, inode->i_ino))
+ if (inode->i_ino < EXT4_ROOT_INO ||
+ inode->i_ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
return -EFSCORRUPTED;
iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index e5e99a7f101f..40811e37af20 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2444,7 +2444,8 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
* initialize bb_free to be able to skip
* empty groups without initialization
*/
- if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+ if (ext4_has_group_desc_csum(sb) &&
+ (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
meta_group_info[i]->bb_free =
ext4_free_clusters_after_init(sb, group, desc);
} else {
@@ -2970,7 +2971,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
#endif
ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
ac->ac_b_ex.fe_len);
- if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+ if (ext4_has_group_desc_csum(sb) &&
+ (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
ext4_free_group_clusters_set(sb, gdp,
ext4_free_clusters_after_init(sb,
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index b92373e5b24b..f32007285185 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1182,11 +1182,6 @@ static unsigned ext4_max_namelen(struct inode *inode)
EXT4_NAME_LEN;
}
-static inline bool ext4_is_encrypted(struct inode *inode)
-{
- return ext4_encrypted_inode(inode);
-}
-
static const struct fscrypt_operations ext4_cryptops = {
.key_prefix = "ext4:",
.get_context = ext4_get_context,
@@ -1194,7 +1189,6 @@ static const struct fscrypt_operations ext4_cryptops = {
.dummy_context = ext4_dummy_context,
.empty_dir = ext4_empty_dir,
.max_namelen = ext4_max_namelen,
- .is_encrypted = ext4_is_encrypted,
};
#endif
@@ -3010,6 +3004,9 @@ static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count;
struct ext4_group_desc *gdp = NULL;
+ if (!ext4_has_group_desc_csum(sb))
+ return ngroups;
+
for (group = 0; group < ngroups; group++) {
gdp = ext4_get_group_desc(sb, group, NULL);
if (!gdp)
@@ -3691,6 +3688,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
} else {
sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
+ if (sbi->s_first_ino < EXT4_GOOD_OLD_FIRST_INO) {
+ ext4_msg(sb, KERN_ERR, "invalid first ino: %u",
+ sbi->s_first_ino);
+ goto failed_mount;
+ }
if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
(!is_power_of_2(sbi->s_inode_size)) ||
(sbi->s_inode_size > blocksize)) {
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 3eeed8f0aa06..9c408182bb19 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -207,12 +207,12 @@ ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
{
int error;
- if (buffer_verified(bh))
- return 0;
-
if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
BHDR(bh)->h_blocks != cpu_to_le32(1))
return -EFSCORRUPTED;
+ if (buffer_verified(bh))
+ return 0;
+
if (!ext4_xattr_block_csum_verify(inode, bh))
return -EFSBADCRC;
error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size,
@@ -643,14 +643,20 @@ static size_t ext4_xattr_free_space(struct ext4_xattr_entry *last,
}
static int
-ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
+ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s,
+ struct inode *inode)
{
- struct ext4_xattr_entry *last;
+ struct ext4_xattr_entry *last, *next;
size_t free, min_offs = s->end - s->base, name_len = strlen(i->name);
/* Compute min_offs and last. */
last = s->first;
- for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
+ for (; !IS_LAST_ENTRY(last); last = next) {
+ next = EXT4_XATTR_NEXT(last);
+ if ((void *)next >= s->end) {
+ EXT4_ERROR_INODE(inode, "corrupted xattr entries");
+ return -EIO;
+ }
if (last->e_value_size) {
size_t offs = le16_to_cpu(last->e_value_offs);
if (offs < min_offs)
@@ -832,7 +838,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
mb_cache_entry_delete_block(ext4_mb_cache, hash,
bs->bh->b_blocknr);
ea_bdebug(bs->bh, "modifying in-place");
- error = ext4_xattr_set_entry(i, s);
+ error = ext4_xattr_set_entry(i, s, inode);
if (!error) {
if (!IS_LAST_ENTRY(s->first))
ext4_xattr_rehash(header(s->base),
@@ -881,7 +887,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
s->end = s->base + sb->s_blocksize;
}
- error = ext4_xattr_set_entry(i, s);
+ error = ext4_xattr_set_entry(i, s, inode);
if (error == -EFSCORRUPTED)
goto bad_block;
if (error)
@@ -1078,23 +1084,9 @@ int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
if (EXT4_I(inode)->i_extra_isize == 0)
return -ENOSPC;
- error = ext4_xattr_set_entry(i, s);
- if (error) {
- if (error == -ENOSPC &&
- ext4_has_inline_data(inode)) {
- error = ext4_try_to_evict_inline_data(handle, inode,
- EXT4_XATTR_LEN(strlen(i->name) +
- EXT4_XATTR_SIZE(i->value_len)));
- if (error)
- return error;
- error = ext4_xattr_ibody_find(inode, i, is);
- if (error)
- return error;
- error = ext4_xattr_set_entry(i, s);
- }
- if (error)
- return error;
- }
+ error = ext4_xattr_set_entry(i, s, inode);
+ if (error)
+ return error;
header = IHDR(inode, ext4_raw_inode(&is->iloc));
if (!IS_LAST_ENTRY(s->first)) {
header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
@@ -1116,7 +1108,7 @@ static int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
if (EXT4_I(inode)->i_extra_isize == 0)
return -ENOSPC;
- error = ext4_xattr_set_entry(i, s);
+ error = ext4_xattr_set_entry(i, s, inode);
if (error)
return error;
header = IHDR(inode, ext4_raw_inode(&is->iloc));
@@ -1425,6 +1417,11 @@ static int ext4_xattr_make_inode_space(handle_t *handle, struct inode *inode,
last = IFIRST(header);
/* Find the entry best suited to be pushed into EA block */
for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
+ /* never move system.data out of the inode */
+ if ((last->e_name_len == 4) &&
+ (last->e_name_index == EXT4_XATTR_INDEX_SYSTEM) &&
+ !memcmp(last->e_name, "data", 4))
+ continue;
total_size =
EXT4_XATTR_SIZE(le32_to_cpu(last->e_value_size)) +
EXT4_XATTR_LEN(last->e_name_len);
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 716d26441747..4d2758282373 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -493,7 +493,8 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
struct fscrypt_ctx *ctx = NULL;
struct bio *bio;
- if (f2fs_encrypted_file(inode)) {
+ if (f2fs_encrypted_file(inode) &&
+ !fscrypt_using_hardware_encryption(inode)) {
ctx = fscrypt_get_ctx(inode, GFP_NOFS);
if (IS_ERR(ctx))
return ERR_CAST(ctx);
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 7ab9242f1e2b..1692c263a810 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -1808,18 +1808,12 @@ static unsigned f2fs_max_namelen(struct inode *inode)
inode->i_sb->s_blocksize : F2FS_NAME_LEN;
}
-static inline bool f2fs_is_encrypted(struct inode *inode)
-{
- return f2fs_encrypted_file(inode);
-}
-
static const struct fscrypt_operations f2fs_cryptops = {
.key_prefix = "f2fs:",
.get_context = f2fs_get_context,
.set_context = f2fs_set_context,
.empty_dir = f2fs_empty_dir,
.max_namelen = f2fs_max_namelen,
- .is_encrypted = f2fs_is_encrypted,
};
#endif
diff --git a/fs/inode.c b/fs/inode.c
index 3844c3122f1a..1d1a9573ca70 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -2003,8 +2003,14 @@ void inode_init_owner(struct inode *inode, const struct inode *dir,
inode->i_uid = current_fsuid();
if (dir && dir->i_mode & S_ISGID) {
inode->i_gid = dir->i_gid;
+
+ /* Directories are special, and always inherit S_ISGID */
if (S_ISDIR(mode))
mode |= S_ISGID;
+ else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) &&
+ !in_group_p(inode->i_gid) &&
+ !capable_wrt_inode_uidgid(dir, CAP_FSETID))
+ mode &= ~S_ISGID;
} else
inode->i_gid = current_fsgid();
inode->i_mode = mode;
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 5e659ee08d6a..4aa23281748d 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -1349,6 +1349,13 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
if (jh->b_transaction == transaction &&
jh->b_jlist != BJ_Metadata) {
jbd_lock_bh_state(bh);
+ if (jh->b_transaction == transaction &&
+ jh->b_jlist != BJ_Metadata)
+ pr_err("JBD2: assertion failure: h_type=%u "
+ "h_line_no=%u block_no=%llu jlist=%u\n",
+ handle->h_type, handle->h_line_no,
+ (unsigned long long) bh->b_blocknr,
+ jh->b_jlist);
J_ASSERT_JH(jh, jh->b_transaction != transaction ||
jh->b_jlist == BJ_Metadata);
jbd_unlock_bh_state(bh);
@@ -1368,11 +1375,11 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
* of the transaction. This needs to be done
* once a transaction -bzzz
*/
- jh->b_modified = 1;
if (handle->h_buffer_credits <= 0) {
ret = -ENOSPC;
goto out_unlock_bh;
}
+ jh->b_modified = 1;
handle->h_buffer_credits--;
}
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index e69ebe648a34..be3e1db832b5 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -456,17 +456,12 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
return inode;
}
-int proc_fill_super(struct super_block *s, void *data, int silent)
+int proc_fill_super(struct super_block *s)
{
- struct pid_namespace *ns = get_pid_ns(s->s_fs_info);
struct inode *root_inode;
int ret;
- if (!proc_parse_options(data, ns))
- return -EINVAL;
-
- /* User space would break if executables or devices appear on proc */
- s->s_iflags |= SB_I_USERNS_VISIBLE | SB_I_NOEXEC | SB_I_NODEV;
+ s->s_iflags |= SB_I_USERNS_VISIBLE | SB_I_NODEV;
s->s_flags |= MS_NODIRATIME | MS_NOSUID | MS_NOEXEC;
s->s_blocksize = 1024;
s->s_blocksize_bits = 10;
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 6dfb41413d88..fde3131e4e42 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -213,7 +213,7 @@ extern const struct file_operations proc_reclaim_operations;
extern void proc_init_inodecache(void);
extern struct inode *proc_get_inode(struct super_block *, struct proc_dir_entry *);
-extern int proc_fill_super(struct super_block *, void *data, int flags);
+extern int proc_fill_super(struct super_block *);
extern void proc_entry_rundown(struct proc_dir_entry *);
/*
@@ -269,7 +269,6 @@ static inline void proc_tty_init(void) {}
* root.c
*/
extern struct proc_dir_entry proc_root;
-extern int proc_parse_options(char *options, struct pid_namespace *pid);
extern void proc_self_init(void);
extern int proc_remount(struct super_block *, int *, char *);
diff --git a/fs/proc/root.c b/fs/proc/root.c
index 8d3e484055a6..a1b2860fec62 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -23,6 +23,21 @@
#include "internal.h"
+static int proc_test_super(struct super_block *sb, void *data)
+{
+ return sb->s_fs_info == data;
+}
+
+static int proc_set_super(struct super_block *sb, void *data)
+{
+ int err = set_anon_super(sb, NULL);
+ if (!err) {
+ struct pid_namespace *ns = (struct pid_namespace *)data;
+ sb->s_fs_info = get_pid_ns(ns);
+ }
+ return err;
+}
+
enum {
Opt_gid, Opt_hidepid, Opt_err,
};
@@ -33,7 +48,7 @@ static const match_table_t tokens = {
{Opt_err, NULL},
};
-int proc_parse_options(char *options, struct pid_namespace *pid)
+static int proc_parse_options(char *options, struct pid_namespace *pid)
{
char *p;
substring_t args[MAX_OPT_ARGS];
@@ -85,16 +100,45 @@ int proc_remount(struct super_block *sb, int *flags, char *data)
static struct dentry *proc_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
+ int err;
+ struct super_block *sb;
struct pid_namespace *ns;
+ char *options;
if (flags & MS_KERNMOUNT) {
- ns = data;
- data = NULL;
+ ns = (struct pid_namespace *)data;
+ options = NULL;
} else {
ns = task_active_pid_ns(current);
+ options = data;
+
+ /* Does the mounter have privilege over the pid namespace? */
+ if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN))
+ return ERR_PTR(-EPERM);
+ }
+
+ sb = sget(fs_type, proc_test_super, proc_set_super, flags, ns);
+ if (IS_ERR(sb))
+ return ERR_CAST(sb);
+
+ if (!proc_parse_options(options, ns)) {
+ deactivate_locked_super(sb);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!sb->s_root) {
+ err = proc_fill_super(sb);
+ if (err) {
+ deactivate_locked_super(sb);
+ return ERR_PTR(err);
+ }
+
+ sb->s_flags |= MS_ACTIVE;
+ /* User space would break if executables appear on proc */
+ sb->s_iflags |= SB_I_NOEXEC;
}
- return mount_ns(fs_type, flags, data, ns, ns->user_ns, proc_fill_super);
+ return dget(sb->s_root);
}
static void proc_kill_sb(struct super_block *sb)
diff --git a/fs/sdcardfs/file.c b/fs/sdcardfs/file.c
index 5ac0b0bbb0ec..399531bc1f6f 100644
--- a/fs/sdcardfs/file.c
+++ b/fs/sdcardfs/file.c
@@ -115,7 +115,11 @@ static long sdcardfs_unlocked_ioctl(struct file *file, unsigned int cmd,
goto out;
/* save current_cred and override it */
- OVERRIDE_CRED(sbi, saved_cred, SDCARDFS_I(file_inode(file)));
+ saved_cred = override_fsids(sbi, SDCARDFS_I(file_inode(file))->data);
+ if (!saved_cred) {
+ err = -ENOMEM;
+ goto out;
+ }
if (lower_file->f_op->unlocked_ioctl)
err = lower_file->f_op->unlocked_ioctl(lower_file, cmd, arg);
@@ -124,7 +128,7 @@ static long sdcardfs_unlocked_ioctl(struct file *file, unsigned int cmd,
if (!err)
sdcardfs_copy_and_fix_attrs(file_inode(file),
file_inode(lower_file));
- REVERT_CRED(saved_cred);
+ revert_fsids(saved_cred);
out:
return err;
}
@@ -146,12 +150,16 @@ static long sdcardfs_compat_ioctl(struct file *file, unsigned int cmd,
goto out;
/* save current_cred and override it */
- OVERRIDE_CRED(sbi, saved_cred, SDCARDFS_I(file_inode(file)));
+ saved_cred = override_fsids(sbi, SDCARDFS_I(file_inode(file))->data);
+ if (!saved_cred) {
+ err = -ENOMEM;
+ goto out;
+ }
if (lower_file->f_op->compat_ioctl)
err = lower_file->f_op->compat_ioctl(lower_file, cmd, arg);
- REVERT_CRED(saved_cred);
+ revert_fsids(saved_cred);
out:
return err;
}
@@ -238,7 +246,11 @@ static int sdcardfs_open(struct inode *inode, struct file *file)
}
/* save current_cred and override it */
- OVERRIDE_CRED(sbi, saved_cred, SDCARDFS_I(inode));
+ saved_cred = override_fsids(sbi, SDCARDFS_I(inode)->data);
+ if (!saved_cred) {
+ err = -ENOMEM;
+ goto out_err;
+ }
file->private_data =
kzalloc(sizeof(struct sdcardfs_file_info), GFP_KERNEL);
@@ -268,7 +280,7 @@ static int sdcardfs_open(struct inode *inode, struct file *file)
sdcardfs_copy_and_fix_attrs(inode, sdcardfs_lower_inode(inode));
out_revert_cred:
- REVERT_CRED(saved_cred);
+ revert_fsids(saved_cred);
out_err:
dput(parent);
return err;
diff --git a/fs/sdcardfs/inode.c b/fs/sdcardfs/inode.c
index 137d876373a6..9d3e65fc9518 100644
--- a/fs/sdcardfs/inode.c
+++ b/fs/sdcardfs/inode.c
@@ -22,7 +22,6 @@
#include <linux/fs_struct.h>
#include <linux/ratelimit.h>
-/* Do not directly use this function. Use OVERRIDE_CRED() instead. */
const struct cred *override_fsids(struct sdcardfs_sb_info *sbi,
struct sdcardfs_inode_data *data)
{
@@ -50,7 +49,6 @@ const struct cred *override_fsids(struct sdcardfs_sb_info *sbi,
return old_cred;
}
-/* Do not directly use this function, use REVERT_CRED() instead. */
void revert_fsids(const struct cred *old_cred)
{
const struct cred *cur_cred;
@@ -78,7 +76,10 @@ static int sdcardfs_create(struct inode *dir, struct dentry *dentry,
}
/* save current_cred and override it */
- OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
+ saved_cred = override_fsids(SDCARDFS_SB(dir->i_sb),
+ SDCARDFS_I(dir)->data);
+ if (!saved_cred)
+ return -ENOMEM;
sdcardfs_get_lower_path(dentry, &lower_path);
lower_dentry = lower_path.dentry;
@@ -95,8 +96,11 @@ static int sdcardfs_create(struct inode *dir, struct dentry *dentry,
err = -ENOMEM;
goto out_unlock;
}
+ copied_fs->umask = 0;
+ task_lock(current);
current->fs = copied_fs;
- current->fs->umask = 0;
+ task_unlock(current);
+
err = vfs_create2(lower_dentry_mnt, d_inode(lower_parent_dentry), lower_dentry, mode, want_excl);
if (err)
goto out;
@@ -110,58 +114,18 @@ static int sdcardfs_create(struct inode *dir, struct dentry *dentry,
fixup_lower_ownership(dentry, dentry->d_name.name);
out:
+ task_lock(current);
current->fs = saved_fs;
+ task_unlock(current);
free_fs_struct(copied_fs);
out_unlock:
unlock_dir(lower_parent_dentry);
sdcardfs_put_lower_path(dentry, &lower_path);
- REVERT_CRED(saved_cred);
+ revert_fsids(saved_cred);
out_eacces:
return err;
}
-#if 0
-static int sdcardfs_link(struct dentry *old_dentry, struct inode *dir,
- struct dentry *new_dentry)
-{
- struct dentry *lower_old_dentry;
- struct dentry *lower_new_dentry;
- struct dentry *lower_dir_dentry;
- u64 file_size_save;
- int err;
- struct path lower_old_path, lower_new_path;
-
- OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb));
-
- file_size_save = i_size_read(d_inode(old_dentry));
- sdcardfs_get_lower_path(old_dentry, &lower_old_path);
- sdcardfs_get_lower_path(new_dentry, &lower_new_path);
- lower_old_dentry = lower_old_path.dentry;
- lower_new_dentry = lower_new_path.dentry;
- lower_dir_dentry = lock_parent(lower_new_dentry);
-
- err = vfs_link(lower_old_dentry, d_inode(lower_dir_dentry),
- lower_new_dentry, NULL);
- if (err || !d_inode(lower_new_dentry))
- goto out;
-
- err = sdcardfs_interpose(new_dentry, dir->i_sb, &lower_new_path);
- if (err)
- goto out;
- fsstack_copy_attr_times(dir, d_inode(lower_new_dentry));
- fsstack_copy_inode_size(dir, d_inode(lower_new_dentry));
- set_nlink(d_inode(old_dentry),
- sdcardfs_lower_inode(d_inode(old_dentry))->i_nlink);
- i_size_write(d_inode(new_dentry), file_size_save);
-out:
- unlock_dir(lower_dir_dentry);
- sdcardfs_put_lower_path(old_dentry, &lower_old_path);
- sdcardfs_put_lower_path(new_dentry, &lower_new_path);
- REVERT_CRED();
- return err;
-}
-#endif
-
static int sdcardfs_unlink(struct inode *dir, struct dentry *dentry)
{
int err;
@@ -178,7 +142,10 @@ static int sdcardfs_unlink(struct inode *dir, struct dentry *dentry)
}
/* save current_cred and override it */
- OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
+ saved_cred = override_fsids(SDCARDFS_SB(dir->i_sb),
+ SDCARDFS_I(dir)->data);
+ if (!saved_cred)
+ return -ENOMEM;
sdcardfs_get_lower_path(dentry, &lower_path);
lower_dentry = lower_path.dentry;
@@ -209,43 +176,11 @@ out:
unlock_dir(lower_dir_dentry);
dput(lower_dentry);
sdcardfs_put_lower_path(dentry, &lower_path);
- REVERT_CRED(saved_cred);
+ revert_fsids(saved_cred);
out_eacces:
return err;
}
-#if 0
-static int sdcardfs_symlink(struct inode *dir, struct dentry *dentry,
- const char *symname)
-{
- int err;
- struct dentry *lower_dentry;
- struct dentry *lower_parent_dentry = NULL;
- struct path lower_path;
-
- OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb));
-
- sdcardfs_get_lower_path(dentry, &lower_path);
- lower_dentry = lower_path.dentry;
- lower_parent_dentry = lock_parent(lower_dentry);
-
- err = vfs_symlink(d_inode(lower_parent_dentry), lower_dentry, symname);
- if (err)
- goto out;
- err = sdcardfs_interpose(dentry, dir->i_sb, &lower_path);
- if (err)
- goto out;
- fsstack_copy_attr_times(dir, sdcardfs_lower_inode(dir));
- fsstack_copy_inode_size(dir, d_inode(lower_parent_dentry));
-
-out:
- unlock_dir(lower_parent_dentry);
- sdcardfs_put_lower_path(dentry, &lower_path);
- REVERT_CRED();
- return err;
-}
-#endif
-
static int touch(char *abs_path, mode_t mode)
{
struct file *filp = filp_open(abs_path, O_RDWR|O_CREAT|O_EXCL|O_NOFOLLOW, mode);
@@ -286,7 +221,10 @@ static int sdcardfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
}
/* save current_cred and override it */
- OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
+ saved_cred = override_fsids(SDCARDFS_SB(dir->i_sb),
+ SDCARDFS_I(dir)->data);
+ if (!saved_cred)
+ return -ENOMEM;
/* check disk space */
if (!check_min_free_space(dentry, 0, 1)) {
@@ -312,8 +250,11 @@ static int sdcardfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
unlock_dir(lower_parent_dentry);
goto out_unlock;
}
+ copied_fs->umask = 0;
+ task_lock(current);
current->fs = copied_fs;
- current->fs->umask = 0;
+ task_unlock(current);
+
err = vfs_mkdir2(lower_mnt, d_inode(lower_parent_dentry), lower_dentry, mode);
if (err) {
@@ -362,23 +303,34 @@ static int sdcardfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
if (make_nomedia_in_obb ||
((pd->perm == PERM_ANDROID)
&& (qstr_case_eq(&dentry->d_name, &q_data)))) {
- REVERT_CRED(saved_cred);
- OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(d_inode(dentry)));
+ revert_fsids(saved_cred);
+ saved_cred = override_fsids(sbi,
+ SDCARDFS_I(d_inode(dentry))->data);
+ if (!saved_cred) {
+ pr_err("sdcardfs: failed to set up .nomedia in %s: %d\n",
+ lower_path.dentry->d_name.name,
+ -ENOMEM);
+ goto out;
+ }
set_fs_pwd(current->fs, &lower_path);
touch_err = touch(".nomedia", 0664);
if (touch_err) {
pr_err("sdcardfs: failed to create .nomedia in %s: %d\n",
- lower_path.dentry->d_name.name, touch_err);
+ lower_path.dentry->d_name.name,
+ touch_err);
goto out;
}
}
out:
+ task_lock(current);
current->fs = saved_fs;
+ task_unlock(current);
+
free_fs_struct(copied_fs);
out_unlock:
sdcardfs_put_lower_path(dentry, &lower_path);
out_revert:
- REVERT_CRED(saved_cred);
+ revert_fsids(saved_cred);
out_eacces:
return err;
}
@@ -398,7 +350,10 @@ static int sdcardfs_rmdir(struct inode *dir, struct dentry *dentry)
}
/* save current_cred and override it */
- OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
+ saved_cred = override_fsids(SDCARDFS_SB(dir->i_sb),
+ SDCARDFS_I(dir)->data);
+ if (!saved_cred)
+ return -ENOMEM;
/* sdcardfs_get_real_lower(): in case of remove an user's obb dentry
* the dentry on the original path should be deleted.
@@ -423,44 +378,11 @@ static int sdcardfs_rmdir(struct inode *dir, struct dentry *dentry)
out:
unlock_dir(lower_dir_dentry);
sdcardfs_put_real_lower(dentry, &lower_path);
- REVERT_CRED(saved_cred);
+ revert_fsids(saved_cred);
out_eacces:
return err;
}
-#if 0
-static int sdcardfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
- dev_t dev)
-{
- int err;
- struct dentry *lower_dentry;
- struct dentry *lower_parent_dentry = NULL;
- struct path lower_path;
-
- OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb));
-
- sdcardfs_get_lower_path(dentry, &lower_path);
- lower_dentry = lower_path.dentry;
- lower_parent_dentry = lock_parent(lower_dentry);
-
- err = vfs_mknod(d_inode(lower_parent_dentry), lower_dentry, mode, dev);
- if (err)
- goto out;
-
- err = sdcardfs_interpose(dentry, dir->i_sb, &lower_path);
- if (err)
- goto out;
- fsstack_copy_attr_times(dir, sdcardfs_lower_inode(dir));
- fsstack_copy_inode_size(dir, d_inode(lower_parent_dentry));
-
-out:
- unlock_dir(lower_parent_dentry);
- sdcardfs_put_lower_path(dentry, &lower_path);
- REVERT_CRED();
- return err;
-}
-#endif
-
/*
* The locking rules in sdcardfs_rename are complex. We could use a simpler
* superblock-level name-space lock for renames and copy-ups.
@@ -489,7 +411,10 @@ static int sdcardfs_rename(struct inode *old_dir, struct dentry *old_dentry,
}
/* save current_cred and override it */
- OVERRIDE_CRED(SDCARDFS_SB(old_dir->i_sb), saved_cred, SDCARDFS_I(new_dir));
+ saved_cred = override_fsids(SDCARDFS_SB(old_dir->i_sb),
+ SDCARDFS_I(new_dir)->data);
+ if (!saved_cred)
+ return -ENOMEM;
sdcardfs_get_real_lower(old_dentry, &lower_old_path);
sdcardfs_get_lower_path(new_dentry, &lower_new_path);
@@ -536,7 +461,7 @@ out:
dput(lower_new_dir_dentry);
sdcardfs_put_real_lower(old_dentry, &lower_old_path);
sdcardfs_put_lower_path(new_dentry, &lower_new_path);
- REVERT_CRED(saved_cred);
+ revert_fsids(saved_cred);
out_eacces:
return err;
}
@@ -653,33 +578,7 @@ static int sdcardfs_permission(struct vfsmount *mnt, struct inode *inode, int ma
if (IS_POSIXACL(inode))
pr_warn("%s: This may be undefined behavior...\n", __func__);
err = generic_permission(&tmp, mask);
- /* XXX
- * Original sdcardfs code calls inode_permission(lower_inode,.. )
- * for checking inode permission. But doing such things here seems
- * duplicated work, because the functions called after this func,
- * such as vfs_create, vfs_unlink, vfs_rename, and etc,
- * does exactly same thing, i.e., they calls inode_permission().
- * So we just let they do the things.
- * If there are any security hole, just uncomment following if block.
- */
-#if 0
- if (!err) {
- /*
- * Permission check on lower_inode(=EXT4).
- * we check it with AID_MEDIA_RW permission
- */
- struct inode *lower_inode;
-
- OVERRIDE_CRED(SDCARDFS_SB(inode->sb));
-
- lower_inode = sdcardfs_lower_inode(inode);
- err = inode_permission(lower_inode, mask);
-
- REVERT_CRED();
- }
-#endif
return err;
-
}
static int sdcardfs_setattr_wrn(struct dentry *dentry, struct iattr *ia)
@@ -757,7 +656,10 @@ static int sdcardfs_setattr(struct vfsmount *mnt, struct dentry *dentry, struct
goto out_err;
/* save current_cred and override it */
- OVERRIDE_CRED(SDCARDFS_SB(dentry->d_sb), saved_cred, SDCARDFS_I(inode));
+ saved_cred = override_fsids(SDCARDFS_SB(dentry->d_sb),
+ SDCARDFS_I(inode)->data);
+ if (!saved_cred)
+ return -ENOMEM;
sdcardfs_get_lower_path(dentry, &lower_path);
lower_dentry = lower_path.dentry;
@@ -816,7 +718,7 @@ static int sdcardfs_setattr(struct vfsmount *mnt, struct dentry *dentry, struct
out:
sdcardfs_put_lower_path(dentry, &lower_path);
- REVERT_CRED(saved_cred);
+ revert_fsids(saved_cred);
out_err:
return err;
}
@@ -898,13 +800,6 @@ const struct inode_operations sdcardfs_dir_iops = {
.setattr = sdcardfs_setattr_wrn,
.setattr2 = sdcardfs_setattr,
.getattr = sdcardfs_getattr,
- /* XXX Following operations are implemented,
- * but FUSE(sdcard) or FAT does not support them
- * These methods are *NOT* perfectly tested.
- .symlink = sdcardfs_symlink,
- .link = sdcardfs_link,
- .mknod = sdcardfs_mknod,
- */
};
const struct inode_operations sdcardfs_main_iops = {
diff --git a/fs/sdcardfs/lookup.c b/fs/sdcardfs/lookup.c
index 843fcd216116..e1bff0da1925 100644
--- a/fs/sdcardfs/lookup.c
+++ b/fs/sdcardfs/lookup.c
@@ -428,7 +428,12 @@ struct dentry *sdcardfs_lookup(struct inode *dir, struct dentry *dentry,
}
/* save current_cred and override it */
- OVERRIDE_CRED_PTR(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
+ saved_cred = override_fsids(SDCARDFS_SB(dir->i_sb),
+ SDCARDFS_I(dir)->data);
+ if (!saved_cred) {
+ ret = ERR_PTR(-ENOMEM);
+ goto out_err;
+ }
sdcardfs_get_lower_path(parent, &lower_parent_path);
@@ -459,7 +464,7 @@ struct dentry *sdcardfs_lookup(struct inode *dir, struct dentry *dentry,
out:
sdcardfs_put_lower_path(parent, &lower_parent_path);
- REVERT_CRED(saved_cred);
+ revert_fsids(saved_cred);
out_err:
dput(parent);
return ret;
diff --git a/fs/sdcardfs/sdcardfs.h b/fs/sdcardfs/sdcardfs.h
index eda8e7aa6911..b2e7db857dcf 100644
--- a/fs/sdcardfs/sdcardfs.h
+++ b/fs/sdcardfs/sdcardfs.h
@@ -88,31 +88,6 @@
(x)->i_mode = ((x)->i_mode & S_IFMT) | 0775;\
} while (0)
-/* OVERRIDE_CRED() and REVERT_CRED()
- * OVERRIDE_CRED()
- * backup original task->cred
- * and modifies task->cred->fsuid/fsgid to specified value.
- * REVERT_CRED()
- * restore original task->cred->fsuid/fsgid.
- * These two macro should be used in pair, and OVERRIDE_CRED() should be
- * placed at the beginning of a function, right after variable declaration.
- */
-#define OVERRIDE_CRED(sdcardfs_sbi, saved_cred, info) \
- do { \
- saved_cred = override_fsids(sdcardfs_sbi, info->data); \
- if (!saved_cred) \
- return -ENOMEM; \
- } while (0)
-
-#define OVERRIDE_CRED_PTR(sdcardfs_sbi, saved_cred, info) \
- do { \
- saved_cred = override_fsids(sdcardfs_sbi, info->data); \
- if (!saved_cred) \
- return ERR_PTR(-ENOMEM); \
- } while (0)
-
-#define REVERT_CRED(saved_cred) revert_fsids(saved_cred)
-
/* Android 5.0 support */
/* Permission mode for a specific node. Controls how file permissions
diff --git a/include/crypto/ice.h b/include/crypto/ice.h
index 133041ea9ec8..0b8f048f39cb 100644
--- a/include/crypto/ice.h
+++ b/include/crypto/ice.h
@@ -49,6 +49,18 @@ struct ice_data_setting {
bool encr_bypass;
};
+/* MSM ICE Crypto Data Unit of target DUN of Transfer Request */
+enum ice_crypto_data_unit {
+ ICE_CRYPTO_DATA_UNIT_512_B = 0,
+ ICE_CRYPTO_DATA_UNIT_1_KB = 1,
+ ICE_CRYPTO_DATA_UNIT_2_KB = 2,
+ ICE_CRYPTO_DATA_UNIT_4_KB = 3,
+ ICE_CRYPTO_DATA_UNIT_8_KB = 4,
+ ICE_CRYPTO_DATA_UNIT_16_KB = 5,
+ ICE_CRYPTO_DATA_UNIT_32_KB = 6,
+ ICE_CRYPTO_DATA_UNIT_64_KB = 7,
+};
+
typedef void (*ice_error_cb)(void *, u32 error);
struct qcom_ice_variant_ops *qcom_ice_get_variant_ops(struct device_node *node);
diff --git a/include/dt-bindings/clock/mdss-10nm-pll-clk.h b/include/dt-bindings/clock/mdss-10nm-pll-clk.h
index 8108c989af65..f9781b52b07c 100644
--- a/include/dt-bindings/clock/mdss-10nm-pll-clk.h
+++ b/include/dt-bindings/clock/mdss-10nm-pll-clk.h
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -26,16 +26,32 @@
#define PCLK_SRC_MUX_0_CLK 7
#define PCLK_SRC_0_CLK 8
#define PCLK_MUX_0_CLK 9
-#define VCO_CLK_1 10
-#define PLL_OUT_DIV_1_CLK 11
-#define BITCLK_SRC_1_CLK 12
-#define BYTECLK_SRC_1_CLK 13
-#define POST_BIT_DIV_1_CLK 14
-#define POST_VCO_DIV_1_CLK 15
-#define BYTECLK_MUX_1_CLK 16
-#define PCLK_SRC_MUX_1_CLK 17
-#define PCLK_SRC_1_CLK 18
-#define PCLK_MUX_1_CLK 19
+#define SHADOW_VCO_CLK_0 10
+#define SHADOW_PLL_OUT_DIV_0_CLK 11
+#define SHADOW_BITCLK_SRC_0_CLK 12
+#define SHADOW_BYTECLK_SRC_0_CLK 13
+#define SHADOW_POST_BIT_DIV_0_CLK 14
+#define SHADOW_POST_VCO_DIV_0_CLK 15
+#define SHADOW_PCLK_SRC_MUX_0_CLK 16
+#define SHADOW_PCLK_SRC_0_CLK 17
+#define VCO_CLK_1 18
+#define PLL_OUT_DIV_1_CLK 19
+#define BITCLK_SRC_1_CLK 20
+#define BYTECLK_SRC_1_CLK 21
+#define POST_BIT_DIV_1_CLK 22
+#define POST_VCO_DIV_1_CLK 23
+#define BYTECLK_MUX_1_CLK 24
+#define PCLK_SRC_MUX_1_CLK 25
+#define PCLK_SRC_1_CLK 26
+#define PCLK_MUX_1_CLK 27
+#define SHADOW_VCO_CLK_1 28
+#define SHADOW_PLL_OUT_DIV_1_CLK 29
+#define SHADOW_BITCLK_SRC_1_CLK 30
+#define SHADOW_BYTECLK_SRC_1_CLK 31
+#define SHADOW_POST_BIT_DIV_1_CLK 32
+#define SHADOW_POST_VCO_DIV_1_CLK 33
+#define SHADOW_PCLK_SRC_MUX_1_CLK 34
+#define SHADOW_PCLK_SRC_1_CLK 35
/* DP PLL clocks */
#define DP_VCO_CLK 0
diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h
index 4fa262337e9e..1101e2eb0492 100644
--- a/include/linux/cpu_cooling.h
+++ b/include/linux/cpu_cooling.h
@@ -92,6 +92,10 @@ of_cpufreq_power_cooling_register(struct device_node *np,
void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev);
unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq);
+
+extern void cpu_cooling_max_level_notifier_register(struct notifier_block *n);
+extern void cpu_cooling_max_level_notifier_unregister(struct notifier_block *n);
+extern const struct cpumask *cpu_cooling_get_max_level_cpumask(void);
#else /* !CONFIG_CPU_THERMAL */
static inline struct thermal_cooling_device *
cpufreq_cooling_register(const struct cpumask *clip_cpus)
@@ -138,6 +142,21 @@ unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
{
return THERMAL_CSTATE_INVALID;
}
+
+static inline
+void cpu_cooling_max_level_notifier_register(struct notifier_block *n)
+{
+}
+
+static inline
+void cpu_cooling_max_level_notifier_unregister(struct notifier_block *n)
+{
+}
+
+static inline const struct cpumask *cpu_cooling_get_max_level_cpumask(void)
+{
+ return cpu_none_mask;
+}
#endif /* CONFIG_CPU_THERMAL */
#endif /* __CPU_COOLING_H__ */
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
index 7af63e71fe66..aa5e331aed2d 100644
--- a/include/linux/fscrypt.h
+++ b/include/linux/fscrypt.h
@@ -23,12 +23,9 @@
#define FS_CRYPTO_BLOCK_SIZE 16
-/* iv sector for security/pfe/pfk_fscrypt.c and f2fs.
- * sizeof is required to accommodate all data lengths.
- */
+/* iv sector for security/pfe/pfk_fscrypt.c and f2fs */
#define PG_DUN(i, p) \
- ((((i)->i_ino & 0xffffffff) << (sizeof((i)->i_ino)/2)) | \
- ((p)->index & 0xffffffff))
+ (((((u64)(i)->i_ino) & 0xffffffff) << 32) | ((p)->index & 0xffffffff))
struct fscrypt_info;
@@ -90,7 +87,6 @@ struct fscrypt_operations {
bool (*dummy_context)(struct inode *);
bool (*empty_dir)(struct inode *);
unsigned (*max_namelen)(struct inode *);
- bool (*is_encrypted)(struct inode *);
};
static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
@@ -112,7 +108,8 @@ static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
filenames_mode == FS_ENCRYPTION_MODE_AES_256_CTS)
return true;
- if (contents_mode == FS_ENCRYPTION_MODE_PRIVATE)
+ if (contents_mode == FS_ENCRYPTION_MODE_PRIVATE &&
+ filenames_mode == FS_ENCRYPTION_MODE_AES_256_CTS)
return true;
return false;
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index e35e6de633b9..9b9f65d99873 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -22,7 +22,7 @@ extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
unsigned long new_addr, unsigned long old_end,
- pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush);
+ pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, pgprot_t newprot,
int prot_numa);
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 325f649d77ff..377417200728 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -210,6 +210,7 @@ extern struct task_group root_task_group;
.policy = SCHED_NORMAL, \
.cpus_allowed = CPU_MASK_ALL, \
.nr_cpus_allowed= NR_CPUS, \
+ .cpus_requested = CPU_MASK_ALL, \
.mm = NULL, \
.active_mm = &init_mm, \
.restart_block = { \
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index cb2cc30b8465..91db63260ca6 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -410,7 +410,7 @@ struct mm_struct {
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
rwlock_t mm_rb_lock;
#endif
- u32 vmacache_seqnum; /* per-thread vmacache */
+ u64 vmacache_seqnum; /* per-thread vmacache */
#ifdef CONFIG_MMU
unsigned long (*get_unmapped_area) (struct file *filp,
unsigned long addr, unsigned long len,
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 47c5b391fb8d..4c7081a6482c 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -497,8 +497,8 @@ struct perf_addr_filters_head {
* enum perf_event_active_state - the states of a event
*/
enum perf_event_active_state {
- PERF_EVENT_STATE_DEAD = -5,
- PERF_EVENT_STATE_ZOMBIE = -4,
+ PERF_EVENT_STATE_DORMANT = -5,
+ PERF_EVENT_STATE_DEAD = -4,
PERF_EVENT_STATE_EXIT = -3,
PERF_EVENT_STATE_ERROR = -2,
PERF_EVENT_STATE_OFF = -1,
@@ -721,7 +721,13 @@ struct perf_event {
/* Is this event shared with other events */
bool shared;
- struct list_head zombie_entry;
+
+ /*
+ * Entry into the list that holds the events whose CPUs
+ * are offline. These events will be installed once the
+ * CPU wakes up and will be removed from the list after that
+ */
+ struct list_head dormant_event_entry;
#endif /* CONFIG_PERF_EVENTS */
};
@@ -1401,9 +1407,11 @@ static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
#ifdef CONFIG_PERF_EVENTS
int perf_event_init_cpu(unsigned int cpu);
int perf_event_exit_cpu(unsigned int cpu);
+int perf_event_restart_events(unsigned int cpu);
#else
#define perf_event_init_cpu NULL
#define perf_event_exit_cpu NULL
+#define perf_event_restart_events NULL
#endif
#endif /* _LINUX_PERF_EVENT_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 00629e8cdf21..882d630f721b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1802,6 +1802,7 @@ struct task_struct {
unsigned int policy;
int nr_cpus_allowed;
cpumask_t cpus_allowed;
+ cpumask_t cpus_requested;
#ifdef CONFIG_PREEMPT_RCU
int rcu_read_lock_nesting;
@@ -1828,7 +1829,7 @@ struct task_struct {
struct mm_struct *mm, *active_mm;
/* per-thread vma caching */
- u32 vmacache_seqnum;
+ u64 vmacache_seqnum;
struct vm_area_struct *vmacache[VMACACHE_SIZE];
#if defined(SPLIT_RSS_COUNTING)
struct task_rss_stat rss_stat;
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 4c679792f4bf..52b655af94f0 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -97,7 +97,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PGPGOUTCLEAN, PSWPIN, PSWPOUT,
#ifdef CONFIG_DEBUG_VM_VMACACHE
VMACACHE_FIND_CALLS,
VMACACHE_FIND_HITS,
- VMACACHE_FULL_FLUSHES,
#endif
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
SPECULATIVE_PGFAULT,
diff --git a/include/linux/vmacache.h b/include/linux/vmacache.h
index c3fa0fd43949..4f58ff2dacd6 100644
--- a/include/linux/vmacache.h
+++ b/include/linux/vmacache.h
@@ -15,7 +15,6 @@ static inline void vmacache_flush(struct task_struct *tsk)
memset(tsk->vmacache, 0, sizeof(tsk->vmacache));
}
-extern void vmacache_flush_all(struct mm_struct *mm);
extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma);
extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
unsigned long addr);
@@ -29,10 +28,6 @@ extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
static inline void vmacache_invalidate(struct mm_struct *mm)
{
mm->vmacache_seqnum++;
-
- /* deal with overflows */
- if (unlikely(mm->vmacache_seqnum == 0))
- vmacache_flush_all(mm);
}
#endif /* __LINUX_VMACACHE_H */
diff --git a/include/microvisor/kernel/microvisor.h b/include/microvisor/kernel/microvisor.h
index 1a30d1fe18d0..3965243a9ecb 100644
--- a/include/microvisor/kernel/microvisor.h
+++ b/include/microvisor/kernel/microvisor.h
@@ -29,15 +29,10 @@
#define OKL4_SDK_VERSION_MAJOR 5
/** SDK Minor number */
#define OKL4_SDK_VERSION_MINOR 3
-/**
- * If defined, indicates this is an internal development version.
- * In this case, OKL4_SDK_VERSION_RELEASE == -1
- */
-#define OKL4_SDK_VERSION_DEVELOPMENT 1
/** SDK Release (revision) number */
-#define OKL4_SDK_VERSION_RELEASE (-1)
+#define OKL4_SDK_VERSION_RELEASE 6
/** SDK Maintenance number. Indicates the maintenance sequence revision. */
-#define OKL4_SDK_VERSION_MAINTENANCE 0
+#define OKL4_SDK_VERSION_MAINTENANCE 15
/** @addtogroup lib_microvisor_helpers Microvisor Helpers
diff --git a/include/microvisor/kernel/syscalls.h b/include/microvisor/kernel/syscalls.h
index fdc2c0d0e5f4..d3c4a5324dc9 100644
--- a/include/microvisor/kernel/syscalls.h
+++ b/include/microvisor/kernel/syscalls.h
@@ -582,6 +582,123 @@ _okl4_sys_axon_trigger_send(okl4_kcap_t axon_id)
/**
*
+ * @brief Re-start a vCPU that was stopped for debug.
+ *
+ * @details
+ * This operation starts a vCPU that was stopped for debug, either with
+ * an
+ * explicit debug_suspend operation, or because of a single-step or
+ * hitting a
+ * breakpoint instruction.
+ *
+ * @param target
+ * @param single_step
+ * If true, single-step the target.
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_debug_resume(okl4_kcap_t target, okl4_bool_t single_step)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)target;
+ register uint32_t r1 asm("r1") = (uint32_t)single_step;
+ __asm__ __volatile__(
+ ""hvc(5208)"\n\t"
+ : "+r"(r0), "+r"(r1)
+ :
+ : "cc", "memory", "r2", "r3", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_debug_resume(okl4_kcap_t target, okl4_bool_t single_step)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)target;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)single_step;
+ __asm__ __volatile__(
+ "" hvc(5208) "\n\t"
+ : "+r"(x0), "+r"(x1)
+ :
+ : "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
+ * @brief Stop a vCPU executing for debug purposes.
+ *
+ * @details
+ * This operation stops a vCPU's execution until next restarted by the
+ * corresponding debug resume.
+ *
+ * Note: A vCPU cannot debug-suspend itself.
+ *
+ * @param target
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_debug_suspend(okl4_kcap_t target)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)target;
+ __asm__ __volatile__(
+ ""hvc(5209)"\n\t"
+ : "+r"(r0)
+ :
+ : "cc", "memory", "r1", "r2", "r3", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_debug_suspend(okl4_kcap_t target)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)target;
+ __asm__ __volatile__(
+ "" hvc(5209) "\n\t"
+ : "+r"(x0)
+ :
+ : "cc", "memory", "x1", "x2", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
+ return (okl4_error_t)x0;
+}
+
+#endif
+
+/**
+ *
* @brief Acknowledge the delivery of an interrupt.
*
* @details
@@ -4516,12 +4633,13 @@ _okl4_sys_schedule_profile_vcpu_get_data(okl4_kcap_t vcpu,
/**
*
- * OKL4 Microvisor system call: SCHEDULER_SUSPEND
+ * OKL4 Microvisor system call: SCHEDULER_AFFINITY_GET
*
* @param scheduler_id
- * @param power_state
+ * @param vcpu_id
*
* @retval error
+ * @retval cpu_index
*
*/
@@ -4530,40 +4648,104 @@ _okl4_sys_schedule_profile_vcpu_get_data(okl4_kcap_t vcpu,
#if defined(__RVCT__) || defined(__RVCT_GNU__)
#elif defined(__ADS__)
#else
-OKL4_FORCE_INLINE okl4_error_t
-_okl4_sys_scheduler_suspend(okl4_kcap_t scheduler_id,
- okl4_power_state_t power_state)
+OKL4_FORCE_INLINE struct _okl4_sys_scheduler_affinity_get_return
+_okl4_sys_scheduler_affinity_get(okl4_kcap_t scheduler_id, okl4_kcap_t vcpu_id)
{
+ struct _okl4_sys_scheduler_affinity_get_return result;
+
register uint32_t r0 asm("r0") = (uint32_t)scheduler_id;
- register uint32_t r1 asm("r1") = (uint32_t)power_state;
+ register uint32_t r1 asm("r1") = (uint32_t)vcpu_id;
__asm__ __volatile__(
- ""hvc(5150)"\n\t"
+ ""hvc(5182)"\n\t"
: "+r"(r0), "+r"(r1)
:
: "cc", "memory", "r2", "r3", "r4", "r5"
);
- return (okl4_error_t)r0;
+ result.error = (okl4_error_t)(r0);
+ result.cpu_index = (okl4_cpu_id_t)(r1);
+ return result;
}
#endif
#else
-OKL4_FORCE_INLINE okl4_error_t
-_okl4_sys_scheduler_suspend(okl4_kcap_t scheduler_id,
- okl4_power_state_t power_state)
+OKL4_FORCE_INLINE struct _okl4_sys_scheduler_affinity_get_return
+_okl4_sys_scheduler_affinity_get(okl4_kcap_t scheduler_id, okl4_kcap_t vcpu_id)
{
+ struct _okl4_sys_scheduler_affinity_get_return result;
+
register okl4_register_t x0 asm("x0") = (okl4_register_t)scheduler_id;
- register okl4_register_t x1 asm("x1") = (okl4_register_t)power_state;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)vcpu_id;
__asm__ __volatile__(
- "" hvc(5150) "\n\t"
+ "" hvc(5182) "\n\t"
: "+r"(x0), "+r"(x1)
:
: "cc", "memory", "x2", "x3", "x4", "x5", "x6", "x7"
);
+ result.error = (okl4_error_t)(x0);
+ result.cpu_index = (okl4_cpu_id_t)(x1);
+ return result;
+}
+
+#endif
+
+/**
+ *
+ * OKL4 Microvisor system call: SCHEDULER_AFFINITY_SET
+ *
+ * @param scheduler_id
+ * @param vcpu_id
+ * @param cpu_index
+ *
+ * @retval error
+ *
+ */
+
+#if defined(__ARM_EABI__)
+
+#if defined(__RVCT__) || defined(__RVCT_GNU__)
+#elif defined(__ADS__)
+#else
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_scheduler_affinity_set(okl4_kcap_t scheduler_id, okl4_kcap_t vcpu_id,
+ okl4_cpu_id_t cpu_index)
+{
+ register uint32_t r0 asm("r0") = (uint32_t)scheduler_id;
+ register uint32_t r1 asm("r1") = (uint32_t)vcpu_id;
+ register uint32_t r2 asm("r2") = (uint32_t)cpu_index;
+ __asm__ __volatile__(
+ ""hvc(5183)"\n\t"
+ : "+r"(r0), "+r"(r1), "+r"(r2)
+ :
+ : "cc", "memory", "r3", "r4", "r5"
+ );
+
+
+ return (okl4_error_t)r0;
+}
+#endif
+
+#else
+
+OKL4_FORCE_INLINE okl4_error_t
+_okl4_sys_scheduler_affinity_set(okl4_kcap_t scheduler_id, okl4_kcap_t vcpu_id,
+ okl4_cpu_id_t cpu_index)
+{
+ register okl4_register_t x0 asm("x0") = (okl4_register_t)scheduler_id;
+ register okl4_register_t x1 asm("x1") = (okl4_register_t)vcpu_id;
+ register okl4_register_t x2 asm("x2") = (okl4_register_t)cpu_index;
+ __asm__ __volatile__(
+ "" hvc(5183) "\n\t"
+ : "+r"(x0), "+r"(x1), "+r"(x2)
+ :
+ : "cc", "memory", "x3", "x4", "x5", "x6", "x7"
+ );
+
+
return (okl4_error_t)x0;
}
@@ -5885,6 +6067,10 @@ _okl4_sys_vinterrupt_raise(okl4_kcap_t virqline, okl4_virq_flags_t payload)
#define OKL4_SYSCALL_AXON_TRIGGER_SEND 5185
+#define OKL4_SYSCALL_DEBUG_RESUME 5208
+
+#define OKL4_SYSCALL_DEBUG_SUSPEND 5209
+
#define OKL4_SYSCALL_INTERRUPT_ACK 5128
#define OKL4_SYSCALL_INTERRUPT_ATTACH_PRIVATE 5134
@@ -5993,7 +6179,9 @@ _okl4_sys_vinterrupt_raise(okl4_kcap_t virqline, okl4_virq_flags_t payload)
#define OKL4_SYSCALL_SCHEDULE_PROFILE_VCPU_GET_DATA 5173
-#define OKL4_SYSCALL_SCHEDULER_SUSPEND 5150
+#define OKL4_SYSCALL_SCHEDULER_AFFINITY_GET 5182
+
+#define OKL4_SYSCALL_SCHEDULER_AFFINITY_SET 5183
#define OKL4_SYSCALL_TIMER_CANCEL 5176
@@ -6039,6 +6227,8 @@ _okl4_sys_vinterrupt_raise(okl4_kcap_t virqline, okl4_virq_flags_t payload)
/*lint -esym(621, _okl4_sys_axon_set_send_queue) */
/*lint -esym(621, _okl4_sys_axon_set_send_segment) */
/*lint -esym(621, _okl4_sys_axon_trigger_send) */
+/*lint -esym(621, _okl4_sys_debug_resume) */
+/*lint -esym(621, _okl4_sys_debug_suspend) */
/*lint -esym(621, _okl4_sys_interrupt_ack) */
/*lint -esym(621, _okl4_sys_interrupt_attach_private) */
/*lint -esym(621, _okl4_sys_interrupt_attach_shared) */
@@ -6093,7 +6283,8 @@ _okl4_sys_vinterrupt_raise(okl4_kcap_t virqline, okl4_virq_flags_t payload)
/*lint -esym(621, _okl4_sys_schedule_profile_vcpu_disable) */
/*lint -esym(621, _okl4_sys_schedule_profile_vcpu_enable) */
/*lint -esym(621, _okl4_sys_schedule_profile_vcpu_get_data) */
-/*lint -esym(621, _okl4_sys_scheduler_suspend) */
+/*lint -esym(621, _okl4_sys_scheduler_affinity_get) */
+/*lint -esym(621, _okl4_sys_scheduler_affinity_set) */
/*lint -esym(621, _okl4_sys_timer_cancel) */
/*lint -esym(621, _okl4_sys_timer_get_resolution) */
/*lint -esym(621, _okl4_sys_timer_get_time) */
diff --git a/include/microvisor/kernel/types.h b/include/microvisor/kernel/types.h
index c87285c776af..d668fa3f7db3 100644
--- a/include/microvisor/kernel/types.h
+++ b/include/microvisor/kernel/types.h
@@ -534,6 +534,12 @@ typedef uint32_t okl4_arm_psci_function_t;
#define OKL4_ARM_PSCI_FUNCTION_PSCI_STAT_RESIDENCY ((okl4_arm_psci_function_t)0x10U)
/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_PSCI_STAT_COUNT) */
#define OKL4_ARM_PSCI_FUNCTION_PSCI_STAT_COUNT ((okl4_arm_psci_function_t)0x11U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_SYSTEM_RESET2) */
+#define OKL4_ARM_PSCI_FUNCTION_SYSTEM_RESET2 ((okl4_arm_psci_function_t)0x12U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_MEM_PROTECT) */
+#define OKL4_ARM_PSCI_FUNCTION_MEM_PROTECT ((okl4_arm_psci_function_t)0x13U)
+/*lint -esym(621, OKL4_ARM_PSCI_FUNCTION_MEM_PROTECT_CHECK_RANGE) */
+#define OKL4_ARM_PSCI_FUNCTION_MEM_PROTECT_CHECK_RANGE ((okl4_arm_psci_function_t)0x14U)
/*lint -esym(714, okl4_arm_psci_function_is_element_of) */
OKL4_FORCE_INLINE okl4_bool_t
@@ -563,7 +569,10 @@ okl4_arm_psci_function_is_element_of(okl4_arm_psci_function_t var)
(var == OKL4_ARM_PSCI_FUNCTION_SYSTEM_SUSPEND) ||
(var == OKL4_ARM_PSCI_FUNCTION_PSCI_SET_SUSPEND_MODE) ||
(var == OKL4_ARM_PSCI_FUNCTION_PSCI_STAT_RESIDENCY) ||
- (var == OKL4_ARM_PSCI_FUNCTION_PSCI_STAT_COUNT));
+ (var == OKL4_ARM_PSCI_FUNCTION_PSCI_STAT_COUNT) ||
+ (var == OKL4_ARM_PSCI_FUNCTION_SYSTEM_RESET2) ||
+ (var == OKL4_ARM_PSCI_FUNCTION_MEM_PROTECT) ||
+ (var == OKL4_ARM_PSCI_FUNCTION_MEM_PROTECT_CHECK_RANGE));
}
@@ -616,220 +625,6 @@ okl4_arm_psci_result_is_element_of(okl4_arm_psci_result_t var)
/**
- - BITS 15..0 - @ref OKL4_MASK_STATE_ID_ARM_PSCI_SUSPEND_STATE
- - BIT 16 - @ref OKL4_MASK_POWER_DOWN_ARM_PSCI_SUSPEND_STATE
- - BITS 25..24 - @ref OKL4_MASK_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE
-*/
-
-/*lint -esym(621, okl4_arm_psci_suspend_state_t) */
-typedef uint32_t okl4_arm_psci_suspend_state_t;
-
-/*lint -esym(621, okl4_arm_psci_suspend_state_getstateid) */
-/*lint -esym(714, okl4_arm_psci_suspend_state_getstateid) */
-OKL4_FORCE_INLINE uint32_t
-okl4_arm_psci_suspend_state_getstateid(const okl4_arm_psci_suspend_state_t *x);
-
-/*lint -esym(621, okl4_arm_psci_suspend_state_setstateid) */
-OKL4_FORCE_INLINE void
-okl4_arm_psci_suspend_state_setstateid(okl4_arm_psci_suspend_state_t *x, uint32_t _state_id);
-
-/*lint -esym(621, okl4_arm_psci_suspend_state_getpowerdown) */
-/*lint -esym(714, okl4_arm_psci_suspend_state_getpowerdown) */
-OKL4_FORCE_INLINE okl4_bool_t
-okl4_arm_psci_suspend_state_getpowerdown(const okl4_arm_psci_suspend_state_t *x);
-
-/*lint -esym(621, okl4_arm_psci_suspend_state_setpowerdown) */
-OKL4_FORCE_INLINE void
-okl4_arm_psci_suspend_state_setpowerdown(okl4_arm_psci_suspend_state_t *x, okl4_bool_t _power_down);
-
-/*lint -esym(621, okl4_arm_psci_suspend_state_getpowerlevel) */
-/*lint -esym(714, okl4_arm_psci_suspend_state_getpowerlevel) */
-OKL4_FORCE_INLINE uint32_t
-okl4_arm_psci_suspend_state_getpowerlevel(const okl4_arm_psci_suspend_state_t *x);
-
-/*lint -esym(621, okl4_arm_psci_suspend_state_setpowerlevel) */
-OKL4_FORCE_INLINE void
-okl4_arm_psci_suspend_state_setpowerlevel(okl4_arm_psci_suspend_state_t *x, uint32_t _power_level);
-
-/*lint -esym(714, okl4_arm_psci_suspend_state_init) */
-OKL4_FORCE_INLINE void
-okl4_arm_psci_suspend_state_init(okl4_arm_psci_suspend_state_t *x);
-
-/*lint -esym(714, okl4_arm_psci_suspend_state_cast) */
-OKL4_FORCE_INLINE okl4_arm_psci_suspend_state_t
-okl4_arm_psci_suspend_state_cast(uint32_t p, okl4_bool_t force);
-
-
-
-/*lint -esym(621, OKL4_ARM_PSCI_POWER_LEVEL_CPU) */
-#define OKL4_ARM_PSCI_POWER_LEVEL_CPU ((okl4_arm_psci_suspend_state_t)(0U))
-
-/*lint -esym(621, OKL4_ARM_PSCI_SUSPEND_STATE_STATE_ID_MASK) */
-#define OKL4_ARM_PSCI_SUSPEND_STATE_STATE_ID_MASK ((okl4_arm_psci_suspend_state_t)65535U) /* Deprecated */
-/*lint -esym(621, OKL4_MASK_STATE_ID_ARM_PSCI_SUSPEND_STATE) */
-#define OKL4_MASK_STATE_ID_ARM_PSCI_SUSPEND_STATE ((okl4_arm_psci_suspend_state_t)65535U)
-/*lint -esym(621, OKL4_SHIFT_STATE_ID_ARM_PSCI_SUSPEND_STATE) */
-#define OKL4_SHIFT_STATE_ID_ARM_PSCI_SUSPEND_STATE (0)
-/*lint -esym(621, OKL4_WIDTH_STATE_ID_ARM_PSCI_SUSPEND_STATE) */
-#define OKL4_WIDTH_STATE_ID_ARM_PSCI_SUSPEND_STATE (16)
-/*lint -esym(621, OKL4_ARM_PSCI_SUSPEND_STATE_POWER_DOWN_MASK) */
-#define OKL4_ARM_PSCI_SUSPEND_STATE_POWER_DOWN_MASK ((okl4_arm_psci_suspend_state_t)1U << 16) /* Deprecated */
-/*lint -esym(621, OKL4_MASK_POWER_DOWN_ARM_PSCI_SUSPEND_STATE) */
-#define OKL4_MASK_POWER_DOWN_ARM_PSCI_SUSPEND_STATE ((okl4_arm_psci_suspend_state_t)1U << 16)
-/*lint -esym(621, OKL4_SHIFT_POWER_DOWN_ARM_PSCI_SUSPEND_STATE) */
-#define OKL4_SHIFT_POWER_DOWN_ARM_PSCI_SUSPEND_STATE (16)
-/*lint -esym(621, OKL4_WIDTH_POWER_DOWN_ARM_PSCI_SUSPEND_STATE) */
-#define OKL4_WIDTH_POWER_DOWN_ARM_PSCI_SUSPEND_STATE (1)
-/*lint -esym(621, OKL4_ARM_PSCI_SUSPEND_STATE_POWER_LEVEL_MASK) */
-#define OKL4_ARM_PSCI_SUSPEND_STATE_POWER_LEVEL_MASK ((okl4_arm_psci_suspend_state_t)3U << 24) /* Deprecated */
-/*lint -esym(621, OKL4_MASK_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE) */
-#define OKL4_MASK_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE ((okl4_arm_psci_suspend_state_t)3U << 24)
-/*lint -esym(621, OKL4_SHIFT_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE) */
-#define OKL4_SHIFT_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE (24)
-/*lint -esym(621, OKL4_WIDTH_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE) */
-#define OKL4_WIDTH_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE (2)
-
-
-/*lint -sem(okl4_arm_psci_suspend_state_getstateid, 1p, @n >= 0 && @n <= 65535) */
-/*lint -esym(621, okl4_arm_psci_suspend_state_getstateid) */
-/*lint -esym(714, okl4_arm_psci_suspend_state_getstateid) */
-OKL4_FORCE_INLINE uint32_t
-okl4_arm_psci_suspend_state_getstateid(const okl4_arm_psci_suspend_state_t *x)
-{
- uint32_t field;
- union {
- /*lint -e{806} -e{958} -e{959} */
- struct {
- uint32_t field : 16;
- } bits;
- okl4_arm_psci_suspend_state_t raw;
- } _conv;
-
- _conv.raw = *x;
- field = (uint32_t)_conv.bits.field;
- return field;
-}
-
-/*lint -sem(okl4_arm_psci_suspend_state_setstateid, 2n >= 0 && 2n <= 65535) */
-/*lint -esym(714, okl4_arm_psci_suspend_state_setstateid) */
-
-/*lint -esym(621, okl4_arm_psci_suspend_state_setstateid) */
-OKL4_FORCE_INLINE void
-okl4_arm_psci_suspend_state_setstateid(okl4_arm_psci_suspend_state_t *x, uint32_t _state_id)
-{
- union {
- /*lint -e{806} -e{958} -e{959} */
- struct {
- uint32_t field : 16;
- } bits;
- okl4_arm_psci_suspend_state_t raw;
- } _conv;
-
- _conv.raw = *x;
- _conv.bits.field = (uint32_t)_state_id;
- *x = _conv.raw;
-}
-/*lint -sem(okl4_arm_psci_suspend_state_getpowerdown, 1p, @n >= 0 && @n <= 1) */
-/*lint -esym(621, okl4_arm_psci_suspend_state_getpowerdown) */
-/*lint -esym(714, okl4_arm_psci_suspend_state_getpowerdown) */
-OKL4_FORCE_INLINE okl4_bool_t
-okl4_arm_psci_suspend_state_getpowerdown(const okl4_arm_psci_suspend_state_t *x)
-{
- okl4_bool_t field;
- union {
- /*lint -e{806} -e{958} -e{959} */
- struct {
- uint32_t _skip : 16;
- _Bool field : 1;
- } bits;
- okl4_arm_psci_suspend_state_t raw;
- } _conv;
-
- _conv.raw = *x;
- field = (okl4_bool_t)_conv.bits.field;
- return field;
-}
-
-/*lint -sem(okl4_arm_psci_suspend_state_setpowerdown, 2n >= 0 && 2n <= 1) */
-/*lint -esym(714, okl4_arm_psci_suspend_state_setpowerdown) */
-
-/*lint -esym(621, okl4_arm_psci_suspend_state_setpowerdown) */
-OKL4_FORCE_INLINE void
-okl4_arm_psci_suspend_state_setpowerdown(okl4_arm_psci_suspend_state_t *x, okl4_bool_t _power_down)
-{
- union {
- /*lint -e{806} -e{958} -e{959} */
- struct {
- uint32_t _skip : 16;
- _Bool field : 1;
- } bits;
- okl4_arm_psci_suspend_state_t raw;
- } _conv;
-
- _conv.raw = *x;
- _conv.bits.field = (_Bool)_power_down;
- *x = _conv.raw;
-}
-/*lint -sem(okl4_arm_psci_suspend_state_getpowerlevel, 1p, @n >= 0 && @n <= 3) */
-/*lint -esym(621, okl4_arm_psci_suspend_state_getpowerlevel) */
-/*lint -esym(714, okl4_arm_psci_suspend_state_getpowerlevel) */
-OKL4_FORCE_INLINE uint32_t
-okl4_arm_psci_suspend_state_getpowerlevel(const okl4_arm_psci_suspend_state_t *x)
-{
- uint32_t field;
- union {
- /*lint -e{806} -e{958} -e{959} */
- struct {
- uint32_t _skip : 24;
- uint32_t field : 2;
- } bits;
- okl4_arm_psci_suspend_state_t raw;
- } _conv;
-
- _conv.raw = *x;
- field = (uint32_t)_conv.bits.field;
- return field;
-}
-
-/*lint -sem(okl4_arm_psci_suspend_state_setpowerlevel, 2n >= 0 && 2n <= 3) */
-/*lint -esym(714, okl4_arm_psci_suspend_state_setpowerlevel) */
-
-/*lint -esym(621, okl4_arm_psci_suspend_state_setpowerlevel) */
-OKL4_FORCE_INLINE void
-okl4_arm_psci_suspend_state_setpowerlevel(okl4_arm_psci_suspend_state_t *x, uint32_t _power_level)
-{
- union {
- /*lint -e{806} -e{958} -e{959} */
- struct {
- uint32_t _skip : 24;
- uint32_t field : 2;
- } bits;
- okl4_arm_psci_suspend_state_t raw;
- } _conv;
-
- _conv.raw = *x;
- _conv.bits.field = (uint32_t)_power_level;
- *x = _conv.raw;
-}
-/*lint -esym(714, okl4_arm_psci_suspend_state_init) */
-OKL4_FORCE_INLINE void
-okl4_arm_psci_suspend_state_init(okl4_arm_psci_suspend_state_t *x)
-{
- *x = (okl4_arm_psci_suspend_state_t)0U;
-}
-
-/*lint -esym(714, okl4_arm_psci_suspend_state_cast) */
-OKL4_FORCE_INLINE okl4_arm_psci_suspend_state_t
-okl4_arm_psci_suspend_state_cast(uint32_t p, okl4_bool_t force)
-{
- okl4_arm_psci_suspend_state_t x = (okl4_arm_psci_suspend_state_t)p;
- (void)force;
- return x;
-}
-
-
-
-/**
- BIT 0 - @ref OKL4_MASK_MMU_ENABLE_ARM_SCTLR
- BIT 1 - @ref OKL4_MASK_ALIGNMENT_CHECK_ENABLE_ARM_SCTLR
- BIT 2 - @ref OKL4_MASK_DATA_CACHE_ENABLE_ARM_SCTLR
@@ -850,7 +645,7 @@ okl4_arm_psci_suspend_state_cast(uint32_t p, okl4_bool_t force)
- BIT 19 - @ref OKL4_MASK_WRITE_EXEC_NEVER_ARM_SCTLR
- BIT 20 - @ref OKL4_MASK_USER_WRITE_EXEC_NEVER_ARM_SCTLR
- BIT 22 - @ref OKL4_MASK_RESERVED22_ARM_SCTLR
- - BIT 23 - @ref OKL4_MASK_RESERVED23_ARM_SCTLR
+ - BIT 23 - @ref OKL4_MASK_SPAN_ARM_SCTLR
- BIT 24 - @ref OKL4_MASK_EL0_ENDIANNESS_ARM_SCTLR
- BIT 25 - @ref OKL4_MASK_EXCEPTION_ENDIANNESS_ARM_SCTLR
- BIT 28 - @ref OKL4_MASK_TEX_REMAP_ENABLE_ARM_SCTLR
@@ -961,10 +756,14 @@ okl4_arm_sctlr_setwriteexecnever(okl4_arm_sctlr_t *x, okl4_bool_t _write_exec_ne
OKL4_FORCE_INLINE uint32_t
okl4_arm_sctlr_getreserved22(const okl4_arm_sctlr_t *x);
-/*lint -esym(621, okl4_arm_sctlr_getreserved23) */
-/*lint -esym(714, okl4_arm_sctlr_getreserved23) */
+/*lint -esym(621, okl4_arm_sctlr_getspan) */
+/*lint -esym(714, okl4_arm_sctlr_getspan) */
OKL4_FORCE_INLINE uint32_t
-okl4_arm_sctlr_getreserved23(const okl4_arm_sctlr_t *x);
+okl4_arm_sctlr_getspan(const okl4_arm_sctlr_t *x);
+
+/*lint -esym(621, okl4_arm_sctlr_setspan) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setspan(okl4_arm_sctlr_t *x, uint32_t _span);
/*lint -esym(621, okl4_arm_sctlr_getel0endianness) */
/*lint -esym(714, okl4_arm_sctlr_getel0endianness) */
@@ -1254,14 +1053,14 @@ okl4_arm_sctlr_cast(uint32_t p, okl4_bool_t force);
#define OKL4_SHIFT_RESERVED22_ARM_SCTLR (22)
/*lint -esym(621, OKL4_WIDTH_RESERVED22_ARM_SCTLR) */
#define OKL4_WIDTH_RESERVED22_ARM_SCTLR (1)
-/*lint -esym(621, OKL4_ARM_SCTLR_RESERVED23_MASK) */
-#define OKL4_ARM_SCTLR_RESERVED23_MASK ((okl4_arm_sctlr_t)1U << 23) /* Deprecated */
-/*lint -esym(621, OKL4_MASK_RESERVED23_ARM_SCTLR) */
-#define OKL4_MASK_RESERVED23_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 23)
-/*lint -esym(621, OKL4_SHIFT_RESERVED23_ARM_SCTLR) */
-#define OKL4_SHIFT_RESERVED23_ARM_SCTLR (23)
-/*lint -esym(621, OKL4_WIDTH_RESERVED23_ARM_SCTLR) */
-#define OKL4_WIDTH_RESERVED23_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ARM_SCTLR_SPAN_MASK) */
+#define OKL4_ARM_SCTLR_SPAN_MASK ((okl4_arm_sctlr_t)1U << 23) /* Deprecated */
+/*lint -esym(621, OKL4_MASK_SPAN_ARM_SCTLR) */
+#define OKL4_MASK_SPAN_ARM_SCTLR ((okl4_arm_sctlr_t)1U << 23)
+/*lint -esym(621, OKL4_SHIFT_SPAN_ARM_SCTLR) */
+#define OKL4_SHIFT_SPAN_ARM_SCTLR (23)
+/*lint -esym(621, OKL4_WIDTH_SPAN_ARM_SCTLR) */
+#define OKL4_WIDTH_SPAN_ARM_SCTLR (1)
/*lint -esym(621, OKL4_ARM_SCTLR_EL0_ENDIANNESS_MASK) */
#define OKL4_ARM_SCTLR_EL0_ENDIANNESS_MASK ((okl4_arm_sctlr_t)1U << 24) /* Deprecated */
/*lint -esym(621, OKL4_MASK_EL0_ENDIANNESS_ARM_SCTLR) */
@@ -2082,11 +1881,11 @@ okl4_arm_sctlr_getreserved22(const okl4_arm_sctlr_t *x)
return field;
}
-/*lint -sem(okl4_arm_sctlr_getreserved23, 1p, @n >= 0 && @n <= 1) */
-/*lint -esym(621, okl4_arm_sctlr_getreserved23) */
-/*lint -esym(714, okl4_arm_sctlr_getreserved23) */
+/*lint -sem(okl4_arm_sctlr_getspan, 1p, @n >= 0 && @n <= 1) */
+/*lint -esym(621, okl4_arm_sctlr_getspan) */
+/*lint -esym(714, okl4_arm_sctlr_getspan) */
OKL4_FORCE_INLINE uint32_t
-okl4_arm_sctlr_getreserved23(const okl4_arm_sctlr_t *x)
+okl4_arm_sctlr_getspan(const okl4_arm_sctlr_t *x)
{
uint32_t field;
union {
@@ -2103,6 +1902,26 @@ okl4_arm_sctlr_getreserved23(const okl4_arm_sctlr_t *x)
return field;
}
+/*lint -sem(okl4_arm_sctlr_setspan, 2n >= 0 && 2n <= 1) */
+/*lint -esym(714, okl4_arm_sctlr_setspan) */
+
+/*lint -esym(621, okl4_arm_sctlr_setspan) */
+OKL4_FORCE_INLINE void
+okl4_arm_sctlr_setspan(okl4_arm_sctlr_t *x, uint32_t _span)
+{
+ union {
+ /*lint -e{806} -e{958} -e{959} */
+ struct {
+ uint32_t _skip : 23;
+ uint32_t field : 1;
+ } bits;
+ okl4_arm_sctlr_t raw;
+ } _conv;
+
+ _conv.raw = *x;
+ _conv.bits.field = (uint32_t)_span;
+ *x = _conv.raw;
+}
/*lint -sem(okl4_arm_sctlr_getel0endianness, 1p, @n >= 0 && @n <= 1) */
/*lint -esym(621, okl4_arm_sctlr_getel0endianness) */
/*lint -esym(714, okl4_arm_sctlr_getel0endianness) */
@@ -2325,8 +2144,6 @@ okl4_arm_sctlr_cast(uint32_t p, okl4_bool_t force)
x |= (okl4_arm_sctlr_t)0x800U; /* x.reserved11 */
x &= ~(okl4_arm_sctlr_t)0x400000U;
x |= (okl4_arm_sctlr_t)0x400000U; /* x.reserved22 */
- x &= ~(okl4_arm_sctlr_t)0x800000U;
- x |= (okl4_arm_sctlr_t)0x800000U; /* x.reserved23 */
}
return x;
}
@@ -6332,6 +6149,12 @@ typedef uint32_t okl4_cpu_exec_mode;
+
+typedef okl4_count_t okl4_cpu_id_t;
+
+
+
+
/**
CPU mode specifier
@@ -6816,20 +6639,17 @@ struct okl4_env_segment_table {
- @ref OKL4_ERROR_INTERRUPT_NOT_ATTACHED
- @ref OKL4_ERROR_INVALID_ARGUMENT
- @ref OKL4_ERROR_INVALID_DESIGNATOR
- - @ref OKL4_ERROR_INVALID_POWER_STATE
- @ref OKL4_ERROR_INVALID_SEGMENT_INDEX
- @ref OKL4_ERROR_MEMORY_FAULT
- @ref OKL4_ERROR_MISSING_MAPPING
- @ref OKL4_ERROR_NON_EMPTY_MMU_CONTEXT
- @ref OKL4_ERROR_NOT_IN_SEGMENT
- - @ref OKL4_ERROR_NOT_LAST_CPU
- @ref OKL4_ERROR_NO_RESOURCES
- @ref OKL4_ERROR_PIPE_BAD_STATE
- @ref OKL4_ERROR_PIPE_EMPTY
- @ref OKL4_ERROR_PIPE_FULL
- @ref OKL4_ERROR_PIPE_NOT_READY
- @ref OKL4_ERROR_PIPE_RECV_OVERFLOW
- - @ref OKL4_ERROR_POWER_VCPU_RESUMED
- @ref OKL4_ERROR_SEGMENT_USED
- @ref OKL4_ERROR_THREAD_ALREADY_WATCHING_SUSPENDED
- @ref OKL4_ERROR_TIMER_ACTIVE
@@ -6837,6 +6657,7 @@ struct okl4_env_segment_table {
- @ref OKL4_ERROR_TRY_AGAIN
- @ref OKL4_ERROR_WOULD_BLOCK
- @ref OKL4_ERROR_ALLOC_EXHAUSTED
+ - @ref OKL4_ERROR_INVALID_AFFINITY
- @ref OKL4_ERROR_KSP_ERROR_0
- @ref OKL4_ERROR_KSP_ERROR_1
- @ref OKL4_ERROR_KSP_ERROR_2
@@ -6940,12 +6761,6 @@ typedef uint32_t okl4_error_t;
/*lint -esym(621, OKL4_ERROR_INVALID_DESIGNATOR) */
#define OKL4_ERROR_INVALID_DESIGNATOR ((okl4_error_t)0x11U)
/**
- The operation failed because the power_state
- argument is invalid.
-*/
-/*lint -esym(621, OKL4_ERROR_INVALID_POWER_STATE) */
-#define OKL4_ERROR_INVALID_POWER_STATE ((okl4_error_t)0x12U)
-/**
The operation failed because the given segment index does
not correspond to an attached physical segment.
*/
@@ -6976,12 +6791,6 @@ typedef uint32_t okl4_error_t;
/*lint -esym(621, OKL4_ERROR_NOT_IN_SEGMENT) */
#define OKL4_ERROR_NOT_IN_SEGMENT ((okl4_error_t)0x17U)
/**
- The operation failed because the caller is not on the last
- online cpu.
-*/
-/*lint -esym(621, OKL4_ERROR_NOT_LAST_CPU) */
-#define OKL4_ERROR_NOT_LAST_CPU ((okl4_error_t)0x18U)
-/**
Insufficient resources are available to perform the operation.
*/
/*lint -esym(621, OKL4_ERROR_NO_RESOURCES) */
@@ -7012,12 +6821,6 @@ typedef uint32_t okl4_error_t;
/*lint -esym(621, OKL4_ERROR_PIPE_RECV_OVERFLOW) */
#define OKL4_ERROR_PIPE_RECV_OVERFLOW ((okl4_error_t)0x1eU)
/**
- The operation failed because at least one VCPU has a monitored
- power state and is not currently suspended.
-*/
-/*lint -esym(621, OKL4_ERROR_POWER_VCPU_RESUMED) */
-#define OKL4_ERROR_POWER_VCPU_RESUMED ((okl4_error_t)0x1fU)
-/**
The operation requires a segment to be unused, or not attached
to an MMU context.
*/
@@ -7052,6 +6855,11 @@ typedef uint32_t okl4_error_t;
/*lint -esym(621, OKL4_ERROR_ALLOC_EXHAUSTED) */
#define OKL4_ERROR_ALLOC_EXHAUSTED ((okl4_error_t)0x26U)
/**
+ The operation failed because the affinity argument is invalid.
+*/
+/*lint -esym(621, OKL4_ERROR_INVALID_AFFINITY) */
+#define OKL4_ERROR_INVALID_AFFINITY ((okl4_error_t)0x27U)
+/**
KSP specific error 0
*/
/*lint -esym(621, OKL4_ERROR_KSP_ERROR_0) */
@@ -7148,9 +6956,9 @@ okl4_error_is_element_of(okl4_error_t var)
(var == OKL4_ERROR_INTERRUPT_ALREADY_ATTACHED) ||
(var == OKL4_ERROR_INTERRUPT_INVALID_IRQ) ||
(var == OKL4_ERROR_INTERRUPT_NOT_ATTACHED) ||
+ (var == OKL4_ERROR_INVALID_AFFINITY) ||
(var == OKL4_ERROR_INVALID_ARGUMENT) ||
(var == OKL4_ERROR_INVALID_DESIGNATOR) ||
- (var == OKL4_ERROR_INVALID_POWER_STATE) ||
(var == OKL4_ERROR_INVALID_SEGMENT_INDEX) ||
(var == OKL4_ERROR_KSP_ERROR_0) ||
(var == OKL4_ERROR_KSP_ERROR_1) ||
@@ -7170,7 +6978,6 @@ okl4_error_is_element_of(okl4_error_t var)
(var == OKL4_ERROR_NON_EMPTY_MMU_CONTEXT) ||
(var == OKL4_ERROR_NOT_IMPLEMENTED) ||
(var == OKL4_ERROR_NOT_IN_SEGMENT) ||
- (var == OKL4_ERROR_NOT_LAST_CPU) ||
(var == OKL4_ERROR_NO_RESOURCES) ||
(var == OKL4_ERROR_OK) ||
(var == OKL4_ERROR_PIPE_BAD_STATE) ||
@@ -7178,7 +6985,6 @@ okl4_error_is_element_of(okl4_error_t var)
(var == OKL4_ERROR_PIPE_FULL) ||
(var == OKL4_ERROR_PIPE_NOT_READY) ||
(var == OKL4_ERROR_PIPE_RECV_OVERFLOW) ||
- (var == OKL4_ERROR_POWER_VCPU_RESUMED) ||
(var == OKL4_ERROR_SEGMENT_USED) ||
(var == OKL4_ERROR_THREAD_ALREADY_WATCHING_SUSPENDED) ||
(var == OKL4_ERROR_TIMER_ACTIVE) ||
@@ -9075,13 +8881,16 @@ okl4_pipe_state_cast(uint8_t p, okl4_bool_t force)
typedef uint32_t okl4_power_state_t;
/*lint -esym(621, OKL4_POWER_STATE_IDLE) */
-#define OKL4_POWER_STATE_IDLE ((okl4_power_state_t)(0U))
+#define OKL4_POWER_STATE_IDLE ((okl4_power_state_t)(2147483648U))
-/*lint -esym(621, OKL4_POWER_STATE_PLATFORM_BASE) */
-#define OKL4_POWER_STATE_PLATFORM_BASE ((okl4_power_state_t)(256U))
+/*lint -esym(621, OKL4_POWER_STATE_PLATFORM_LAST) */
+#define OKL4_POWER_STATE_PLATFORM_LAST ((okl4_power_state_t)(2147483647U))
/*lint -esym(621, OKL4_POWER_STATE_POWEROFF) */
-#define OKL4_POWER_STATE_POWEROFF ((okl4_power_state_t)(1U))
+#define OKL4_POWER_STATE_POWEROFF ((okl4_power_state_t)(2147483650U))
+
+/*lint -esym(621, OKL4_POWER_STATE_RETENTION) */
+#define OKL4_POWER_STATE_RETENTION ((okl4_power_state_t)(2147483649U))
@@ -11298,151 +11107,157 @@ typedef uint32_t okl4_tracepoint_evt_t;
#define OKL4_TRACEPOINT_EVT_SWI_AXON_SET_SEND_SEGMENT ((okl4_tracepoint_evt_t)0xbU)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_AXON_TRIGGER_SEND) */
#define OKL4_TRACEPOINT_EVT_SWI_AXON_TRIGGER_SEND ((okl4_tracepoint_evt_t)0xcU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_DEBUG_RESUME) */
+#define OKL4_TRACEPOINT_EVT_SWI_DEBUG_RESUME ((okl4_tracepoint_evt_t)0xdU)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_DEBUG_SUSPEND) */
+#define OKL4_TRACEPOINT_EVT_SWI_DEBUG_SUSPEND ((okl4_tracepoint_evt_t)0xeU)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ACK) */
-#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ACK ((okl4_tracepoint_evt_t)0xdU)
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ACK ((okl4_tracepoint_evt_t)0xfU)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_PRIVATE) */
-#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_PRIVATE ((okl4_tracepoint_evt_t)0xeU)
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_PRIVATE ((okl4_tracepoint_evt_t)0x10U)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_SHARED) */
-#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_SHARED ((okl4_tracepoint_evt_t)0xfU)
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_SHARED ((okl4_tracepoint_evt_t)0x11U)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_DETACH) */
-#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_DETACH ((okl4_tracepoint_evt_t)0x10U)
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_DETACH ((okl4_tracepoint_evt_t)0x12U)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_DIST_ENABLE) */
-#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_DIST_ENABLE ((okl4_tracepoint_evt_t)0x11U)
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_DIST_ENABLE ((okl4_tracepoint_evt_t)0x13U)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_EOI) */
-#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_EOI ((okl4_tracepoint_evt_t)0x12U)
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_EOI ((okl4_tracepoint_evt_t)0x14U)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_GET_HIGHEST_PRIORITY_PENDING) */
-#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_GET_HIGHEST_PRIORITY_PENDING ((okl4_tracepoint_evt_t)0x13U)
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_GET_HIGHEST_PRIORITY_PENDING ((okl4_tracepoint_evt_t)0x15U)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_GET_PAYLOAD) */
-#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_GET_PAYLOAD ((okl4_tracepoint_evt_t)0x14U)
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_GET_PAYLOAD ((okl4_tracepoint_evt_t)0x16U)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_LIMITS) */
-#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_LIMITS ((okl4_tracepoint_evt_t)0x15U)
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_LIMITS ((okl4_tracepoint_evt_t)0x17U)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_MASK) */
-#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_MASK ((okl4_tracepoint_evt_t)0x16U)
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_MASK ((okl4_tracepoint_evt_t)0x18U)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_RAISE) */
-#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_RAISE ((okl4_tracepoint_evt_t)0x17U)
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_RAISE ((okl4_tracepoint_evt_t)0x19U)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_BINARY_POINT) */
-#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_BINARY_POINT ((okl4_tracepoint_evt_t)0x18U)
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_BINARY_POINT ((okl4_tracepoint_evt_t)0x1aU)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONFIG) */
-#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONFIG ((okl4_tracepoint_evt_t)0x19U)
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONFIG ((okl4_tracepoint_evt_t)0x1bU)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONTROL) */
-#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONTROL ((okl4_tracepoint_evt_t)0x1aU)
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONTROL ((okl4_tracepoint_evt_t)0x1cU)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY) */
-#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY ((okl4_tracepoint_evt_t)0x1bU)
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY ((okl4_tracepoint_evt_t)0x1dU)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY_MASK) */
-#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY_MASK ((okl4_tracepoint_evt_t)0x1cU)
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY_MASK ((okl4_tracepoint_evt_t)0x1eU)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_TARGETS) */
-#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_TARGETS ((okl4_tracepoint_evt_t)0x1dU)
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_SET_TARGETS ((okl4_tracepoint_evt_t)0x1fU)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_UNMASK) */
-#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_UNMASK ((okl4_tracepoint_evt_t)0x1eU)
+#define OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_UNMASK ((okl4_tracepoint_evt_t)0x20U)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_KDB_INTERACT) */
-#define OKL4_TRACEPOINT_EVT_SWI_KDB_INTERACT ((okl4_tracepoint_evt_t)0x1fU)
+#define OKL4_TRACEPOINT_EVT_SWI_KDB_INTERACT ((okl4_tracepoint_evt_t)0x21U)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_KDB_SET_OBJECT_NAME) */
-#define OKL4_TRACEPOINT_EVT_SWI_KDB_SET_OBJECT_NAME ((okl4_tracepoint_evt_t)0x20U)
+#define OKL4_TRACEPOINT_EVT_SWI_KDB_SET_OBJECT_NAME ((okl4_tracepoint_evt_t)0x22U)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_KSP_PROCEDURE_CALL) */
-#define OKL4_TRACEPOINT_EVT_SWI_KSP_PROCEDURE_CALL ((okl4_tracepoint_evt_t)0x21U)
+#define OKL4_TRACEPOINT_EVT_SWI_KSP_PROCEDURE_CALL ((okl4_tracepoint_evt_t)0x23U)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_ATTACH_SEGMENT) */
-#define OKL4_TRACEPOINT_EVT_SWI_MMU_ATTACH_SEGMENT ((okl4_tracepoint_evt_t)0x22U)
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_ATTACH_SEGMENT ((okl4_tracepoint_evt_t)0x24U)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_DETACH_SEGMENT) */
-#define OKL4_TRACEPOINT_EVT_SWI_MMU_DETACH_SEGMENT ((okl4_tracepoint_evt_t)0x23U)
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_DETACH_SEGMENT ((okl4_tracepoint_evt_t)0x25U)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE) */
-#define OKL4_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE ((okl4_tracepoint_evt_t)0x24U)
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE ((okl4_tracepoint_evt_t)0x26U)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE_PN) */
-#define OKL4_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE_PN ((okl4_tracepoint_evt_t)0x25U)
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE_PN ((okl4_tracepoint_evt_t)0x27U)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PAGE) */
-#define OKL4_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PAGE ((okl4_tracepoint_evt_t)0x26U)
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PAGE ((okl4_tracepoint_evt_t)0x28U)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PN) */
-#define OKL4_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PN ((okl4_tracepoint_evt_t)0x27U)
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PN ((okl4_tracepoint_evt_t)0x29U)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_MAP_PAGE) */
-#define OKL4_TRACEPOINT_EVT_SWI_MMU_MAP_PAGE ((okl4_tracepoint_evt_t)0x28U)
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_MAP_PAGE ((okl4_tracepoint_evt_t)0x2aU)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_MAP_PN) */
-#define OKL4_TRACEPOINT_EVT_SWI_MMU_MAP_PN ((okl4_tracepoint_evt_t)0x29U)
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_MAP_PN ((okl4_tracepoint_evt_t)0x2bU)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_UNMAP_PAGE) */
-#define OKL4_TRACEPOINT_EVT_SWI_MMU_UNMAP_PAGE ((okl4_tracepoint_evt_t)0x2aU)
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_UNMAP_PAGE ((okl4_tracepoint_evt_t)0x2cU)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_UNMAP_PN) */
-#define OKL4_TRACEPOINT_EVT_SWI_MMU_UNMAP_PN ((okl4_tracepoint_evt_t)0x2bU)
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_UNMAP_PN ((okl4_tracepoint_evt_t)0x2dU)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_ATTRS) */
-#define OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_ATTRS ((okl4_tracepoint_evt_t)0x2cU)
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_ATTRS ((okl4_tracepoint_evt_t)0x2eU)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_PERMS) */
-#define OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_PERMS ((okl4_tracepoint_evt_t)0x2dU)
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_PERMS ((okl4_tracepoint_evt_t)0x2fU)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_ATTRS) */
-#define OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_ATTRS ((okl4_tracepoint_evt_t)0x2eU)
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_ATTRS ((okl4_tracepoint_evt_t)0x30U)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_PERMS) */
-#define OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_PERMS ((okl4_tracepoint_evt_t)0x2fU)
+#define OKL4_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_PERMS ((okl4_tracepoint_evt_t)0x31U)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_PERFORMANCE_NULL_SYSCALL) */
-#define OKL4_TRACEPOINT_EVT_SWI_PERFORMANCE_NULL_SYSCALL ((okl4_tracepoint_evt_t)0x30U)
+#define OKL4_TRACEPOINT_EVT_SWI_PERFORMANCE_NULL_SYSCALL ((okl4_tracepoint_evt_t)0x32U)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_PIPE_CONTROL) */
-#define OKL4_TRACEPOINT_EVT_SWI_PIPE_CONTROL ((okl4_tracepoint_evt_t)0x31U)
+#define OKL4_TRACEPOINT_EVT_SWI_PIPE_CONTROL ((okl4_tracepoint_evt_t)0x33U)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_PIPE_RECV) */
-#define OKL4_TRACEPOINT_EVT_SWI_PIPE_RECV ((okl4_tracepoint_evt_t)0x32U)
+#define OKL4_TRACEPOINT_EVT_SWI_PIPE_RECV ((okl4_tracepoint_evt_t)0x34U)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_PIPE_SEND) */
-#define OKL4_TRACEPOINT_EVT_SWI_PIPE_SEND ((okl4_tracepoint_evt_t)0x33U)
+#define OKL4_TRACEPOINT_EVT_SWI_PIPE_SEND ((okl4_tracepoint_evt_t)0x35U)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_PRIORITY_WAIVE) */
-#define OKL4_TRACEPOINT_EVT_SWI_PRIORITY_WAIVE ((okl4_tracepoint_evt_t)0x34U)
+#define OKL4_TRACEPOINT_EVT_SWI_PRIORITY_WAIVE ((okl4_tracepoint_evt_t)0x36U)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTER) */
-#define OKL4_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTER ((okl4_tracepoint_evt_t)0x35U)
+#define OKL4_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTER ((okl4_tracepoint_evt_t)0x37U)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTERS) */
-#define OKL4_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTERS ((okl4_tracepoint_evt_t)0x36U)
+#define OKL4_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTERS ((okl4_tracepoint_evt_t)0x38U)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_REMOTE_READ_MEMORY32) */
-#define OKL4_TRACEPOINT_EVT_SWI_REMOTE_READ_MEMORY32 ((okl4_tracepoint_evt_t)0x37U)
+#define OKL4_TRACEPOINT_EVT_SWI_REMOTE_READ_MEMORY32 ((okl4_tracepoint_evt_t)0x39U)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTER) */
-#define OKL4_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTER ((okl4_tracepoint_evt_t)0x38U)
+#define OKL4_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTER ((okl4_tracepoint_evt_t)0x3aU)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTERS) */
-#define OKL4_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTERS ((okl4_tracepoint_evt_t)0x39U)
+#define OKL4_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTERS ((okl4_tracepoint_evt_t)0x3bU)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_REMOTE_WRITE_MEMORY32) */
-#define OKL4_TRACEPOINT_EVT_SWI_REMOTE_WRITE_MEMORY32 ((okl4_tracepoint_evt_t)0x3aU)
+#define OKL4_TRACEPOINT_EVT_SWI_REMOTE_WRITE_MEMORY32 ((okl4_tracepoint_evt_t)0x3cU)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_STATUS_SUSPENDED) */
-#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_STATUS_SUSPENDED ((okl4_tracepoint_evt_t)0x3bU)
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_STATUS_SUSPENDED ((okl4_tracepoint_evt_t)0x3dU)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_WATCH_SUSPENDED) */
-#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_WATCH_SUSPENDED ((okl4_tracepoint_evt_t)0x3cU)
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_WATCH_SUSPENDED ((okl4_tracepoint_evt_t)0x3eU)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_DISABLE) */
-#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_DISABLE ((okl4_tracepoint_evt_t)0x3dU)
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_DISABLE ((okl4_tracepoint_evt_t)0x3fU)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_ENABLE) */
-#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_ENABLE ((okl4_tracepoint_evt_t)0x3eU)
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_ENABLE ((okl4_tracepoint_evt_t)0x40U)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_GET_DATA) */
-#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_GET_DATA ((okl4_tracepoint_evt_t)0x3fU)
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_GET_DATA ((okl4_tracepoint_evt_t)0x41U)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_DISABLE) */
-#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_DISABLE ((okl4_tracepoint_evt_t)0x40U)
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_DISABLE ((okl4_tracepoint_evt_t)0x42U)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_ENABLE) */
-#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_ENABLE ((okl4_tracepoint_evt_t)0x41U)
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_ENABLE ((okl4_tracepoint_evt_t)0x43U)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_GET_DATA) */
-#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_GET_DATA ((okl4_tracepoint_evt_t)0x42U)
-/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULER_SUSPEND) */
-#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULER_SUSPEND ((okl4_tracepoint_evt_t)0x43U)
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_GET_DATA ((okl4_tracepoint_evt_t)0x44U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULER_AFFINITY_GET) */
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULER_AFFINITY_GET ((okl4_tracepoint_evt_t)0x45U)
+/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_SCHEDULER_AFFINITY_SET) */
+#define OKL4_TRACEPOINT_EVT_SWI_SCHEDULER_AFFINITY_SET ((okl4_tracepoint_evt_t)0x46U)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_TIMER_CANCEL) */
-#define OKL4_TRACEPOINT_EVT_SWI_TIMER_CANCEL ((okl4_tracepoint_evt_t)0x44U)
+#define OKL4_TRACEPOINT_EVT_SWI_TIMER_CANCEL ((okl4_tracepoint_evt_t)0x47U)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_TIMER_GET_RESOLUTION) */
-#define OKL4_TRACEPOINT_EVT_SWI_TIMER_GET_RESOLUTION ((okl4_tracepoint_evt_t)0x45U)
+#define OKL4_TRACEPOINT_EVT_SWI_TIMER_GET_RESOLUTION ((okl4_tracepoint_evt_t)0x48U)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_TIMER_GET_TIME) */
-#define OKL4_TRACEPOINT_EVT_SWI_TIMER_GET_TIME ((okl4_tracepoint_evt_t)0x46U)
+#define OKL4_TRACEPOINT_EVT_SWI_TIMER_GET_TIME ((okl4_tracepoint_evt_t)0x49U)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_TIMER_QUERY) */
-#define OKL4_TRACEPOINT_EVT_SWI_TIMER_QUERY ((okl4_tracepoint_evt_t)0x47U)
+#define OKL4_TRACEPOINT_EVT_SWI_TIMER_QUERY ((okl4_tracepoint_evt_t)0x4aU)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_TIMER_START) */
-#define OKL4_TRACEPOINT_EVT_SWI_TIMER_START ((okl4_tracepoint_evt_t)0x48U)
+#define OKL4_TRACEPOINT_EVT_SWI_TIMER_START ((okl4_tracepoint_evt_t)0x4bU)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_TRACEBUFFER_SYNC) */
-#define OKL4_TRACEPOINT_EVT_SWI_TRACEBUFFER_SYNC ((okl4_tracepoint_evt_t)0x49U)
+#define OKL4_TRACEPOINT_EVT_SWI_TRACEBUFFER_SYNC ((okl4_tracepoint_evt_t)0x4cU)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_VCPU_RESET) */
-#define OKL4_TRACEPOINT_EVT_SWI_VCPU_RESET ((okl4_tracepoint_evt_t)0x4aU)
+#define OKL4_TRACEPOINT_EVT_SWI_VCPU_RESET ((okl4_tracepoint_evt_t)0x4dU)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_VCPU_START) */
-#define OKL4_TRACEPOINT_EVT_SWI_VCPU_START ((okl4_tracepoint_evt_t)0x4bU)
+#define OKL4_TRACEPOINT_EVT_SWI_VCPU_START ((okl4_tracepoint_evt_t)0x4eU)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_VCPU_STOP) */
-#define OKL4_TRACEPOINT_EVT_SWI_VCPU_STOP ((okl4_tracepoint_evt_t)0x4cU)
+#define OKL4_TRACEPOINT_EVT_SWI_VCPU_STOP ((okl4_tracepoint_evt_t)0x4fU)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_VCPU_SWITCH_MODE) */
-#define OKL4_TRACEPOINT_EVT_SWI_VCPU_SWITCH_MODE ((okl4_tracepoint_evt_t)0x4dU)
+#define OKL4_TRACEPOINT_EVT_SWI_VCPU_SWITCH_MODE ((okl4_tracepoint_evt_t)0x50U)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_VCPU_SYNC_SEV) */
-#define OKL4_TRACEPOINT_EVT_SWI_VCPU_SYNC_SEV ((okl4_tracepoint_evt_t)0x4eU)
+#define OKL4_TRACEPOINT_EVT_SWI_VCPU_SYNC_SEV ((okl4_tracepoint_evt_t)0x51U)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_VCPU_SYNC_WFE) */
-#define OKL4_TRACEPOINT_EVT_SWI_VCPU_SYNC_WFE ((okl4_tracepoint_evt_t)0x4fU)
+#define OKL4_TRACEPOINT_EVT_SWI_VCPU_SYNC_WFE ((okl4_tracepoint_evt_t)0x52U)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_VINTERRUPT_CLEAR_AND_RAISE) */
-#define OKL4_TRACEPOINT_EVT_SWI_VINTERRUPT_CLEAR_AND_RAISE ((okl4_tracepoint_evt_t)0x50U)
+#define OKL4_TRACEPOINT_EVT_SWI_VINTERRUPT_CLEAR_AND_RAISE ((okl4_tracepoint_evt_t)0x53U)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_VINTERRUPT_MODIFY) */
-#define OKL4_TRACEPOINT_EVT_SWI_VINTERRUPT_MODIFY ((okl4_tracepoint_evt_t)0x51U)
+#define OKL4_TRACEPOINT_EVT_SWI_VINTERRUPT_MODIFY ((okl4_tracepoint_evt_t)0x54U)
/*lint -esym(621, OKL4_TRACEPOINT_EVT_SWI_VINTERRUPT_RAISE) */
-#define OKL4_TRACEPOINT_EVT_SWI_VINTERRUPT_RAISE ((okl4_tracepoint_evt_t)0x52U)
+#define OKL4_TRACEPOINT_EVT_SWI_VINTERRUPT_RAISE ((okl4_tracepoint_evt_t)0x55U)
/**
Maximum enumeration value
*/
/*lint -esym(621, OKL4_TRACEPOINT_EVT_MAX) */
-#define OKL4_TRACEPOINT_EVT_MAX ((okl4_tracepoint_evt_t)0x52U)
+#define OKL4_TRACEPOINT_EVT_MAX ((okl4_tracepoint_evt_t)0x55U)
/**
Invalid enumeration value
*/
@@ -11473,6 +11288,8 @@ okl4_tracepoint_evt_is_element_of(okl4_tracepoint_evt_t var)
(var == OKL4_TRACEPOINT_EVT_SWI_AXON_SET_SEND_QUEUE) ||
(var == OKL4_TRACEPOINT_EVT_SWI_AXON_SET_SEND_SEGMENT) ||
(var == OKL4_TRACEPOINT_EVT_SWI_AXON_TRIGGER_SEND) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_DEBUG_RESUME) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_DEBUG_SUSPEND) ||
(var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ACK) ||
(var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_PRIVATE) ||
(var == OKL4_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_SHARED) ||
@@ -11527,7 +11344,8 @@ okl4_tracepoint_evt_is_element_of(okl4_tracepoint_evt_t var)
(var == OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_DISABLE) ||
(var == OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_ENABLE) ||
(var == OKL4_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_GET_DATA) ||
- (var == OKL4_TRACEPOINT_EVT_SWI_SCHEDULER_SUSPEND) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_SCHEDULER_AFFINITY_GET) ||
+ (var == OKL4_TRACEPOINT_EVT_SWI_SCHEDULER_AFFINITY_SET) ||
(var == OKL4_TRACEPOINT_EVT_SWI_TIMER_CANCEL) ||
(var == OKL4_TRACEPOINT_EVT_SWI_TIMER_GET_RESOLUTION) ||
(var == OKL4_TRACEPOINT_EVT_SWI_TIMER_GET_TIME) ||
@@ -11767,7 +11585,7 @@ okl4_vfp_ops_is_element_of(okl4_vfp_ops_t var)
struct okl4_vfp_register {
- __attribute__((aligned(16))) uint8_t __bytes[16];
+ ALIGNED(16) uint8_t __bytes[16];
};
@@ -12102,6 +11920,14 @@ struct _okl4_sys_axon_trigger_send_return {
okl4_error_t error;
};
+struct _okl4_sys_debug_resume_return {
+ okl4_error_t error;
+};
+
+struct _okl4_sys_debug_suspend_return {
+ okl4_error_t error;
+};
+
struct _okl4_sys_interrupt_ack_return {
okl4_interrupt_number_t irq;
uint8_t source;
@@ -12337,7 +12163,12 @@ struct _okl4_sys_schedule_profile_vcpu_get_data_return {
okl4_error_t error;
};
-struct _okl4_sys_scheduler_suspend_return {
+struct _okl4_sys_scheduler_affinity_get_return {
+ okl4_error_t error;
+ okl4_cpu_id_t cpu_index;
+};
+
+struct _okl4_sys_scheduler_affinity_set_return {
okl4_error_t error;
};
@@ -12439,12 +12270,6 @@ GLOBAL_STATIC_ASSERT(sizeof(okl4_arm_psci_result_t) == 4U,
GLOBAL_STATIC_ASSERT(_Alignof(okl4_arm_psci_result_t) == 4U,
__autogen_confused_about_alignof_arm_psci_result)
#endif
-GLOBAL_STATIC_ASSERT(sizeof(okl4_arm_psci_suspend_state_t) == 4U,
- __autogen_confused_about_sizeof_arm_psci_suspend_state)
-#if !defined(LINTER)
-GLOBAL_STATIC_ASSERT(_Alignof(okl4_arm_psci_suspend_state_t) == 4U,
- __autogen_confused_about_alignof_arm_psci_suspend_state)
-#endif
GLOBAL_STATIC_ASSERT(sizeof(okl4_arm_sctlr_t) == 4U,
__autogen_confused_about_sizeof_arm_sctlr)
#if !defined(LINTER)
@@ -13199,6 +13024,12 @@ GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vservices_transports) == 8U,
#define OKL4_ASM_ARM_PSCI_FUNCTION_PSCI_STAT_RESIDENCY (0x10)
/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_PSCI_STAT_COUNT) */
#define OKL4_ASM_ARM_PSCI_FUNCTION_PSCI_STAT_COUNT (0x11)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_SYSTEM_RESET2) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_SYSTEM_RESET2 (0x12)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_MEM_PROTECT) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_MEM_PROTECT (0x13)
+/*lint -esym(621, OKL4_ASM_ARM_PSCI_FUNCTION_MEM_PROTECT_CHECK_RANGE) */
+#define OKL4_ASM_ARM_PSCI_FUNCTION_MEM_PROTECT_CHECK_RANGE (0x14)
/**
* okl4_arm_psci_result_t
@@ -13225,33 +13056,6 @@ GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vservices_transports) == 8U,
#define OKL4_ASM_ARM_PSCI_RESULT_NOT_SUPPORTED (0xffffffff)
/**
- * okl4_arm_psci_suspend_state_t
- **/
-
-/*lint -esym(621, OKL4_ARM_PSCI_POWER_LEVEL_CPU) */
-#define OKL4_ARM_PSCI_POWER_LEVEL_CPU (0)
-
-/*lint -esym(621, OKL4_ASM_MASK_STATE_ID_ARM_PSCI_SUSPEND_STATE) */
-#define OKL4_ASM_MASK_STATE_ID_ARM_PSCI_SUSPEND_STATE (65535)
-/*lint -esym(621, OKL4_ASM_SHIFT_STATE_ID_ARM_PSCI_SUSPEND_STATE) */
-#define OKL4_ASM_SHIFT_STATE_ID_ARM_PSCI_SUSPEND_STATE (0)
-/*lint -esym(621, OKL4_ASM_WIDTH_STATE_ID_ARM_PSCI_SUSPEND_STATE) */
-#define OKL4_ASM_WIDTH_STATE_ID_ARM_PSCI_SUSPEND_STATE (16)
-/*lint -esym(621, OKL4_ASM_MASK_POWER_DOWN_ARM_PSCI_SUSPEND_STATE) */
-#define OKL4_ASM_MASK_POWER_DOWN_ARM_PSCI_SUSPEND_STATE (1 << 16)
-/*lint -esym(621, OKL4_ASM_SHIFT_POWER_DOWN_ARM_PSCI_SUSPEND_STATE) */
-#define OKL4_ASM_SHIFT_POWER_DOWN_ARM_PSCI_SUSPEND_STATE (16)
-/*lint -esym(621, OKL4_ASM_WIDTH_POWER_DOWN_ARM_PSCI_SUSPEND_STATE) */
-#define OKL4_ASM_WIDTH_POWER_DOWN_ARM_PSCI_SUSPEND_STATE (1)
-/*lint -esym(621, OKL4_ASM_MASK_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE) */
-#define OKL4_ASM_MASK_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE (3 << 24)
-/*lint -esym(621, OKL4_ASM_SHIFT_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE) */
-#define OKL4_ASM_SHIFT_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE (24)
-/*lint -esym(621, OKL4_ASM_WIDTH_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE) */
-#define OKL4_ASM_WIDTH_POWER_LEVEL_ARM_PSCI_SUSPEND_STATE (2)
-
-
-/**
* okl4_arm_sctlr_t
**/
@@ -13376,12 +13180,12 @@ GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vservices_transports) == 8U,
#define OKL4_ASM_SHIFT_RESERVED22_ARM_SCTLR (22)
/*lint -esym(621, OKL4_ASM_WIDTH_RESERVED22_ARM_SCTLR) */
#define OKL4_ASM_WIDTH_RESERVED22_ARM_SCTLR (1)
-/*lint -esym(621, OKL4_ASM_MASK_RESERVED23_ARM_SCTLR) */
-#define OKL4_ASM_MASK_RESERVED23_ARM_SCTLR (1 << 23)
-/*lint -esym(621, OKL4_ASM_SHIFT_RESERVED23_ARM_SCTLR) */
-#define OKL4_ASM_SHIFT_RESERVED23_ARM_SCTLR (23)
-/*lint -esym(621, OKL4_ASM_WIDTH_RESERVED23_ARM_SCTLR) */
-#define OKL4_ASM_WIDTH_RESERVED23_ARM_SCTLR (1)
+/*lint -esym(621, OKL4_ASM_MASK_SPAN_ARM_SCTLR) */
+#define OKL4_ASM_MASK_SPAN_ARM_SCTLR (1 << 23)
+/*lint -esym(621, OKL4_ASM_SHIFT_SPAN_ARM_SCTLR) */
+#define OKL4_ASM_SHIFT_SPAN_ARM_SCTLR (23)
+/*lint -esym(621, OKL4_ASM_WIDTH_SPAN_ARM_SCTLR) */
+#define OKL4_ASM_WIDTH_SPAN_ARM_SCTLR (1)
/*lint -esym(621, OKL4_ASM_MASK_EL0_ENDIANNESS_ARM_SCTLR) */
#define OKL4_ASM_MASK_EL0_ENDIANNESS_ARM_SCTLR (1 << 24)
/*lint -esym(621, OKL4_ASM_SHIFT_EL0_ENDIANNESS_ARM_SCTLR) */
@@ -15100,12 +14904,6 @@ GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vservices_transports) == 8U,
/*lint -esym(621, OKL4_ASM_ERROR_INVALID_DESIGNATOR) */
#define OKL4_ASM_ERROR_INVALID_DESIGNATOR (0x11)
/**
- The operation failed because the power_state
- argument is invalid.
-*/
-/*lint -esym(621, OKL4_ASM_ERROR_INVALID_POWER_STATE) */
-#define OKL4_ASM_ERROR_INVALID_POWER_STATE (0x12)
-/**
The operation failed because the given segment index does
not correspond to an attached physical segment.
*/
@@ -15136,12 +14934,6 @@ GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vservices_transports) == 8U,
/*lint -esym(621, OKL4_ASM_ERROR_NOT_IN_SEGMENT) */
#define OKL4_ASM_ERROR_NOT_IN_SEGMENT (0x17)
/**
- The operation failed because the caller is not on the last
- online cpu.
-*/
-/*lint -esym(621, OKL4_ASM_ERROR_NOT_LAST_CPU) */
-#define OKL4_ASM_ERROR_NOT_LAST_CPU (0x18)
-/**
Insufficient resources are available to perform the operation.
*/
/*lint -esym(621, OKL4_ASM_ERROR_NO_RESOURCES) */
@@ -15172,12 +14964,6 @@ GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vservices_transports) == 8U,
/*lint -esym(621, OKL4_ASM_ERROR_PIPE_RECV_OVERFLOW) */
#define OKL4_ASM_ERROR_PIPE_RECV_OVERFLOW (0x1e)
/**
- The operation failed because at least one VCPU has a monitored
- power state and is not currently suspended.
-*/
-/*lint -esym(621, OKL4_ASM_ERROR_POWER_VCPU_RESUMED) */
-#define OKL4_ASM_ERROR_POWER_VCPU_RESUMED (0x1f)
-/**
The operation requires a segment to be unused, or not attached
to an MMU context.
*/
@@ -15212,6 +14998,11 @@ GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vservices_transports) == 8U,
/*lint -esym(621, OKL4_ASM_ERROR_ALLOC_EXHAUSTED) */
#define OKL4_ASM_ERROR_ALLOC_EXHAUSTED (0x26)
/**
+ The operation failed because the affinity argument is invalid.
+*/
+/*lint -esym(621, OKL4_ASM_ERROR_INVALID_AFFINITY) */
+#define OKL4_ASM_ERROR_INVALID_AFFINITY (0x27)
+/**
KSP specific error 0
*/
/*lint -esym(621, OKL4_ASM_ERROR_KSP_ERROR_0) */
@@ -15542,13 +15333,16 @@ GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vservices_transports) == 8U,
* okl4_power_state_t
**/
/*lint -esym(621, OKL4_POWER_STATE_IDLE) */
-#define OKL4_POWER_STATE_IDLE (0)
+#define OKL4_POWER_STATE_IDLE (2147483648)
-/*lint -esym(621, OKL4_POWER_STATE_PLATFORM_BASE) */
-#define OKL4_POWER_STATE_PLATFORM_BASE (256)
+/*lint -esym(621, OKL4_POWER_STATE_PLATFORM_LAST) */
+#define OKL4_POWER_STATE_PLATFORM_LAST (2147483647)
/*lint -esym(621, OKL4_POWER_STATE_POWEROFF) */
-#define OKL4_POWER_STATE_POWEROFF (1)
+#define OKL4_POWER_STATE_POWEROFF (2147483650)
+
+/*lint -esym(621, OKL4_POWER_STATE_RETENTION) */
+#define OKL4_POWER_STATE_RETENTION (2147483649)
/**
* okl4_register_set_t
@@ -15831,151 +15625,157 @@ GLOBAL_STATIC_ASSERT(_Alignof(struct okl4_vservices_transports) == 8U,
#define OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_SET_SEND_SEGMENT (0xb)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_TRIGGER_SEND) */
#define OKL4_ASM_TRACEPOINT_EVT_SWI_AXON_TRIGGER_SEND (0xc)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_DEBUG_RESUME) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_DEBUG_RESUME (0xd)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_DEBUG_SUSPEND) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_DEBUG_SUSPEND (0xe)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_ACK) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_ACK (0xd)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_ACK (0xf)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_PRIVATE) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_PRIVATE (0xe)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_PRIVATE (0x10)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_SHARED) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_SHARED (0xf)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_ATTACH_SHARED (0x11)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_DETACH) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_DETACH (0x10)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_DETACH (0x12)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_DIST_ENABLE) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_DIST_ENABLE (0x11)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_DIST_ENABLE (0x13)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_EOI) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_EOI (0x12)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_EOI (0x14)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_GET_HIGHEST_PRIORITY_PENDING) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_GET_HIGHEST_PRIORITY_PENDING (0x13)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_GET_HIGHEST_PRIORITY_PENDING (0x15)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_GET_PAYLOAD) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_GET_PAYLOAD (0x14)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_GET_PAYLOAD (0x16)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_LIMITS) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_LIMITS (0x15)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_LIMITS (0x17)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_MASK) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_MASK (0x16)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_MASK (0x18)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_RAISE) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_RAISE (0x17)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_RAISE (0x19)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_BINARY_POINT) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_BINARY_POINT (0x18)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_BINARY_POINT (0x1a)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONFIG) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONFIG (0x19)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONFIG (0x1b)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONTROL) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONTROL (0x1a)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_CONTROL (0x1c)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY (0x1b)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY (0x1d)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY_MASK) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY_MASK (0x1c)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_PRIORITY_MASK (0x1e)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_TARGETS) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_TARGETS (0x1d)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_SET_TARGETS (0x1f)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_UNMASK) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_UNMASK (0x1e)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_INTERRUPT_UNMASK (0x20)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_KDB_INTERACT) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_KDB_INTERACT (0x1f)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_KDB_INTERACT (0x21)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_KDB_SET_OBJECT_NAME) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_KDB_SET_OBJECT_NAME (0x20)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_KDB_SET_OBJECT_NAME (0x22)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_KSP_PROCEDURE_CALL) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_KSP_PROCEDURE_CALL (0x21)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_KSP_PROCEDURE_CALL (0x23)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_ATTACH_SEGMENT) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_ATTACH_SEGMENT (0x22)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_ATTACH_SEGMENT (0x24)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_DETACH_SEGMENT) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_DETACH_SEGMENT (0x23)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_DETACH_SEGMENT (0x25)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE (0x24)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE (0x26)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE_PN) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE_PN (0x25)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_FLUSH_RANGE_PN (0x27)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PAGE) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PAGE (0x26)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PAGE (0x28)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PN) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PN (0x27)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_LOOKUP_PN (0x29)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_MAP_PAGE) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_MAP_PAGE (0x28)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_MAP_PAGE (0x2a)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_MAP_PN) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_MAP_PN (0x29)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_MAP_PN (0x2b)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UNMAP_PAGE) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UNMAP_PAGE (0x2a)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UNMAP_PAGE (0x2c)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UNMAP_PN) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UNMAP_PN (0x2b)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UNMAP_PN (0x2d)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_ATTRS) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_ATTRS (0x2c)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_ATTRS (0x2e)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_PERMS) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_PERMS (0x2d)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UPDATE_PAGE_PERMS (0x2f)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_ATTRS) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_ATTRS (0x2e)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_ATTRS (0x30)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_PERMS) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_PERMS (0x2f)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_MMU_UPDATE_PN_PERMS (0x31)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_PERFORMANCE_NULL_SYSCALL) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_PERFORMANCE_NULL_SYSCALL (0x30)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_PERFORMANCE_NULL_SYSCALL (0x32)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_PIPE_CONTROL) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_PIPE_CONTROL (0x31)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_PIPE_CONTROL (0x33)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_PIPE_RECV) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_PIPE_RECV (0x32)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_PIPE_RECV (0x34)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_PIPE_SEND) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_PIPE_SEND (0x33)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_PIPE_SEND (0x35)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_PRIORITY_WAIVE) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_PRIORITY_WAIVE (0x34)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_PRIORITY_WAIVE (0x36)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTER) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTER (0x35)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTER (0x37)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTERS) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTERS (0x36)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_GET_REGISTERS (0x38)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_READ_MEMORY32) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_READ_MEMORY32 (0x37)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_READ_MEMORY32 (0x39)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTER) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTER (0x38)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTER (0x3a)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTERS) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTERS (0x39)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_SET_REGISTERS (0x3b)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_WRITE_MEMORY32) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_WRITE_MEMORY32 (0x3a)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_REMOTE_WRITE_MEMORY32 (0x3c)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_STATUS_SUSPENDED) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_STATUS_SUSPENDED (0x3b)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_STATUS_SUSPENDED (0x3d)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_WATCH_SUSPENDED) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_WATCH_SUSPENDED (0x3c)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_METRICS_WATCH_SUSPENDED (0x3e)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_DISABLE) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_DISABLE (0x3d)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_DISABLE (0x3f)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_ENABLE) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_ENABLE (0x3e)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_ENABLE (0x40)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_GET_DATA) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_GET_DATA (0x3f)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_CPU_GET_DATA (0x41)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_DISABLE) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_DISABLE (0x40)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_DISABLE (0x42)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_ENABLE) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_ENABLE (0x41)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_ENABLE (0x43)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_GET_DATA) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_GET_DATA (0x42)
-/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULER_SUSPEND) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULER_SUSPEND (0x43)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULE_PROFILE_VCPU_GET_DATA (0x44)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULER_AFFINITY_GET) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULER_AFFINITY_GET (0x45)
+/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULER_AFFINITY_SET) */
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_SCHEDULER_AFFINITY_SET (0x46)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_CANCEL) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_CANCEL (0x44)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_CANCEL (0x47)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_GET_RESOLUTION) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_GET_RESOLUTION (0x45)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_GET_RESOLUTION (0x48)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_GET_TIME) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_GET_TIME (0x46)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_GET_TIME (0x49)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_QUERY) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_QUERY (0x47)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_QUERY (0x4a)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_START) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_START (0x48)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_TIMER_START (0x4b)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_TRACEBUFFER_SYNC) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_TRACEBUFFER_SYNC (0x49)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_TRACEBUFFER_SYNC (0x4c)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_RESET) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_RESET (0x4a)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_RESET (0x4d)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_START) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_START (0x4b)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_START (0x4e)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_STOP) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_STOP (0x4c)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_STOP (0x4f)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_SWITCH_MODE) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_SWITCH_MODE (0x4d)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_SWITCH_MODE (0x50)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_SYNC_SEV) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_SYNC_SEV (0x4e)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_SYNC_SEV (0x51)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_SYNC_WFE) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_SYNC_WFE (0x4f)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_VCPU_SYNC_WFE (0x52)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_VINTERRUPT_CLEAR_AND_RAISE) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_VINTERRUPT_CLEAR_AND_RAISE (0x50)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_VINTERRUPT_CLEAR_AND_RAISE (0x53)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_VINTERRUPT_MODIFY) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_VINTERRUPT_MODIFY (0x51)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_VINTERRUPT_MODIFY (0x54)
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_SWI_VINTERRUPT_RAISE) */
-#define OKL4_ASM_TRACEPOINT_EVT_SWI_VINTERRUPT_RAISE (0x52)
+#define OKL4_ASM_TRACEPOINT_EVT_SWI_VINTERRUPT_RAISE (0x55)
/**
Maximum enumeration value
*/
/*lint -esym(621, OKL4_ASM_TRACEPOINT_EVT_MAX) */
-#define OKL4_ASM_TRACEPOINT_EVT_MAX (0x52)
+#define OKL4_ASM_TRACEPOINT_EVT_MAX (0x55)
/**
Invalid enumeration value
*/
diff --git a/include/microvisor/microvisor.h b/include/microvisor/microvisor.h
index 3bb8d64b7dc8..b3da007ccc41 100644
--- a/include/microvisor/microvisor.h
+++ b/include/microvisor/microvisor.h
@@ -46,6 +46,11 @@
#define OKL4_FORCE_INLINE static inline __attribute__((always_inline))
#endif
+#if !defined(ALIGNED)
+#define _OKL4_CPP_ALIGNED
+#define ALIGNED(x) __attribute__((__aligned__(x)))
+#endif
+
#include <microvisor/kernel/types.h>
#include <microvisor/kernel/microvisor.h>
#include <microvisor/kernel/syscalls.h>
@@ -61,4 +66,9 @@
#undef _OKL4_CPP_BOOL
#endif
+#if defined(_OKL4_CPP_ALIGNED)
+#undef ALIGNED
+#undef _OKL4_CPP_ALIGNED
+#endif
+
#endif /* _MICROVISOR_H_ */
diff --git a/include/soc/qcom/icnss.h b/include/soc/qcom/icnss.h
index 31b4cceec198..0732a6fd33d1 100644
--- a/include/soc/qcom/icnss.h
+++ b/include/soc/qcom/icnss.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -142,5 +142,7 @@ extern unsigned int icnss_socinfo_get_serial_number(struct device *dev);
extern bool icnss_is_qmi_disable(struct device *dev);
extern bool icnss_is_fw_ready(void);
extern bool icnss_is_fw_down(void);
+extern bool icnss_is_rejuvenate(void);
extern int icnss_trigger_recovery(struct device *dev);
+extern void icnss_block_shutdown(bool status);
#endif /* _ICNSS_WLAN_H_ */
diff --git a/include/trace/events/hyp_core_ctl.h b/include/trace/events/hyp_core_ctl.h
index 3a159e8f36a5..22ddf185fda6 100644
--- a/include/trace/events/hyp_core_ctl.h
+++ b/include/trace/events/hyp_core_ctl.h
@@ -48,6 +48,7 @@ TRACE_EVENT(hyp_core_ctl_status,
__array(char, our_isolated, 32)
__array(char, online, 32)
__array(char, isolated, 32)
+ __array(char, thermal, 32)
),
TP_fast_assign(
@@ -62,11 +63,14 @@ TRACE_EVENT(hyp_core_ctl_status,
cpumask_pr_args(cpu_online_mask));
scnprintf(__entry->isolated, sizeof(__entry->reserve), "%*pbl",
cpumask_pr_args(cpu_isolated_mask));
+ scnprintf(__entry->thermal, sizeof(__entry->reserve), "%*pbl",
+ cpumask_pr_args(cpu_cooling_get_max_level_cpumask()));
),
- TP_printk("event=%s reserve=%s reserved=%s our_isolated=%s online=%s isolated=%s",
+ TP_printk("event=%s reserve=%s reserved=%s our_isolated=%s online=%s isolated=%s thermal=%s",
__get_str(event), __entry->reserve, __entry->reserved,
- __entry->our_isolated, __entry->online, __entry->isolated)
+ __entry->our_isolated, __entry->online, __entry->isolated,
+ __entry->thermal)
);
#endif /* _TRACE_HYP_CORE_CTL_H */
diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
index 5539933b3491..993bb463ef96 100644
--- a/include/uapi/linux/android/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -87,6 +87,14 @@ enum flat_binder_object_flags {
* scheduling policy from the caller (for synchronous transactions).
*/
FLAT_BINDER_FLAG_INHERIT_RT = 0x800,
+
+ /**
+ * @FLAT_BINDER_FLAG_TXN_SECURITY_CTX: request security contexts
+ *
+ * Only when set, causes senders to include their security
+ * context
+ */
+ FLAT_BINDER_FLAG_TXN_SECURITY_CTX = 0x1000,
};
#ifdef BINDER_IPC_32BIT
@@ -254,6 +262,7 @@ struct binder_node_debug_info {
#define BINDER_THREAD_EXIT _IOW('b', 8, __s32)
#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
#define BINDER_GET_NODE_DEBUG_INFO _IOWR('b', 11, struct binder_node_debug_info)
+#define BINDER_SET_CONTEXT_MGR_EXT _IOW('b', 13, struct flat_binder_object)
/*
* NOTE: Two special error codes you should check for when calling
@@ -312,6 +321,11 @@ struct binder_transaction_data {
} data;
};
+struct binder_transaction_data_secctx {
+ struct binder_transaction_data transaction_data;
+ binder_uintptr_t secctx;
+};
+
struct binder_transaction_data_sg {
struct binder_transaction_data transaction_data;
binder_size_t buffers_size;
@@ -347,6 +361,11 @@ enum binder_driver_return_protocol {
BR_OK = _IO('r', 1),
/* No parameters! */
+ BR_TRANSACTION_SEC_CTX = _IOR('r', 2,
+ struct binder_transaction_data_secctx),
+ /*
+ * binder_transaction_data_secctx: the received command.
+ */
BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
/*
diff --git a/include/uapi/linux/scbuf.h b/include/uapi/linux/scbuf.h
new file mode 100644
index 000000000000..bc1a15c6042f
--- /dev/null
+++ b/include/uapi/linux/scbuf.h
@@ -0,0 +1,24 @@
+/*
+ * include/linux/scbuf.h
+ *
+ * Copyright (c) 2018 Cog Systems Pty Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Secure Camera Buffer client driver API.
+ *
+ * This driver exposes a single ioctl() operation which is used to obtain a
+ * file descriptor for a secure camera buffer that has been exported from
+ * another VM.
+ */
+
+#ifndef _UAPI__SCBUF_H_
+#define _UAPI__SCBUF_H_
+
+#include <linux/ioctl.h>
+
+#define IOCTL_SCBUF_LOOKUP_HANDLE _IO('4', 0x50)
+
+#endif /* _UAPI__SCBUF_H_ */
diff --git a/include/uapi/media/cam_req_mgr.h b/include/uapi/media/cam_req_mgr.h
index 841c40af4d47..ae65649964ff 100644
--- a/include/uapi/media/cam_req_mgr.h
+++ b/include/uapi/media/cam_req_mgr.h
@@ -262,6 +262,9 @@ struct cam_req_mgr_link_control {
((idx & CAM_MEM_MGR_HDL_IDX_MASK) | \
(fd << (CAM_MEM_MGR_HDL_FD_END_POS - CAM_MEM_MGR_HDL_FD_SIZE))) \
+#define GET_FD_FROM_HANDLE(hdl) \
+ (hdl >> (CAM_MEM_MGR_HDL_FD_END_POS - CAM_MEM_MGR_HDL_FD_SIZE)) \
+
#define CAM_MEM_MGR_GET_HDL_IDX(hdl) (hdl & CAM_MEM_MGR_HDL_IDX_MASK)
#define CAM_MEM_MGR_SET_SECURE_HDL(hdl, flag) \
diff --git a/include/uapi/media/cam_sync.h b/include/uapi/media/cam_sync.h
index 003c9ad5ce20..4a8781fc823d 100644
--- a/include/uapi/media/cam_sync.h
+++ b/include/uapi/media/cam_sync.h
@@ -117,7 +117,7 @@ struct cam_private_ioctl_arg {
__u32 size;
__u32 result;
__u32 reserved;
- __user __u64 ioctl_ptr;
+ __u64 ioctl_ptr;
};
#define CAM_PRIVATE_IOCTL_CMD \
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 1d1e7bfe1a2e..5648d3547746 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -1450,7 +1450,7 @@ static struct cpuhp_step cpuhp_ap_states[] = {
},
[CPUHP_AP_PERF_ONLINE] = {
.name = "perf:online",
- .startup.single = perf_event_init_cpu,
+ .startup.single = perf_event_restart_events,
.teardown.single = perf_event_exit_cpu,
},
[CPUHP_AP_WORKQUEUE_ONLINE] = {
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index af9159a3bc9b..2714a17c80a9 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -847,6 +847,20 @@ void rebuild_sched_domains(void)
put_online_cpus();
}
+static int update_cpus_allowed(struct cpuset *cs, struct task_struct *p,
+ const struct cpumask *new_mask)
+{
+ int ret;
+
+ if (cpumask_subset(&p->cpus_requested, cs->cpus_requested)) {
+ ret = set_cpus_allowed_ptr(p, &p->cpus_requested);
+ if (!ret)
+ return ret;
+ }
+
+ return set_cpus_allowed_ptr(p, new_mask);
+}
+
/**
* update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
* @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
@@ -862,7 +876,7 @@ static void update_tasks_cpumask(struct cpuset *cs)
css_task_iter_start(&cs->css, &it);
while ((task = css_task_iter_next(&it)))
- set_cpus_allowed_ptr(task, cs->effective_cpus);
+ update_cpus_allowed(cs, task, cs->effective_cpus);
css_task_iter_end(&it);
}
@@ -1545,7 +1559,7 @@ static void cpuset_attach(struct cgroup_taskset *tset)
* can_attach beforehand should guarantee that this doesn't
* fail. TODO: have a better way to handle failure here
*/
- WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
+ WARN_ON_ONCE(update_cpus_allowed(cs, task, cpus_attach));
cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
cpuset_update_task_spread_flag(cs, task);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index cd941f854142..ec0b30cdd62d 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2304,6 +2304,23 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
perf_pmu_enable(cpuctx->ctx.pmu);
}
+#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
+static LIST_HEAD(dormant_event_list);
+static DEFINE_SPINLOCK(dormant_event_list_lock);
+
+static void perf_prepare_install_in_context(struct perf_event *event)
+{
+ spin_lock(&dormant_event_list_lock);
+ if (event->state == PERF_EVENT_STATE_DORMANT)
+ goto out;
+
+ event->state = PERF_EVENT_STATE_DORMANT;
+ list_add_tail(&event->dormant_event_entry, &dormant_event_list);
+out:
+ spin_unlock(&dormant_event_list_lock);
+}
+#endif
+
/*
* Cross CPU call to install and enable a performance event
*
@@ -2449,6 +2466,34 @@ again:
raw_spin_unlock_irq(&ctx->lock);
}
+#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
+static void perf_deferred_install_in_context(int cpu)
+{
+ struct perf_event *event, *tmp;
+ struct perf_event_context *ctx;
+
+ spin_lock(&dormant_event_list_lock);
+ list_for_each_entry_safe(event, tmp, &dormant_event_list,
+ dormant_event_entry) {
+ if (cpu != event->cpu)
+ continue;
+
+ list_del(&event->dormant_event_entry);
+ event->state = PERF_EVENT_STATE_INACTIVE;
+ spin_unlock(&dormant_event_list_lock);
+
+ ctx = event->ctx;
+
+ mutex_lock(&ctx->mutex);
+ perf_install_in_context(ctx, event, cpu);
+ mutex_unlock(&ctx->mutex);
+
+ spin_lock(&dormant_event_list_lock);
+ }
+ spin_unlock(&dormant_event_list_lock);
+}
+#endif
+
/*
* Put a event into inactive state and update time fields.
* Enabling the leader of a group effectively enables all
@@ -4263,40 +4308,21 @@ static void put_event(struct perf_event *event)
}
/*
- * Maintain a zombie list to collect all the zombie events
- */
-#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
-static LIST_HEAD(zombie_list);
-static DEFINE_SPINLOCK(zombie_list_lock);
-#endif
-
-/*
* Kill an event dead; while event:refcount will preserve the event
* object, it will not preserve its functionality. Once the last 'user'
* gives up the object, we'll destroy the thing.
*/
-int perf_event_release_kernel(struct perf_event *event)
+static int __perf_event_release_kernel(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
struct perf_event *child, *tmp;
- /*
- * If the cpu associated to this event is offline, set the event as a
- * zombie event. The cleanup of the cpu would be done if the CPU is
- * back online.
- */
#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
- if (event->cpu != -1 && !cpu_online(event->cpu)) {
- if (event->state == PERF_EVENT_STATE_ZOMBIE)
- return 0;
-
- event->state = PERF_EVENT_STATE_ZOMBIE;
-
- spin_lock(&zombie_list_lock);
- list_add_tail(&event->zombie_entry, &zombie_list);
- spin_unlock(&zombie_list_lock);
-
- return 0;
+ if (event->cpu != -1) {
+ spin_lock(&dormant_event_list_lock);
+ if (event->state == PERF_EVENT_STATE_DORMANT)
+ list_del(&event->dormant_event_entry);
+ spin_unlock(&dormant_event_list_lock);
}
#endif
@@ -4403,6 +4429,17 @@ no_ctx:
put_event(event); /* Must be the 'last' reference */
return 0;
}
+
+int perf_event_release_kernel(struct perf_event *event)
+{
+ int ret;
+
+ mutex_lock(&pmus_lock);
+ ret = __perf_event_release_kernel(event);
+ mutex_unlock(&pmus_lock);
+
+ return ret;
+}
EXPORT_SYMBOL_GPL(perf_event_release_kernel);
/*
@@ -4602,6 +4639,15 @@ perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
struct perf_event_context *ctx;
int ret;
+#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
+ spin_lock(&dormant_event_list_lock);
+ if (event->state == PERF_EVENT_STATE_DORMANT) {
+ spin_unlock(&dormant_event_list_lock);
+ return 0;
+ }
+ spin_unlock(&dormant_event_list_lock);
+#endif
+
ctx = perf_event_ctx_lock(event);
ret = __perf_read(event, buf, count);
perf_event_ctx_unlock(event, ctx);
@@ -9425,13 +9471,13 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
mutex_init(&event->child_mutex);
INIT_LIST_HEAD(&event->child_list);
+ INIT_LIST_HEAD(&event->dormant_event_entry);
INIT_LIST_HEAD(&event->group_entry);
INIT_LIST_HEAD(&event->event_entry);
INIT_LIST_HEAD(&event->sibling_list);
INIT_LIST_HEAD(&event->rb_entry);
INIT_LIST_HEAD(&event->active_entry);
INIT_LIST_HEAD(&event->addr_filters.list);
- INIT_LIST_HEAD(&event->zombie_entry);
INIT_HLIST_NODE(&event->hlist_entry);
@@ -11078,111 +11124,35 @@ int perf_event_init_cpu(unsigned int cpu)
}
#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
-static void
-check_hotplug_start_event(struct perf_event *event)
-{
- if (event->pmu->events_across_hotplug &&
- event->attr.type == PERF_TYPE_SOFTWARE &&
- event->pmu->start)
- event->pmu->start(event, 0);
-}
-
-static void perf_event_zombie_cleanup(unsigned int cpu)
-{
- struct perf_event *event, *tmp;
-
- spin_lock(&zombie_list_lock);
-
- list_for_each_entry_safe(event, tmp, &zombie_list, zombie_entry) {
- if (event->cpu != cpu)
- continue;
-
- list_del(&event->zombie_entry);
- spin_unlock(&zombie_list_lock);
-
- /*
- * The detachment of the event with the
- * PMU expects it to be in an active state
- */
- event->state = PERF_EVENT_STATE_ACTIVE;
- perf_event_release_kernel(event);
-
- spin_lock(&zombie_list_lock);
- }
-
- spin_unlock(&zombie_list_lock);
-}
-
-static int perf_event_start_swevents(unsigned int cpu)
+int perf_event_restart_events(unsigned int cpu)
{
- struct perf_event_context *ctx;
- struct pmu *pmu;
- struct perf_event *event;
- int idx;
-
- perf_event_zombie_cleanup(cpu);
-
- idx = srcu_read_lock(&pmus_srcu);
- list_for_each_entry_rcu(pmu, &pmus, entry) {
- ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
- mutex_lock(&ctx->mutex);
- raw_spin_lock(&ctx->lock);
- list_for_each_entry(event, &ctx->event_list, event_entry)
- check_hotplug_start_event(event);
- raw_spin_unlock(&ctx->lock);
- mutex_unlock(&ctx->mutex);
- }
- srcu_read_unlock(&pmus_srcu, idx);
+ mutex_lock(&pmus_lock);
per_cpu(is_hotplugging, cpu) = false;
- return 0;
-}
-
-/*
- * If keeping events across hotplugging is supported, do not
- * remove the event list so event lives beyond CPU hotplug.
- * The context is exited via an fd close path when userspace
- * is done and the target CPU is online. If software clock
- * event is active, then stop hrtimer associated with it.
- * Start the timer when the CPU comes back online.
- */
-static void
-check_hotplug_remove_from_context(struct perf_event *event,
- struct perf_cpu_context *cpuctx,
- struct perf_event_context *ctx)
-{
- if (event->pmu->events_across_hotplug &&
- event->attr.type == PERF_TYPE_SOFTWARE &&
- event->pmu->stop)
- event->pmu->stop(event, PERF_EF_UPDATE);
- else if (!event->pmu->events_across_hotplug)
- __perf_remove_from_context(event, cpuctx,
- ctx, (void *)DETACH_GROUP);
-}
-
-static void __perf_event_exit_context(void *__info)
-{
- struct perf_event_context *ctx = __info;
- struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
- struct perf_event *event;
+ perf_deferred_install_in_context(cpu);
+ mutex_unlock(&pmus_lock);
- raw_spin_lock(&ctx->lock);
- list_for_each_entry(event, &ctx->event_list, event_entry)
- check_hotplug_remove_from_context(event, cpuctx, ctx);
- raw_spin_unlock(&ctx->lock);
+ return 0;
}
static void perf_event_exit_cpu_context(int cpu)
{
struct perf_event_context *ctx;
+ struct perf_event *event, *event_tmp;
struct pmu *pmu;
int idx;
idx = srcu_read_lock(&pmus_srcu);
+ per_cpu(is_hotplugging, cpu) = true;
list_for_each_entry_rcu(pmu, &pmus, entry) {
ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
mutex_lock(&ctx->mutex);
- smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
+ list_for_each_entry_safe(event, event_tmp, &ctx->event_list,
+ event_entry) {
+ perf_remove_from_context(event, DETACH_GROUP);
+ if (event->pmu->events_across_hotplug)
+ perf_prepare_install_in_context(event);
+ }
mutex_unlock(&ctx->mutex);
}
srcu_read_unlock(&pmus_srcu, idx);
@@ -11195,8 +11165,10 @@ static void perf_event_exit_cpu_context(int cpu) { }
int perf_event_exit_cpu(unsigned int cpu)
{
- per_cpu(is_hotplugging, cpu) = true;
+
+ mutex_lock(&pmus_lock);
perf_event_exit_cpu_context(cpu);
+ mutex_unlock(&pmus_lock);
return 0;
}
@@ -11239,25 +11211,6 @@ static struct notifier_block perf_event_idle_nb = {
.notifier_call = event_idle_notif,
};
-#ifdef CONFIG_HOTPLUG_CPU
-static int perf_cpu_hp_init(void)
-{
- int ret;
-
- ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ONLINE,
- "PERF/CORE/CPUHP_AP_PERF_ONLINE",
- perf_event_start_swevents,
- perf_event_exit_cpu);
- if (ret)
- pr_err("CPU hotplug notifier for perf core could not be registered: %d\n",
- ret);
-
- return ret;
-}
-#else
-static int perf_cpu_hp_init(void) { return 0; }
-#endif
-
void __init perf_event_init(void)
{
int ret, cpu;
@@ -11284,8 +11237,6 @@ void __init perf_event_init(void)
perf_event_init_cpu(smp_processor_id());
idle_notifier_register(&perf_event_idle_nb);
register_reboot_notifier(&perf_reboot_notifier);
- ret = perf_cpu_hp_init();
- WARN(ret, "core perf_cpu_hp_init() failed with: %d", ret);
ret = init_hw_breakpoint();
WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a63e49308890..ac6f9eed544d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5044,6 +5044,9 @@ again:
retval = -EINVAL;
}
+ if (!retval && !(p->flags & PF_KTHREAD))
+ cpumask_and(&p->cpus_requested, in_mask, cpu_possible_mask);
+
out_free_new_mask:
free_cpumask_var(new_mask);
out_free_cpus_allowed:
@@ -6449,6 +6452,19 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
call_rcu_sched(&old_rd->rcu, free_rootdomain);
}
+void sched_get_rd(struct root_domain *rd)
+{
+ atomic_inc(&rd->refcount);
+}
+
+void sched_put_rd(struct root_domain *rd)
+{
+ if (!atomic_dec_and_test(&rd->refcount))
+ return;
+
+ call_rcu_sched(&rd->rcu, free_rootdomain);
+}
+
static int init_rootdomain(struct root_domain *rd)
{
memset(rd, 0, sizeof(*rd));
@@ -8052,6 +8068,7 @@ int sched_cpu_activate(unsigned int cpu)
raw_spin_unlock_irqrestore(&rq->lock, flags);
update_max_interval();
+ walt_update_min_max_capacity();
return 0;
}
@@ -8085,6 +8102,7 @@ int sched_cpu_deactivate(unsigned int cpu)
return ret;
}
sched_domains_numa_masks_clear(cpu);
+ walt_update_min_max_capacity();
return 0;
}
@@ -8161,6 +8179,7 @@ void __init sched_init_smp(void)
/* Move init over to a non-isolated CPU */
if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
BUG();
+ cpumask_copy(&current->cpus_requested, cpu_possible_mask);
sched_init_granularity();
free_cpumask_var(non_isolated_cpus);
diff --git a/kernel/sched/energy.c b/kernel/sched/energy.c
index 420cb5254689..77d8361a7c21 100644
--- a/kernel/sched/energy.c
+++ b/kernel/sched/energy.c
@@ -150,6 +150,7 @@ static int sched_energy_probe(struct platform_device *pdev)
int cpu;
unsigned long *max_frequencies = NULL;
int ret;
+ bool is_sge_valid = false;
if (!sched_is_energy_aware())
return 0;
@@ -248,6 +249,7 @@ static int sched_energy_probe(struct platform_device *pdev)
sge_l0->cap_states[i].power);
}
+ is_sge_valid = true;
dev_info(&pdev->dev,
"cpu=%d eff=%d [freq=%ld cap=%ld power_d0=%ld] -> [freq=%ld cap=%ld power_d0=%ld]\n",
cpu, efficiency,
@@ -271,7 +273,8 @@ static int sched_energy_probe(struct platform_device *pdev)
kfree(max_frequencies);
- walt_sched_energy_populated_callback();
+ if (is_sge_valid)
+ walt_sched_energy_populated_callback();
dev_info(&pdev->dev, "Sched-energy-costs capacity updated\n");
return 0;
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 73f11c478ecb..e6abbb4725eb 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2261,8 +2261,11 @@ static void tell_cpu_to_push(struct rq *rq)
rto_start_unlock(&rq->rd->rto_loop_start);
- if (cpu >= 0)
+ if (cpu >= 0) {
+ /* Make sure the rd does not get freed while pushing */
+ sched_get_rd(rq->rd);
irq_work_queue_on(&rq->rd->rto_push_work, cpu);
+ }
}
/* Called from hardirq context */
@@ -2292,8 +2295,10 @@ void rto_push_irq_work_func(struct irq_work *work)
raw_spin_unlock(&rd->rto_lock);
- if (cpu < 0)
+ if (cpu < 0) {
+ sched_put_rd(rd);
return;
+ }
/* Try the next RT overloaded CPU */
irq_work_queue_on(&rd->rto_push_work, cpu);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 79f55a18ffa6..01558edc1dc8 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -679,6 +679,8 @@ struct root_domain {
};
extern struct root_domain def_root_domain;
+extern void sched_get_rd(struct root_domain *rd);
+extern void sched_put_rd(struct root_domain *rd);
#ifdef HAVE_RT_PUSH_IPI
extern void rto_push_irq_work_func(struct irq_work *work);
@@ -2479,7 +2481,11 @@ static inline void __update_min_max_capacity(void)
int i;
int max_cap = 0, min_cap = INT_MAX;
- for_each_online_cpu(i) {
+ for_each_possible_cpu(i) {
+
+ if (!cpu_active(i))
+ continue;
+
max_cap = max(max_cap, cpu_capacity(i));
min_cap = min(min_cap, cpu_capacity(i));
}
@@ -2744,6 +2750,7 @@ static inline unsigned int power_cost(int cpu, bool max)
}
extern void walt_sched_energy_populated_callback(void);
+extern void walt_update_min_max_capacity(void);
#else /* CONFIG_SCHED_WALT */
@@ -2880,6 +2887,7 @@ static inline unsigned int power_cost(int cpu, bool max)
}
static inline void walt_sched_energy_populated_callback(void) { }
+static inline void walt_update_min_max_capacity(void) { }
#endif /* CONFIG_SCHED_WALT */
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index a9fb3678fdb1..04d8f8c0c873 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -2146,7 +2146,7 @@ static int compute_max_possible_capacity(struct sched_cluster *cluster)
return capacity;
}
-static void update_min_max_capacity(void)
+void walt_update_min_max_capacity(void)
{
unsigned long flags;
@@ -2372,7 +2372,7 @@ static int cpufreq_notifier_policy(struct notifier_block *nb,
return 0;
if (val == CPUFREQ_REMOVE_POLICY || val == CPUFREQ_CREATE_POLICY) {
- update_min_max_capacity();
+ walt_update_min_max_capacity();
return 0;
}
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index 10c5a3a8d638..653cf6dda36b 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -106,6 +106,7 @@ static int smpboot_thread_fn(void *data)
{
struct smpboot_thread_data *td = data;
struct smp_hotplug_thread *ht = td->ht;
+ unsigned long flags;
while (1) {
set_current_state(TASK_INTERRUPTIBLE);
@@ -157,9 +158,9 @@ static int smpboot_thread_fn(void *data)
* p->state = TASK_RUNNING;
* schedule();
*/
- raw_spin_lock(&current->pi_lock);
+ raw_spin_lock_irqsave(&current->pi_lock, flags);
__set_current_state(TASK_RUNNING);
- raw_spin_unlock(&current->pi_lock);
+ raw_spin_unlock_irqrestore(&current->pi_lock, flags);
preempt_enable();
if (ht->park && td->status == HP_THREAD_ACTIVE) {
BUG_ON(td->cpu != smp_processor_id());
diff --git a/lib/qmi_encdec.c b/lib/qmi_encdec.c
index d7221d898238..2808f7b72e3f 100644
--- a/lib/qmi_encdec.c
+++ b/lib/qmi_encdec.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, 2019 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -706,8 +706,8 @@ static int qmi_decode_string_elem(struct elem_info *ei_array, void *buf_dst,
decoded_bytes += rc;
}
- if (string_len > temp_ei->elem_len) {
- pr_err("%s: String len %d > Max Len %d\n",
+ if (string_len >= temp_ei->elem_len) {
+ pr_err("%s: String len %d >= Max Len %d\n",
__func__, string_len, temp_ei->elem_len);
return -ETOOSMALL;
} else if (string_len > tlv_len) {
diff --git a/mm/debug.c b/mm/debug.c
index 9feb699c5d25..bebe48aece6d 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -95,7 +95,7 @@ EXPORT_SYMBOL(dump_vma);
void dump_mm(const struct mm_struct *mm)
{
- pr_emerg("mm %p mmap %p seqnum %d task_size %lu\n"
+ pr_emerg("mm %p mmap %p seqnum %llu task_size %lu\n"
#ifdef CONFIG_MMU
"get_unmapped_area %p\n"
#endif
@@ -125,7 +125,7 @@ void dump_mm(const struct mm_struct *mm)
#endif
"def_flags: %#lx(%pGv)\n",
- mm, mm->mmap, mm->vmacache_seqnum, mm->task_size,
+ mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
#ifdef CONFIG_MMU
mm->get_unmapped_area,
#endif
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index ea618206c902..a6cf5e034b2f 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1444,7 +1444,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
unsigned long new_addr, unsigned long old_end,
- pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush)
+ pmd_t *old_pmd, pmd_t *new_pmd)
{
spinlock_t *old_ptl, *new_ptl;
pmd_t pmd;
@@ -1475,7 +1475,7 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
if (new_ptl != old_ptl)
spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
- if (pmd_present(pmd) && pmd_dirty(pmd))
+ if (pmd_present(pmd))
force_flush = true;
VM_BUG_ON(!pmd_none(*new_pmd));
@@ -1486,12 +1486,10 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
}
set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
- if (new_ptl != old_ptl)
- spin_unlock(new_ptl);
if (force_flush)
flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
- else
- *need_flush = true;
+ if (new_ptl != old_ptl)
+ spin_unlock(new_ptl);
spin_unlock(old_ptl);
return true;
}
diff --git a/mm/mremap.c b/mm/mremap.c
index 23027625a8c0..0c2bac50a42b 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -104,7 +104,7 @@ static pte_t move_soft_dirty_pte(pte_t pte)
static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
unsigned long old_addr, unsigned long old_end,
struct vm_area_struct *new_vma, pmd_t *new_pmd,
- unsigned long new_addr, bool need_rmap_locks, bool *need_flush)
+ unsigned long new_addr, bool need_rmap_locks)
{
struct mm_struct *mm = vma->vm_mm;
pte_t *old_pte, *new_pte, pte;
@@ -152,15 +152,17 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
pte = ptep_get_and_clear(mm, old_addr, old_pte);
/*
- * If we are remapping a dirty PTE, make sure
+ * If we are remapping a valid PTE, make sure
* to flush TLB before we drop the PTL for the
- * old PTE or we may race with page_mkclean().
+ * PTE.
*
- * This check has to be done after we removed the
- * old PTE from page tables or another thread may
- * dirty it after the check and before the removal.
+ * NOTE! Both old and new PTL matter: the old one
+ * for racing with page_mkclean(), the new one to
+ * make sure the physical page stays valid until
+ * the TLB entry for the old mapping has been
+ * flushed.
*/
- if (pte_present(pte) && pte_dirty(pte))
+ if (pte_present(pte))
force_flush = true;
pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
pte = move_soft_dirty_pte(pte);
@@ -168,13 +170,11 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
}
arch_leave_lazy_mmu_mode();
+ if (force_flush)
+ flush_tlb_range(vma, old_end - len, old_end);
if (new_ptl != old_ptl)
spin_unlock(new_ptl);
pte_unmap(new_pte - 1);
- if (force_flush)
- flush_tlb_range(vma, old_end - len, old_end);
- else
- *need_flush = true;
pte_unmap_unlock(old_pte - 1, old_ptl);
if (need_rmap_locks)
drop_rmap_locks(vma);
@@ -189,7 +189,6 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
{
unsigned long extent, next, old_end;
pmd_t *old_pmd, *new_pmd;
- bool need_flush = false;
unsigned long mmun_start; /* For mmu_notifiers */
unsigned long mmun_end; /* For mmu_notifiers */
@@ -220,8 +219,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
if (need_rmap_locks)
take_rmap_locks(vma);
moved = move_huge_pmd(vma, old_addr, new_addr,
- old_end, old_pmd, new_pmd,
- &need_flush);
+ old_end, old_pmd, new_pmd);
if (need_rmap_locks)
drop_rmap_locks(vma);
if (moved)
@@ -239,10 +237,8 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
if (extent > LATENCY_LIMIT)
extent = LATENCY_LIMIT;
move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
- new_pmd, new_addr, need_rmap_locks, &need_flush);
+ new_pmd, new_addr, need_rmap_locks);
}
- if (need_flush)
- flush_tlb_range(vma, old_end-len, old_addr);
mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
diff --git a/mm/page_io.c b/mm/page_io.c
index a2651f58c86a..efe6fd67cb0e 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -56,7 +56,7 @@ void end_swap_bio_write(struct bio *bio)
* Also clear PG_reclaim to avoid rotate_reclaimable_page()
*/
set_page_dirty(page);
- pr_alert("Write-error on swap-device (%u:%u:%llu)\n",
+ pr_alert_ratelimited("Write-error on swap-device (%u:%u:%llu)\n",
imajor(bio->bi_bdev->bd_inode),
iminor(bio->bi_bdev->bd_inode),
(unsigned long long)bio->bi_iter.bi_sector);
diff --git a/mm/vmacache.c b/mm/vmacache.c
index 035fdeb35b43..c9ca3dd46b97 100644
--- a/mm/vmacache.c
+++ b/mm/vmacache.c
@@ -6,44 +6,6 @@
#include <linux/vmacache.h>
/*
- * Flush vma caches for threads that share a given mm.
- *
- * The operation is safe because the caller holds the mmap_sem
- * exclusively and other threads accessing the vma cache will
- * have mmap_sem held at least for read, so no extra locking
- * is required to maintain the vma cache.
- */
-void vmacache_flush_all(struct mm_struct *mm)
-{
- struct task_struct *g, *p;
-
- count_vm_vmacache_event(VMACACHE_FULL_FLUSHES);
-
- /*
- * Single threaded tasks need not iterate the entire
- * list of process. We can avoid the flushing as well
- * since the mm's seqnum was increased and don't have
- * to worry about other threads' seqnum. Current's
- * flush will occur upon the next lookup.
- */
- if (atomic_read(&mm->mm_users) == 1)
- return;
-
- rcu_read_lock();
- for_each_process_thread(g, p) {
- /*
- * Only flush the vmacache pointers as the
- * mm seqnum is already set and curr's will
- * be set upon invalidation when the next
- * lookup is done.
- */
- if (mm == p->mm)
- vmacache_flush(p);
- }
- rcu_read_unlock();
-}
-
-/*
* This task may be accessing a foreign mm via (for example)
* get_user_pages()->find_vma(). The vmacache is task-local and this
* task's vmacache pertains to a different mm (ie, its own). There is
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 6482b001f19a..784b76dbf364 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -196,30 +196,22 @@ static int pfkey_release(struct socket *sock)
return 0;
}
-static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2,
- gfp_t allocation, struct sock *sk)
+static int pfkey_broadcast_one(struct sk_buff *skb, gfp_t allocation,
+ struct sock *sk)
{
int err = -ENOBUFS;
- sock_hold(sk);
- if (*skb2 == NULL) {
- if (atomic_read(&skb->users) != 1) {
- *skb2 = skb_clone(skb, allocation);
- } else {
- *skb2 = skb;
- atomic_inc(&skb->users);
- }
- }
- if (*skb2 != NULL) {
- if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
- skb_set_owner_r(*skb2, sk);
- skb_queue_tail(&sk->sk_receive_queue, *skb2);
- sk->sk_data_ready(sk);
- *skb2 = NULL;
- err = 0;
- }
+ if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
+ return err;
+
+ skb = skb_clone(skb, allocation);
+
+ if (skb) {
+ skb_set_owner_r(skb, sk);
+ skb_queue_tail(&sk->sk_receive_queue, skb);
+ sk->sk_data_ready(sk);
+ err = 0;
}
- sock_put(sk);
return err;
}
@@ -234,7 +226,6 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
{
struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
struct sock *sk;
- struct sk_buff *skb2 = NULL;
int err = -ESRCH;
/* XXX Do we need something like netlink_overrun? I think
@@ -253,7 +244,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
* socket.
*/
if (pfk->promisc)
- pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk);
+ pfkey_broadcast_one(skb, GFP_ATOMIC, sk);
/* the exact target will be processed later */
if (sk == one_sk)
@@ -268,7 +259,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
continue;
}
- err2 = pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk);
+ err2 = pfkey_broadcast_one(skb, GFP_ATOMIC, sk);
/* Error is cleared after successful sending to at least one
* registered KM */
@@ -278,9 +269,8 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
rcu_read_unlock();
if (one_sk != NULL)
- err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk);
+ err = pfkey_broadcast_one(skb, allocation, one_sk);
- kfree_skb(skb2);
kfree_skb(skb);
return err;
}
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 3ce53cf82df5..dc0a82cc59c8 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1411,6 +1411,9 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
(ut[i].family != prev_family))
return -EINVAL;
+ if (ut[i].mode >= XFRM_MODE_MAX)
+ return -EINVAL;
+
prev_family = ut[i].family;
switch (ut[i].family) {
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index cdfa754abd5b..2d5ec3357906 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -2618,7 +2618,7 @@ sub process {
$sig_nospace =~ s/\s//g;
$sig_nospace = lc($sig_nospace);
if (defined $signatures{$sig_nospace}) {
- WARN("BAD_SIGN_OFF",
+ WARN("DUPLICATE_SIGN_OFF",
"Duplicate signature\n" . $herecurr);
} else {
$signatures{$sig_nospace} = 1;
diff --git a/security/pfe/pfk.c b/security/pfe/pfk.c
index d152ae57f0a5..e77324430cce 100644
--- a/security/pfe/pfk.c
+++ b/security/pfe/pfk.c
@@ -70,7 +70,9 @@ typedef int (*pfk_parse_inode_type)(const struct bio *bio,
const struct inode *inode,
struct pfk_key_info *key_info,
enum ice_cryto_algo_mode *algo,
- bool *is_pfe);
+ bool *is_pfe,
+ unsigned int *data_unit,
+ const char *storage_type);
typedef bool (*pfk_allow_merge_bio_type)(const struct bio *bio1,
const struct bio *bio2, const struct inode *inode1,
@@ -215,11 +217,7 @@ static struct inode *pfk_bio_get_inode(const struct bio *bio)
if (!page_mapping(bio->bi_io_vec->bv_page))
return NULL;
- if (!bio->bi_io_vec->bv_page->mapping->host)
-
- return NULL;
-
- return bio->bi_io_vec->bv_page->mapping->host;
+ return page_mapping(bio->bi_io_vec->bv_page)->host;
}
/**
@@ -280,21 +278,24 @@ bool pfe_is_inode_filesystem_type(const struct inode *inode,
static int pfk_get_key_for_bio(const struct bio *bio,
struct pfk_key_info *key_info,
enum ice_cryto_algo_mode *algo_mode,
- bool *is_pfe)
+ bool *is_pfe, unsigned int *data_unit)
{
const struct inode *inode;
enum pfe_type which_pfe;
const struct blk_encryption_key *key;
+ char *s_type = NULL;
inode = pfk_bio_get_inode(bio);
which_pfe = pfk_get_pfe_type(inode);
+ s_type = (char *)pfk_kc_get_storage_type();
if (which_pfe != INVALID_PFE) {
/* Encrypted file; override ->bi_crypt_key */
pr_debug("parsing inode %lu with PFE type %d\n",
inode->i_ino, which_pfe);
return (*(pfk_parse_inode_ftable[which_pfe]))
- (bio, inode, key_info, algo_mode, is_pfe);
+ (bio, inode, key_info, algo_mode, is_pfe,
+ data_unit, (const char *)s_type);
}
/*
@@ -347,6 +348,7 @@ int pfk_load_key_start(const struct bio *bio,
struct pfk_key_info key_info = {NULL, NULL, 0, 0};
enum ice_cryto_algo_mode algo_mode = ICE_CRYPTO_ALGO_MODE_AES_XTS;
enum ice_crpto_key_size key_size_type = 0;
+ unsigned int data_unit = 1 << ICE_CRYPTO_DATA_UNIT_512_B;
u32 key_index = 0;
if (!is_pfe) {
@@ -369,7 +371,8 @@ int pfk_load_key_start(const struct bio *bio,
return -EINVAL;
}
- ret = pfk_get_key_for_bio(bio, &key_info, &algo_mode, is_pfe);
+ ret = pfk_get_key_for_bio(bio, &key_info, &algo_mode, is_pfe,
+ &data_unit);
if (ret != 0)
return ret;
@@ -379,7 +382,8 @@ int pfk_load_key_start(const struct bio *bio,
return ret;
ret = pfk_kc_load_key_start(key_info.key, key_info.key_size,
- key_info.salt, key_info.salt_size, &key_index, async);
+ key_info.salt, key_info.salt_size, &key_index, async,
+ data_unit);
if (ret) {
if (ret != -EBUSY && ret != -EAGAIN)
pr_err("start: could not load key into pfk key cache, error %d\n",
@@ -430,7 +434,7 @@ int pfk_load_key_end(const struct bio *bio, bool *is_pfe)
if (!pfk_is_ready())
return -ENODEV;
- ret = pfk_get_key_for_bio(bio, &key_info, NULL, is_pfe);
+ ret = pfk_get_key_for_bio(bio, &key_info, NULL, is_pfe, NULL);
if (ret != 0)
return ret;
diff --git a/security/pfe/pfk_ext4.c b/security/pfe/pfk_ext4.c
index d505bcb10dab..f81f56833a80 100644
--- a/security/pfe/pfk_ext4.c
+++ b/security/pfe/pfk_ext4.c
@@ -119,7 +119,9 @@ int pfk_ext4_parse_inode(const struct bio *bio,
const struct inode *inode,
struct pfk_key_info *key_info,
enum ice_cryto_algo_mode *algo,
- bool *is_pfe)
+ bool *is_pfe,
+ unsigned int *data_unit,
+ const char *storage_type)
{
int ret = 0;
@@ -133,6 +135,19 @@ int pfk_ext4_parse_inode(const struct bio *bio,
*/
*is_pfe = true;
+ /* Update dun based upon storage type.
+ * For ext4 FS UFS has 4k dun whereas eMMC
+ * uses 512Byte dun.
+ */
+ if (storage_type && data_unit) {
+ if (!memcmp(storage_type, "ufs", strlen("ufs")))
+ *data_unit = 1 << ICE_CRYPTO_DATA_UNIT_4_KB;
+ else if (!memcmp(storage_type, "sdcc", strlen("sdcc")))
+ *data_unit = 1 << ICE_CRYPTO_DATA_UNIT_512_B;
+ else
+ return -EINVAL;
+ }
+
if (!pfk_ext4_is_ready())
return -ENODEV;
diff --git a/security/pfe/pfk_ext4.h b/security/pfe/pfk_ext4.h
index c33232f35a14..e39d04d82df5 100644
--- a/security/pfe/pfk_ext4.h
+++ b/security/pfe/pfk_ext4.h
@@ -24,7 +24,9 @@ int pfk_ext4_parse_inode(const struct bio *bio,
const struct inode *inode,
struct pfk_key_info *key_info,
enum ice_cryto_algo_mode *algo,
- bool *is_pfe);
+ bool *is_pfe,
+ unsigned int *data_unit,
+ const char *storage_type);
bool pfk_ext4_allow_merge_bio(const struct bio *bio1,
const struct bio *bio2, const struct inode *inode1,
diff --git a/security/pfe/pfk_f2fs.c b/security/pfe/pfk_f2fs.c
index 14f53d27c97f..48cca45db056 100644
--- a/security/pfe/pfk_f2fs.c
+++ b/security/pfe/pfk_f2fs.c
@@ -109,7 +109,9 @@ int pfk_f2fs_parse_inode(const struct bio *bio,
const struct inode *inode,
struct pfk_key_info *key_info,
enum ice_cryto_algo_mode *algo,
- bool *is_pfe)
+ bool *is_pfe,
+ unsigned int *data_unit,
+ const char *storage_type)
{
int ret = 0;
@@ -123,6 +125,18 @@ int pfk_f2fs_parse_inode(const struct bio *bio,
*/
*is_pfe = true;
+ /* Update the dun based upon storage type.
+ * Right now both UFS and eMMC storage uses 4KB dun
+ * for F2FS
+ */
+ if (storage_type && data_unit) {
+ if (!memcmp(storage_type, "ufs", strlen("ufs")) ||
+ !memcmp(storage_type, "sdcc", strlen("sdcc")))
+ *data_unit = 1 << ICE_CRYPTO_DATA_UNIT_4_KB;
+ else
+ return -EINVAL;
+ }
+
if (!pfk_f2fs_is_ready())
return -ENODEV;
diff --git a/security/pfe/pfk_f2fs.h b/security/pfe/pfk_f2fs.h
index 551d529bced6..2e0c21d16ea6 100644
--- a/security/pfe/pfk_f2fs.h
+++ b/security/pfe/pfk_f2fs.h
@@ -24,7 +24,9 @@ int pfk_f2fs_parse_inode(const struct bio *bio,
const struct inode *inode,
struct pfk_key_info *key_info,
enum ice_cryto_algo_mode *algo,
- bool *is_pfe);
+ bool *is_pfe,
+ unsigned int *data_unit,
+ const char *storage_type);
bool pfk_f2fs_allow_merge_bio(const struct bio *bio1,
const struct bio *bio2, const struct inode *inode1,
diff --git a/security/pfe/pfk_ice.c b/security/pfe/pfk_ice.c
index a86042c98e1f..59c4adeaee47 100644
--- a/security/pfe/pfk_ice.c
+++ b/security/pfe/pfk_ice.c
@@ -26,11 +26,7 @@
#include "pfk_ice.h"
-/**********************************/
-/** global definitions **/
-/**********************************/
-
-#define TZ_ES_SET_ICE_KEY 0x2
+#define TZ_ES_CONFIG_SET_ICE_KEY 0x4
#define TZ_ES_INVALIDATE_ICE_KEY 0x3
/* index 0 and 1 is reserved for FDE */
@@ -38,44 +34,45 @@
#define MAX_ICE_KEY_INDEX 31
-
-#define TZ_ES_SET_ICE_KEY_ID \
- TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_ES, TZ_ES_SET_ICE_KEY)
-
+#define TZ_ES_CONFIG_SET_ICE_KEY_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_ES, \
+ TZ_ES_CONFIG_SET_ICE_KEY)
#define TZ_ES_INVALIDATE_ICE_KEY_ID \
TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, \
TZ_SVC_ES, TZ_ES_INVALIDATE_ICE_KEY)
-
-#define TZ_ES_SET_ICE_KEY_PARAM_ID \
+#define TZ_ES_CONFIG_SET_ICE_KEY_PARAM_ID \
TZ_SYSCALL_CREATE_PARAM_ID_5( \
TZ_SYSCALL_PARAM_TYPE_VAL, \
TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL, \
- TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+ TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL)
#define TZ_ES_INVALIDATE_ICE_KEY_PARAM_ID \
TZ_SYSCALL_CREATE_PARAM_ID_1( \
TZ_SYSCALL_PARAM_TYPE_VAL)
-#define ICE_KEY_SIZE 32
-#define ICE_SALT_SIZE 32
+#define ICE_BUFFER_SIZE 64
+
+enum {
+ TZ_CIPHER_MODE_XTS_128 = 0,
+ TZ_CIPHER_MODE_CBC_128 = 1,
+ TZ_CIPHER_MODE_XTS_256 = 3,
+ TZ_CIPHER_MODE_CBC_256 = 4
+};
-static uint8_t ice_key[ICE_KEY_SIZE];
-static uint8_t ice_salt[ICE_KEY_SIZE];
+static uint8_t ice_buffer[ICE_BUFFER_SIZE];
int qti_pfk_ice_set_key(uint32_t index, uint8_t *key, uint8_t *salt,
- char *storage_type)
+ char *storage_type, unsigned int data_unit)
{
struct scm_desc desc = {0};
int ret, ret1;
- char *tzbuf_key = (char *)ice_key;
- char *tzbuf_salt = (char *)ice_salt;
+ char *tzbuf = (char *)ice_buffer;
char *s_type = storage_type;
uint32_t smc_id = 0;
- u32 tzbuflen_key = sizeof(ice_key);
- u32 tzbuflen_salt = sizeof(ice_salt);
+ u32 size = ICE_BUFFER_SIZE / 2;
if (index < MIN_ICE_KEY_INDEX || index > MAX_ICE_KEY_INDEX) {
pr_err("%s Invalid index %d\n", __func__, index);
@@ -86,7 +83,7 @@ int qti_pfk_ice_set_key(uint32_t index, uint8_t *key, uint8_t *salt,
return -EINVAL;
}
- if (!tzbuf_key || !tzbuf_salt) {
+ if (!tzbuf) {
pr_err("%s No Memory\n", __func__);
return -ENOMEM;
}
@@ -96,23 +93,21 @@ int qti_pfk_ice_set_key(uint32_t index, uint8_t *key, uint8_t *salt,
return -EINVAL;
}
- memset(tzbuf_key, 0, tzbuflen_key);
- memset(tzbuf_salt, 0, tzbuflen_salt);
+ memset(tzbuf, 0, ICE_BUFFER_SIZE);
- memcpy(ice_key, key, tzbuflen_key);
- memcpy(ice_salt, salt, tzbuflen_salt);
+ memcpy(ice_buffer, key, size);
+ memcpy(ice_buffer + size, salt, size);
- dmac_flush_range(tzbuf_key, tzbuf_key + tzbuflen_key);
- dmac_flush_range(tzbuf_salt, tzbuf_salt + tzbuflen_salt);
+ dmac_flush_range(tzbuf, tzbuf + ICE_BUFFER_SIZE);
- smc_id = TZ_ES_SET_ICE_KEY_ID;
+ smc_id = TZ_ES_CONFIG_SET_ICE_KEY_ID;
- desc.arginfo = TZ_ES_SET_ICE_KEY_PARAM_ID;
+ desc.arginfo = TZ_ES_CONFIG_SET_ICE_KEY_PARAM_ID;
desc.args[0] = index;
- desc.args[1] = virt_to_phys(tzbuf_key);
- desc.args[2] = tzbuflen_key;
- desc.args[3] = virt_to_phys(tzbuf_salt);
- desc.args[4] = tzbuflen_salt;
+ desc.args[1] = virt_to_phys(tzbuf);
+ desc.args[2] = ICE_BUFFER_SIZE;
+ desc.args[3] = TZ_CIPHER_MODE_XTS_256;
+ desc.args[4] = data_unit;
ret = qcom_ice_setup_ice_hw((const char *)s_type, true);
diff --git a/security/pfe/pfk_ice.h b/security/pfe/pfk_ice.h
index 31772e798636..8fd0d83b3ae0 100644
--- a/security/pfe/pfk_ice.h
+++ b/security/pfe/pfk_ice.h
@@ -26,7 +26,7 @@ int pfk_ice_init(void);
int pfk_ice_deinit(void);
int qti_pfk_ice_set_key(uint32_t index, uint8_t *key, uint8_t *salt,
- char *storage_type);
+ char *storage_type, unsigned int data_unit);
int qti_pfk_ice_invalidate_key(uint32_t index, char *storage_type);
diff --git a/security/pfe/pfk_kc.c b/security/pfe/pfk_kc.c
index eecc0260087d..a8e990926af3 100644
--- a/security/pfe/pfk_kc.c
+++ b/security/pfe/pfk_kc.c
@@ -132,6 +132,16 @@ static inline void kc_spin_unlock(void)
}
/**
+ * pfk_kc_get_storage_type() - return the hardware storage type.
+ *
+ * Return: storage type queried during bootup.
+ */
+const char *pfk_kc_get_storage_type(void)
+{
+ return s_type;
+}
+
+/**
* kc_entry_is_available() - checks whether the entry is available
*
* Return true if it is , false otherwise or if invalid
@@ -389,13 +399,15 @@ static void kc_clear_entry(struct kc_entry *entry)
* @key_size: key_size
* @salt: salt
* @salt_size: salt_size
+ * @data_unit: dun size
*
* The previous key is securely released and wiped, the new one is loaded
* to ICE.
* Should be invoked under spinlock
*/
static int kc_update_entry(struct kc_entry *entry, const unsigned char *key,
- size_t key_size, const unsigned char *salt, size_t salt_size)
+ size_t key_size, const unsigned char *salt, size_t salt_size,
+ unsigned int data_unit)
{
int ret;
@@ -412,7 +424,7 @@ static int kc_update_entry(struct kc_entry *entry, const unsigned char *key,
kc_spin_unlock();
ret = qti_pfk_ice_set_key(entry->key_index, entry->key,
- entry->salt, s_type);
+ entry->salt, s_type, data_unit);
kc_spin_lock();
return ret;
@@ -478,7 +490,7 @@ int pfk_kc_deinit(void)
*/
int pfk_kc_load_key_start(const unsigned char *key, size_t key_size,
const unsigned char *salt, size_t salt_size, u32 *key_index,
- bool async)
+ bool async, unsigned int data_unit)
{
int ret = 0;
struct kc_entry *entry = NULL;
@@ -543,7 +555,8 @@ int pfk_kc_load_key_start(const unsigned char *key, size_t key_size,
break;
}
case (FREE):
- ret = kc_update_entry(entry, key, key_size, salt, salt_size);
+ ret = kc_update_entry(entry, key, key_size, salt, salt_size,
+ data_unit);
if (ret) {
entry->state = SCM_ERROR;
entry->scm_error = ret;
diff --git a/security/pfe/pfk_kc.h b/security/pfe/pfk_kc.h
index 6adeee2259cd..89d40be95fdf 100644
--- a/security/pfe/pfk_kc.h
+++ b/security/pfe/pfk_kc.h
@@ -19,7 +19,7 @@ int pfk_kc_init(void);
int pfk_kc_deinit(void);
int pfk_kc_load_key_start(const unsigned char *key, size_t key_size,
const unsigned char *salt, size_t salt_size, u32 *key_index,
- bool async);
+ bool async, unsigned int data_unit);
void pfk_kc_load_key_end(const unsigned char *key, size_t key_size,
const unsigned char *salt, size_t salt_size);
int pfk_kc_remove_key_with_salt(const unsigned char *key, size_t key_size,
@@ -27,6 +27,7 @@ int pfk_kc_remove_key_with_salt(const unsigned char *key, size_t key_size,
int pfk_kc_remove_key(const unsigned char *key, size_t key_size);
int pfk_kc_clear(void);
void pfk_kc_clear_on_reset(void);
+const char *pfk_kc_get_storage_type(void);
extern char *saved_command_line;
diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
index fc9c9b0ad5f6..63e40b5275a0 100644
--- a/sound/soc/soc-ops.c
+++ b/sound/soc/soc-ops.c
@@ -379,7 +379,7 @@ int snd_soc_get_volsw_sx(struct snd_kcontrol *kcontrol,
unsigned int rshift = mc->rshift;
int max = mc->max;
int min = mc->min;
- unsigned int mask = (1 << (fls(min + max) - 1)) - 1;
+ unsigned int mask = (1U << (fls(min + max) - 1)) - 1;
unsigned int val;
int ret;
@@ -424,7 +424,7 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
unsigned int rshift = mc->rshift;
int max = mc->max;
int min = mc->min;
- unsigned int mask = (1 << (fls(min + max) - 1)) - 1;
+ unsigned int mask = (1U << (fls(min + max) - 1)) - 1;
int err = 0;
unsigned int val, val_mask, val2 = 0;