diff options
409 files changed, 69493 insertions, 4625 deletions
diff --git a/Documentation/ABI/testing/sysfs-class-remoteproc b/Documentation/ABI/testing/sysfs-class-remoteproc new file mode 100644 index 000000000000..d188afebc8ba --- /dev/null +++ b/Documentation/ABI/testing/sysfs-class-remoteproc @@ -0,0 +1,50 @@ +What: /sys/class/remoteproc/.../firmware +Date: October 2016 +Contact: Matt Redfearn <matt.redfearn@imgtec.com> +Description: Remote processor firmware + + Reports the name of the firmware currently loaded to the + remote processor. + + To change the running firmware, ensure the remote processor is + stopped (using /sys/class/remoteproc/.../state) and write a new filename. + +What: /sys/class/remoteproc/.../state +Date: October 2016 +Contact: Matt Redfearn <matt.redfearn@imgtec.com> +Description: Remote processor state + + Reports the state of the remote processor, which will be one of: + + "offline" + "suspended" + "running" + "crashed" + "invalid" + + "offline" means the remote processor is powered off. + + "suspended" means that the remote processor is suspended and + must be woken to receive messages. + + "running" is the normal state of an available remote processor + + "crashed" indicates that a problem/crash has been detected on + the remote processor. + + "invalid" is returned if the remote processor is in an + unknown state. + + Writing this file controls the state of the remote processor. + The following states can be written: + + "start" + "stop" + + Writing "start" will attempt to start the processor running the + firmware indicated by, or written to, + /sys/class/remoteproc/.../firmware. The remote processor should + transition to "running" state. + + Writing "stop" will attempt to halt the remote processor and + return it to the "offline" state. 
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,kpss-acc.txt b/Documentation/devicetree/bindings/arm/msm/qcom,kpss-acc.txt index 1333db9acfee..382a574a5c55 100644 --- a/Documentation/devicetree/bindings/arm/msm/qcom,kpss-acc.txt +++ b/Documentation/devicetree/bindings/arm/msm/qcom,kpss-acc.txt @@ -21,10 +21,17 @@ PROPERTIES the register region. An optional second element specifies the base address and size of the alias register region. +- clock-output-names: + Usage: optional + Value type: <string> + Definition: Name of the output clock. Typically acpuX_aux where X is a + CPU number starting at 0. + Example: clock-controller@2088000 { compatible = "qcom,kpss-acc-v2"; reg = <0x02088000 0x1000>, <0x02008000 0x1000>; + clock-output-names = "acpu0_aux"; }; diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,kpss-gcc.txt b/Documentation/devicetree/bindings/arm/msm/qcom,kpss-gcc.txt new file mode 100644 index 000000000000..d1e12f16a28c --- /dev/null +++ b/Documentation/devicetree/bindings/arm/msm/qcom,kpss-gcc.txt @@ -0,0 +1,28 @@ +Krait Processor Sub-system (KPSS) Global Clock Controller (GCC) + +PROPERTIES + +- compatible: + Usage: required + Value type: <string> + Definition: should be one of: + "qcom,kpss-gcc" + +- reg: + Usage: required + Value type: <prop-encoded-array> + Definition: base address and size of the register region + +- clock-output-names: + Usage: required + Value type: <string> + Definition: Name of the output clock. Typically acpu_l2_aux indicating + an L2 cache auxiliary clock. 
+ +Example: + + l2cc: clock-controller@2011000 { + compatible = "qcom,kpss-gcc"; + reg = <0x2011000 0x1000>; + clock-output-names = "acpu_l2_aux"; + }; diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,pvs.txt b/Documentation/devicetree/bindings/arm/msm/qcom,pvs.txt new file mode 100644 index 000000000000..e7cb10426a3b --- /dev/null +++ b/Documentation/devicetree/bindings/arm/msm/qcom,pvs.txt @@ -0,0 +1,38 @@ +Qualcomm Process Voltage Scaling Tables + +The node name is required to be "qcom,pvs". There shall only be one +such node present in the root of the tree. + +PROPERTIES + +- qcom,pvs-format-a or qcom,pvs-format-b: + Usage: required + Value type: <empty> + Definition: Indicates the format of qcom,speedX-pvsY-bin-vZ properties. + If qcom,pvs-format-a is used the table is two columns + (frequency and voltage in that order). If qcom,pvs-format-b is used the table is three columns (frequency, voltage, + and current in that order). + +- qcom,speedX-pvsY-bin-vZ: + Usage: required + Value type: <prop-encoded-array> + Definition: The PVS table corresponding to the speed bin X, pvs bin Y, + and version Z. +Example: + + qcom,pvs { + qcom,pvs-format-a; + qcom,speed0-pvs0-bin-v0 = + < 384000000 950000 >, + < 486000000 975000 >, + < 594000000 1000000 >, + < 702000000 1025000 >, + < 810000000 1075000 >, + < 918000000 1100000 >, + < 1026000000 1125000 >, + < 1134000000 1175000 >, + < 1242000000 1200000 >, + < 1350000000 1225000 >, + < 1458000000 1237500 >, + < 1512000000 1250000 >; + }; diff --git a/Documentation/devicetree/bindings/clock/qcom,a53cc.txt b/Documentation/devicetree/bindings/clock/qcom,a53cc.txt new file mode 100644 index 000000000000..82d1634a2713 --- /dev/null +++ b/Documentation/devicetree/bindings/clock/qcom,a53cc.txt @@ -0,0 +1,23 @@ +Qualcomm A53 CPU Clock Controller Binding +------------------------------------------------ +The A53 CPU Clock Controller is hardware, which provides a combined +mux and divider functionality for the CPU clocks. 
It can choose between +a fixed rate clock and the dedicated A53 PLL. This hardware block is used +on platforms such as msm8916. + +Required properties : +- compatible : shall contain: + + "qcom,a53cc-msm8916" + +- reg : shall contain base register location and length + of the APCS region +- #clock-cells : shall contain 1 + +Example: + + apcs: syscon@b011000 { + compatible = "qcom,a53cc-msm8916"; + reg = <0x0b011000 0x1000>; + #clock-cells = <1>; + }; diff --git a/Documentation/devicetree/bindings/clock/qcom,a53pll.txt b/Documentation/devicetree/bindings/clock/qcom,a53pll.txt new file mode 100644 index 000000000000..6a8c03bfbcb5 --- /dev/null +++ b/Documentation/devicetree/bindings/clock/qcom,a53pll.txt @@ -0,0 +1,20 @@ +MSM8916 A53 PLL Binding +--------------- +The A53 PLL on MSM8916 platforms is the main CPU PLL used for frequencies +above 1GHz. + +Required properties : +- compatible : Shall contain only one of the following: + + "qcom,a53pll-msm8916" + +- reg : shall contain base register location and length +- #clock-cells : must be set to <0> + +Example: + + a53pll: a53pll@b016000 { + compatible = "qcom,a53pll-msm8916"; + reg = <0x0b016000 0x40>; + #clock-cells = <0>; + }; diff --git a/Documentation/devicetree/bindings/clock/qcom,hfpll.txt b/Documentation/devicetree/bindings/clock/qcom,hfpll.txt new file mode 100644 index 000000000000..fee92bb30344 --- /dev/null +++ b/Documentation/devicetree/bindings/clock/qcom,hfpll.txt @@ -0,0 +1,40 @@ +High-Frequency PLL (HFPLL) + +PROPERTIES + +- compatible: + Usage: required + Value type: <string> + Definition: must be "qcom,hfpll" + +- reg: + Usage: required + Value type: <prop-encoded-array> + Definition: address and size of HPLL registers. An optional second + element specifies the address and size of the alias + register region. + +- clock-output-names: + Usage: required + Value type: <string> + Definition: Name of the PLL. Typically hfpllX where X is a CPU number + starting at 0. 
Otherwise hfpll_Y where Y is more specific + such as "l2". + +Example: + +1) An HFPLL for the L2 cache. + + clock-controller@f9016000 { + compatible = "qcom,hfpll"; + reg = <0xf9016000 0x30>; + clock-output-names = "hfpll_l2"; + }; + +2) An HFPLL for CPU0. This HFPLL has the alias register region. + + clock-controller@f908a000 { + compatible = "qcom,hfpll"; + reg = <0xf908a000 0x30>, <0xf900a000 0x30>; + clock-output-names = "hfpll0"; + }; diff --git a/Documentation/devicetree/bindings/clock/qcom,krait-cc.txt b/Documentation/devicetree/bindings/clock/qcom,krait-cc.txt new file mode 100644 index 000000000000..874138f88ec6 --- /dev/null +++ b/Documentation/devicetree/bindings/clock/qcom,krait-cc.txt @@ -0,0 +1,22 @@ +Krait Clock Controller + +PROPERTIES + +- compatible: + Usage: required + Value type: <string> + Definition: must be one of: + "qcom,krait-cc-v1" + "qcom,krait-cc-v2" + +- #clock-cells: + Usage: required + Value type: <u32> + Definition: must be 1 + +Example: + + kraitcc: clock-controller { + compatible = "qcom,krait-cc-v1"; + #clock-cells = <1>; + }; diff --git a/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt b/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt new file mode 100644 index 000000000000..87d3714b956a --- /dev/null +++ b/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt @@ -0,0 +1,37 @@ +Qualcomm RPM Clock Controller Binding +------------------------------------------------ +The RPM is a dedicated hardware engine for managing the shared +SoC resources in order to keep the lowest power profile. It +communicates with other hardware subsystems via shared memory +and accepts clock requests, aggregates the requests and turns +the clocks on/off or scales them on demand. + +Required properties : +- compatible : shall contain only one of the following. The generic + compatible "qcom,rpmcc" should be also included. 
+ + "qcom,rpmcc-msm8916", "qcom,rpmcc" + "qcom,rpmcc-apq8064", "qcom,rpmcc" + +- #clock-cells : shall contain 1 + +Example: + smd { + compatible = "qcom,smd"; + + rpm { + interrupts = <0 168 1>; + qcom,ipc = <&apcs 8 0>; + qcom,smd-edge = <15>; + + rpm_requests { + compatible = "qcom,rpm-msm8916"; + qcom,smd-channels = "rpm_requests"; + + rpmcc: clock-controller { + compatible = "qcom,rpmcc-msm8916", "qcom,rpmcc"; + #clock-cells = <1>; + }; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt b/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt index 6532a59c9b43..00ea670b8c4d 100644 --- a/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt +++ b/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt @@ -38,10 +38,22 @@ The following input format properties are required except in "rgb 1x" and - adi,input-justification: The input bit justification ("left", "evenly", "right"). +- avdd-supply: A 1.8V supply that powers up the AVDD pin on the chip. +- dvdd-supply: A 1.8V supply that powers up the DVDD pin on the chip. +- pvdd-supply: A 1.8V supply that powers up the PVDD pin on the chip. +- dvdd-3v-supply: A 3.3V supply that powers up the pin called DVDD_3V + on the chip. +- bgvdd-supply: A 1.8V supply that powers up the BGVDD pin. This is + needed only for ADV7511. + The following properties are required for ADV7533: - adi,dsi-lanes: Number of DSI data lanes connected to the DSI host. It should be one of 1, 2, 3 or 4. +- a2vdd-supply: 1.8V supply that powers up the A2VDD pin on the chip. +- v3p3-supply: A 3.3V supply that powers up the V3P3 pin on the chip. +- v1p2-supply: A supply that powers up the V1P2 pin on the chip. It can be + either 1.2V or 1.8V. 
Optional properties: diff --git a/Documentation/devicetree/bindings/i2c/i2c-qcom-cci.txt b/Documentation/devicetree/bindings/i2c/i2c-qcom-cci.txt new file mode 100644 index 000000000000..578b77b7e7a6 --- /dev/null +++ b/Documentation/devicetree/bindings/i2c/i2c-qcom-cci.txt @@ -0,0 +1,53 @@ +Qualcomm Camera Control Interface I2C controller + +Required properties: + - compatible: Should be one of: + - "qcom,cci-v1.0.8" for 8916; + - "qcom,cci-v1.4.0" for 8996. + - #address-cells: Should be <1>. + - #size-cells: Should be <0>. + - reg: Base address of the controller and length of memory mapped region. + - reg-names: Should be "cci". + - interrupts: Specifier for CCI interrupt. + - interrupt-names: Should be "cci". + - clocks: List of clock specifiers, one for each entry in clock-names. + - clock-names: Should contain: + - "mmss_mmagic_ahb" - on 8996 only; + - "camss_top_ahb"; + - "cci_ahb"; + - "cci"; + - "camss_ahb". + +Required properties on 8996: + - power-domains: Power domain specifier. + +Optional: + - clock-frequency: Desired I2C bus clock frequency in Hz, defaults to 100 kHz + if omitted. 
+ +Example: + + cci: qcom,cci@a0c000 { + compatible = "qcom,cci-v1.4.0"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0xa0c000 0x1000>; + reg-names = "cci"; + interrupts = <GIC_SPI 295 IRQ_TYPE_EDGE_RISING>; + interrupt-names = "cci"; + power-domains = <&mmcc CAMSS_GDSC>; + clocks = <&mmcc MMSS_MMAGIC_AHB_CLK>, + <&mmcc CAMSS_TOP_AHB_CLK>, + <&mmcc CAMSS_CCI_AHB_CLK>, + <&mmcc CAMSS_CCI_CLK>, + <&mmcc CAMSS_AHB_CLK>; + clock-names = "mmss_mmagic_ahb", + "camss_top_ahb", + "cci_ahb", + "cci", + "camss_ahb"; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&cci0_default>; + pinctrl-1 = <&cci0_sleep>; + clock-frequency = <400000>; + }; diff --git a/Documentation/devicetree/bindings/input/qcom,pm8941-pwrkey.txt b/Documentation/devicetree/bindings/input/qcom,pm8941-pwrkey.txt index 07bf55f6e0b9..f3ae70b50b1e 100644 --- a/Documentation/devicetree/bindings/input/qcom,pm8941-pwrkey.txt +++ b/Documentation/devicetree/bindings/input/qcom,pm8941-pwrkey.txt @@ -32,6 +32,17 @@ PROPERTIES Definition: presence of this property indicates that the KPDPWR_N pin should be configured for pull up. +- resin-pull-up: + Usage: optional + Value type: <empty> + Definition: presence of this property indicates that the RESIN_N pin + should be configured for pull up. + +- linux,code: + Usage: optional + Value type: <u32> + Definition: Keycode to emit when RESIN_N input changes its state. + EXAMPLE pwrkey@800 { diff --git a/Documentation/devicetree/bindings/media/i2c/ov5645.txt b/Documentation/devicetree/bindings/media/i2c/ov5645.txt new file mode 100644 index 000000000000..fd7aec9f8e24 --- /dev/null +++ b/Documentation/devicetree/bindings/media/i2c/ov5645.txt @@ -0,0 +1,54 @@ +* Omnivision 1/4-Inch 5Mp CMOS Digital Image Sensor + +The Omnivision OV5645 is a 1/4-Inch CMOS active pixel digital image sensor with +an active array size of 2592H x 1944V. It is programmable through a serial I2C +interface. + +Required Properties: +- compatible: Value should be "ovti,ov5645". 
+- clocks: Reference to the xclk clock. +- clock-names: Should be "xclk". +- clock-frequency: Frequency of the xclk clock. +- enable-gpios: Chip enable GPIO. Polarity is GPIO_ACTIVE_HIGH. This corresponds + to the hardware pin PWDNB which is physically active low. +- reset-gpios: Chip reset GPIO. Polarity is GPIO_ACTIVE_LOW. This corresponds to + the hardware pin RESETB. +- vdddo-supply: Chip digital IO regulator. +- vdda-supply: Chip analog regulator. +- vddd-supply: Chip digital core regulator. + +The device node must contain one 'port' child node for its digital output +video port, in accordance with the video interface bindings defined in +Documentation/devicetree/bindings/media/video-interfaces.txt. + +Example: + + &i2c1 { + ... + + ov5645: ov5645@78 { + compatible = "ovti,ov5645"; + reg = <0x78>; + + enable-gpios = <&gpio1 6 GPIO_ACTIVE_HIGH>; + reset-gpios = <&gpio5 20 GPIO_ACTIVE_LOW>; + pinctrl-names = "default"; + pinctrl-0 = <&camera_rear_default>; + + clocks = <&clks 200>; + clock-names = "xclk"; + clock-frequency = <23880000>; + + vdddo-supply = <&camera_dovdd_1v8>; + vdda-supply = <&camera_avdd_2v8>; + vddd-supply = <&camera_dvdd_1v2>; + + port { + ov5645_ep: endpoint { + clock-lanes = <1>; + data-lanes = <0 2>; + remote-endpoint = <&csi0_ep>; + }; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/media/qcom,camss.txt b/Documentation/devicetree/bindings/media/qcom,camss.txt new file mode 100644 index 000000000000..cadecebc73f7 --- /dev/null +++ b/Documentation/devicetree/bindings/media/qcom,camss.txt @@ -0,0 +1,197 @@ +Qualcomm Camera Subsystem + +* Properties + +- compatible: + Usage: required + Value type: <stringlist> + Definition: Should contain: + - "qcom,msm8916-camss" +- reg: + Usage: required + Value type: <prop-encoded-array> + Definition: Register ranges as listed in the reg-names property. 
+- reg-names: + Usage: required + Value type: <stringlist> + Definition: Should contain the following entries: + - "csiphy0" + - "csiphy0_clk_mux" + - "csiphy1" + - "csiphy1_clk_mux" + - "csid0" + - "csid1" + - "ispif" + - "csi_clk_mux" + - "vfe0" +- interrupts: + Usage: required + Value type: <prop-encoded-array> + Definition: Interrupts as listed in the interrupt-names property. +- interrupt-names: + Usage: required + Value type: <stringlist> + Definition: Should contain the following entries: + - "csiphy0" + - "csiphy1" + - "csid0" + - "csid1" + - "ispif" + - "vfe0" +- power-domains: + Usage: required + Value type: <prop-encoded-array> + Definition: A phandle and power domain specifier pairs to the + power domain which is responsible for collapsing + and restoring power to the peripheral. +- clocks: + Usage: required + Value type: <prop-encoded-array> + Definition: A list of phandle and clock specifier pairs as listed + in clock-names property. +- clock-names: + Usage: required + Value type: <stringlist> + Definition: Should contain the following entries: + - "camss_top_ahb" + - "ispif_ahb" + - "csiphy0_timer" + - "csiphy1_timer" + - "csi0_ahb" + - "csi0" + - "csi0_phy" + - "csi0_pix" + - "csi0_rdi" + - "csi1_ahb" + - "csi1" + - "csi1_phy" + - "csi1_pix" + - "csi1_rdi" + - "camss_ahb" + - "camss_vfe_vfe" + - "camss_csi_vfe" + - "iface" + - "bus" +- vdda-supply: + Usage: required + Value type: <phandle> + Definition: A phandle to voltage supply for CSI2. +- iommus: + Usage: required + Value type: <prop-encoded-array> + Definition: A list of phandle and IOMMU specifier pairs. + +* Nodes + +- ports: + Usage: required + Definition: As described in video-interfaces.txt in same directory. + Properties: + - reg: + Usage: required + Value type: <u32> + Definition: Selects CSI2 PHY interface - PHY0 or PHY1. + Endpoint node properties: + - clock-lanes: + Usage: required + Value type: <u32> + Definition: The physical clock lane index. 
The value + must always be <1> as the physical clock + lane is lane 1. + - data-lanes: + Usage: required + Value type: <prop-encoded-array> + Definition: An array of physical data lanes indexes. + Position of an entry determines the logical + lane number, while the value of an entry + indicates physical lane index. Lane swapping + is supported. + +* An Example + + camss: camss@1b00000 { + compatible = "qcom,msm8916-camss"; + reg = <0x1b0ac00 0x200>, + <0x1b00030 0x4>, + <0x1b0b000 0x200>, + <0x1b00038 0x4>, + <0x1b08000 0x100>, + <0x1b08400 0x100>, + <0x1b0a000 0x500>, + <0x1b00020 0x10>, + <0x1b10000 0x1000>; + reg-names = "csiphy0", + "csiphy0_clk_mux", + "csiphy1", + "csiphy1_clk_mux", + "csid0", + "csid1", + "ispif", + "csi_clk_mux", + "vfe0"; + interrupts = <GIC_SPI 78 0>, + <GIC_SPI 79 0>, + <GIC_SPI 51 0>, + <GIC_SPI 52 0>, + <GIC_SPI 55 0>, + <GIC_SPI 57 0>; + interrupt-names = "csiphy0", + "csiphy1", + "csid0", + "csid1", + "ispif", + "vfe0"; + power-domains = <&gcc VFE_GDSC>; + clocks = <&gcc GCC_CAMSS_TOP_AHB_CLK>, + <&gcc GCC_CAMSS_ISPIF_AHB_CLK>, + <&gcc GCC_CAMSS_CSI0PHYTIMER_CLK>, + <&gcc GCC_CAMSS_CSI1PHYTIMER_CLK>, + <&gcc GCC_CAMSS_CSI0_AHB_CLK>, + <&gcc GCC_CAMSS_CSI0_CLK>, + <&gcc GCC_CAMSS_CSI0PHY_CLK>, + <&gcc GCC_CAMSS_CSI0PIX_CLK>, + <&gcc GCC_CAMSS_CSI0RDI_CLK>, + <&gcc GCC_CAMSS_CSI1_AHB_CLK>, + <&gcc GCC_CAMSS_CSI1_CLK>, + <&gcc GCC_CAMSS_CSI1PHY_CLK>, + <&gcc GCC_CAMSS_CSI1PIX_CLK>, + <&gcc GCC_CAMSS_CSI1RDI_CLK>, + <&gcc GCC_CAMSS_AHB_CLK>, + <&gcc GCC_CAMSS_VFE0_CLK>, + <&gcc GCC_CAMSS_CSI_VFE0_CLK>, + <&gcc GCC_CAMSS_VFE_AHB_CLK>, + <&gcc GCC_CAMSS_VFE_AXI_CLK>; + clock-names = "camss_top_ahb", + "ispif_ahb", + "csiphy0_timer", + "csiphy1_timer", + "csi0_ahb", + "csi0", + "csi0_phy", + "csi0_pix", + "csi0_rdi", + "csi1_ahb", + "csi1", + "csi1_phy", + "csi1_pix", + "csi1_rdi", + "camss_ahb", + "camss_vfe_vfe", + "camss_csi_vfe", + "iface", + "bus"; + vdda-supply = <&pm8916_l2>; + iommus = <&apps_iommu 3>; + ports { + #address-cells = 
<1>; + #size-cells = <0>; + port@0 { + reg = <0>; + csiphy0_ep: endpoint { + clock-lanes = <1>; + data-lanes = <0 2>; + remote-endpoint = <&ov5645_ep>; + }; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/media/qcom,venus.txt b/Documentation/devicetree/bindings/media/qcom,venus.txt new file mode 100644 index 000000000000..2693449daf73 --- /dev/null +++ b/Documentation/devicetree/bindings/media/qcom,venus.txt @@ -0,0 +1,107 @@ +* Qualcomm Venus video encoder/decoder accelerators + +- compatible: + Usage: required + Value type: <stringlist> + Definition: Value should contain one of: + - "qcom,msm8916-venus" + - "qcom,msm8996-venus" +- reg: + Usage: required + Value type: <prop-encoded-array> + Definition: Register base address and length of the register map. +- interrupts: + Usage: required + Value type: <prop-encoded-array> + Definition: Should contain interrupt line number. +- clocks: + Usage: required + Value type: <prop-encoded-array> + Definition: A List of phandle and clock specifier pairs as listed + in clock-names property. +- clock-names: + Usage: required for msm8916 + Value type: <stringlist> + Definition: Should contain the following entries: + - "core" Core video accelerator clock + - "iface" Video accelerator AHB clock + - "bus" Video accelerator AXI clock +- clock-names: + Usage: required for msm8996 + Value type: <stringlist> + Definition: Should contain the following entries: + - "core" Core video accelerator clock + - "iface" Video accelerator AHB clock + - "bus" Video accelerator AXI clock + - "mbus" Video MAXI clock +- power-domains: + Usage: required + Value type: <prop-encoded-array> + Definition: A phandle and power domain specifier pairs to the + power domain which is responsible for collapsing + and restoring power to the peripheral. +- iommus: + Usage: required + Value type: <prop-encoded-array> + Definition: A list of phandle and IOMMU specifier pairs. 
+- memory-region: + Usage: required + Value type: <phandle> + Definition: reference to the reserved-memory for the firmware + memory region. + +* Subnodes +The Venus video-codec node must contain two subnodes representing +video-decoder and video-encoder. + +Each video-encoder and video-decoder subnode should have: + +- compatible: + Usage: required + Value type: <stringlist> + Definition: Value should contain "venus-decoder" or "venus-encoder" +- clocks: + Usage: required for msm8996 + Value type: <prop-encoded-array> + Definition: A List of phandle and clock specifier pairs as listed + in clock-names property. +- clock-names: + Usage: required for msm8996 + Value type: <stringlist> + Definition: Should contain the following entries: + - "core" Subcore video accelerator clock + +- power-domains: + Usage: required for msm8996 + Value type: <prop-encoded-array> + Definition: A phandle and power domain specifier pairs to the + power domain which is responsible for collapsing + and restoring power to the subcore. 
+ +* An Example + video-codec@1d00000 { + compatible = "qcom,msm8916-venus"; + reg = <0x01d00000 0xff000>; + interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&gcc GCC_VENUS0_VCODEC0_CLK>, + <&gcc GCC_VENUS0_AHB_CLK>, + <&gcc GCC_VENUS0_AXI_CLK>; + clock-names = "core", "iface", "bus"; + power-domains = <&gcc VENUS_GDSC>; + iommus = <&apps_iommu 5>; + memory-region = <&venus_mem>; + + video-decoder { + compatible = "venus-decoder"; + clocks = <&mmcc VIDEO_SUBCORE0_CLK>; + clock-names = "core"; + power-domains = <&mmcc VENUS_CORE0_GDSC>; + }; + + video-encoder { + compatible = "venus-encoder"; + clocks = <&mmcc VIDEO_SUBCORE1_CLK>; + clock-names = "core"; + power-domains = <&mmcc VENUS_CORE1_GDSC>; + }; + }; diff --git a/Documentation/devicetree/bindings/net/bluetooth.txt b/Documentation/devicetree/bindings/net/bluetooth.txt new file mode 100644 index 000000000000..94797df751b8 --- /dev/null +++ b/Documentation/devicetree/bindings/net/bluetooth.txt @@ -0,0 +1,5 @@ +The following properties are common to the Bluetooth controllers: + +- local-bd-address: array of 6 bytes, specifies the BD address that was + uniquely assigned to the Bluetooth device, formatted with least significant + byte first (little-endian). diff --git a/Documentation/devicetree/bindings/power/avs/qcom,cpr.txt b/Documentation/devicetree/bindings/power/avs/qcom,cpr.txt new file mode 100644 index 000000000000..873128c66644 --- /dev/null +++ b/Documentation/devicetree/bindings/power/avs/qcom,cpr.txt @@ -0,0 +1,125 @@ +QCOM CPR (Core Power Reduction) + +CPR (Core Power Reduction) is a technology to reduce core power on a CPU +or other device. Each OPP of a device corresponds to a "corner" that has +a range of valid voltages for a particular frequency. While the device is +running at a particular frequency, CPR monitors dynamic factors such as +temperature, etc. and suggests adjustments to the voltage to save power +and meet silicon characteristic requirements. 
+ +- compatible: + Usage: required + Value type: <string> + Definition: must be "qcom,cpr" + +- reg: + Usage: required + Value type: <prop-encoded-array> + Definition: base address and size of the rbcpr register region + +- interrupts: + Usage: required + Value type: <prop-encoded-array> + Definition: list of three interrupts in order of irq0, irq1, irq2 + +- acc-syscon: + Usage: optional + Value type: <phandle> + Definition: phandle to syscon for writing ACC settings + +- nvmem: + Usage: required + Value type: <phandle> + Definition: phandle to nvmem provider containing efuse settings + +- nvmem-names: + Usage: required + Value type: <string> + Definition: must be "qfprom" + +- vdd-mx-supply: + Usage: required + Value type: <phandle> + Definition: phandle to the vdd-mx regulator supply (e.g. + vdd-mx-supply = <&pm8916_l3>;) + +- qcom,cpr-ref-clk: + Usage: required + Value type: <u32> + Definition: rate of reference clock in kHz + +- qcom,cpr-timer-delay-us: + Usage: required + Value type: <u32> + Definition: delay in uS for the timer interval + +- qcom,cpr-timer-cons-up: + Usage: required + Value type: <u32> + Definition: Consecutive number of timer intervals, or units of + qcom,cpr-timer-delay-us, that occur before issuing an up + interrupt + +- qcom,cpr-timer-cons-down: + Usage: required + Value type: <u32> + Definition: Consecutive number of timer intervals, or units of + qcom,cpr-timer-delay-us, that occur before issuing a down + interrupt + +- qcom,cpr-up-threshold: + Usage: optional + Value type: <u32> + Definition: The threshold for CPR to issue interrupt when error_steps + is greater than it when stepping up + +- qcom,cpr-down-threshold: + Usage: optional + Value type: <u32> + Definition: The threshold for CPR to issue interrupt when error_steps + is greater than it when stepping down + +- qcom,cpr-idle-clocks: + Usage: optional + Value type: <u32> + Definition: Idle clock cycles ring oscillator can be in + +- qcom,cpr-gcnt-us: + Usage: required + Value type: <u32> + Definition: The time for gate count in uS + +- qcom,vdd-apc-step-up-limit: + Usage: required + Value 
type: <u32> + Definition: Limit of vdd-apc-supply steps for scaling up + +- qcom,vdd-apc-step-down-limit: + Usage: required + Value type: <u32> + Definition: Limit of vdd-apc-supply steps for scaling down + +- qcom,cpr-cpus: + Usage: required + Value type: <prop-encoded-array> + Definition: List of CPUs that are being monitored + +Example: + + avs@b018000 { + compatible = "qcom,cpr"; + reg = <0xb018000 0x1000>; + interrupts = <0 15 1>, <0 16 1>, <0 17 1>; + vdd-mx-supply = <&pm8916_l3>; + acc-syscon = <&tcsr>; + nvmem = <&qfprom>; + nvmem-names = "qfprom"; + + qcom,cpr-ref-clk = <19200>; + qcom,cpr-timer-delay-us = <5000>; + qcom,cpr-timer-cons-up = <0>; + qcom,cpr-timer-cons-down = <2>; + qcom,cpr-up-threshold = <0>; + qcom,cpr-down-threshold = <2>; + qcom,cpr-idle-clocks = <15>; + qcom,cpr-gcnt-us = <1>; + qcom,vdd-apc-step-up-limit = <1>; + qcom,vdd-apc-step-down-limit = <1>; + qcom,cpr-cpus = <&CPU0 &CPU1 &CPU2 &CPU3>; + }; diff --git a/Documentation/devicetree/bindings/regulator/qcom,saw-regulator.txt b/Documentation/devicetree/bindings/regulator/qcom,saw-regulator.txt new file mode 100644 index 000000000000..977fec08b2ae --- /dev/null +++ b/Documentation/devicetree/bindings/regulator/qcom,saw-regulator.txt @@ -0,0 +1,31 @@ +Qualcomm SAW Regulators + +SAW (Subsystem Power Manager and Adaptive Voltage Scaling Wrapper) is a hardware +block in the Qualcomm chipsets that regulates the power to the CPU cores on devices +such as APQ8064, MSM8974, APQ8084 and others. 
+ +- compatible: + Usage: required + Value type: <string> + Definition: must be one of: + "qcom,apq8064-saw2-v1.1-regulator" + +Example: + saw0: power-controller@2089000 { + compatible = "qcom,apq8064-saw2-v1.1-cpu", "qcom,saw2", "syscon", "simple-mfd"; + reg = <0x02089000 0x1000>, <0x02009000 0x1000>; + #address-cells = <1>; + #size-cells = <1>; + + saw0_regulator: regulator@2089000 { + compatible = "qcom,apq8064-saw2-v1.1-regulator"; + regulator-always-on; + regulator-min-microvolt = <825000>; + regulator-max-microvolt = <1250000>; + }; + }; + + + &CPU0 { + cpu-supply = <&saw0_regulator>; + }; diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt b/Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt index 57cb49ec55ca..92347fe6890e 100644 --- a/Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt +++ b/Documentation/devicetree/bindings/remoteproc/qcom,q6v5.txt @@ -7,7 +7,9 @@ on the Qualcomm Hexagon core. Usage: required Value type: <string> Definition: must be one of: - "qcom,q6v5-pil" + "qcom,q6v5-pil", + "qcom,msm8916-mss-pil", + "qcom,msm8974-mss-pil" - reg: Usage: required diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,wcnss.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,wcnss.txt index 4ea39e9186a7..042a2e4159bd 100644 --- a/Documentation/devicetree/bindings/soc/qcom/qcom,wcnss.txt +++ b/Documentation/devicetree/bindings/soc/qcom/qcom,wcnss.txt @@ -37,6 +37,11 @@ The following properties are defined to the bluetooth node: Definition: must be: "qcom,wcnss-bt" +- local-bd-address: + Usage: optional + Value type: <u8 array> + Definition: see Documentation/devicetree/bindings/net/bluetooth.txt + == WiFi The following properties are defined to the WiFi node: @@ -91,6 +96,9 @@ smd { bt { compatible = "qcom,wcnss-bt"; + + /* BD address 00:11:22:33:44:55 */ + local-bd-address = [ 55 44 33 22 11 00 ]; }; wlan { diff --git a/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt 
b/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt index d9d8635ff94c..6a4aadc4ce06 100644 --- a/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt +++ b/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt @@ -44,8 +44,7 @@ Required dai-link subnodes: Required CPU/CODEC subnodes properties: -link-name : Name of the dai link. --sound-dai : phandle and port of CPU/CODEC --capture-dai : phandle and port of CPU/CODEC +-sound-dai : phandle/s and port of CPU/CODEC Example: @@ -73,7 +72,7 @@ sound: sound { sound-dai = <&lpass MI2S_PRIMARY>; }; codec { - sound-dai = <&wcd_codec 0>; + sound-dai = <&lpass_codec 0>, <&wcd_codec 0>; }; }; diff --git a/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-analog.txt b/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-analog.txt new file mode 100644 index 000000000000..ccb401cfef9d --- /dev/null +++ b/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-analog.txt @@ -0,0 +1,85 @@ +msm8916 analog audio CODEC + +Bindings for codec Analog IP which is integrated in pmic pm8916, + +## Bindings for codec core on pmic: + +Required properties + - compatible = "qcom,pm8916-wcd-analog-codec"; + - reg: represents the slave base address provided to the peripheral. + - interrupt-parent : The parent interrupt controller. + - interrupts: List of interrupts in given SPMI peripheral. + - interrupt-names: Names specified to above list of interrupts in same + order. List of supported interrupt names are: + "cdc_spk_cnp_int" - Speaker click and pop interrupt. + "cdc_spk_clip_int" - Speaker clip interrupt. + "cdc_spk_ocp_int" - Speaker over current protect interrupt. + "mbhc_ins_rem_det1" - jack insert removal detect interrupt 1. + "mbhc_but_rel_det" - button release interrupt. + "mbhc_but_press_det" - button press event + "mbhc_ins_rem_det" - jack insert removal detect interrupt. + "mbhc_switch_int" - multi button headset interrupt. + "cdc_ear_ocp_int" - Earphone over current protect interrupt. 
+ "cdc_hphr_ocp_int" - Headphone R over current protect interrupt. + "cdc_hphl_ocp_det" - Headphone L over current protect interrupt. + "cdc_ear_cnp_int" - earphone cnp interrupt. + "cdc_hphr_cnp_int" - hphr click and pop interrupt. + "cdc_hphl_cnp_int" - hphl click and pop interrupt. + + - clocks: Handle to mclk. + - clock-names: should be "mclk" + - vdd-cdc-io-supply: phandle to VDD_CDC_IO regulator DT node. + - vdd-cdc-tx-rx-cx-supply: phandle to VDD_CDC_TX/RX/CX regulator DT node. + - vdd-micbias-supply: phandle of VDD_MICBIAS supply's regulator DT node. + +Optional Properties: +- qcom,micbias1-ext-cap: boolean, present if micbias1 has external capacitor + connected. +- qcom,micbias2-ext-cap: boolean, present if micbias2 has external capacitor + connected. + +Example: + +spmi_bus { + ... + audio-codec@f000{ + compatible = "qcom,pm8916-wcd-analog-codec"; + reg = <0xf000 0x200>; + reg-names = "pmic-codec-core"; + clocks = <&gcc GCC_CODEC_DIGCODEC_CLK>; + clock-names = "mclk"; + interrupt-parent = <&spmi_bus>; + interrupts = <0x1 0xf0 0x0 IRQ_TYPE_NONE>, + <0x1 0xf0 0x1 IRQ_TYPE_NONE>, + <0x1 0xf0 0x2 IRQ_TYPE_NONE>, + <0x1 0xf0 0x3 IRQ_TYPE_NONE>, + <0x1 0xf0 0x4 IRQ_TYPE_NONE>, + <0x1 0xf0 0x5 IRQ_TYPE_NONE>, + <0x1 0xf0 0x6 IRQ_TYPE_NONE>, + <0x1 0xf0 0x7 IRQ_TYPE_NONE>, + <0x1 0xf1 0x0 IRQ_TYPE_NONE>, + <0x1 0xf1 0x1 IRQ_TYPE_NONE>, + <0x1 0xf1 0x2 IRQ_TYPE_NONE>, + <0x1 0xf1 0x3 IRQ_TYPE_NONE>, + <0x1 0xf1 0x4 IRQ_TYPE_NONE>, + <0x1 0xf1 0x5 IRQ_TYPE_NONE>; + interrupt-names = "cdc_spk_cnp_int", + "cdc_spk_clip_int", + "cdc_spk_ocp_int", + "mbhc_ins_rem_det1", + "mbhc_but_rel_det", + "mbhc_but_press_det", + "mbhc_ins_rem_det", + "mbhc_switch_int", + "cdc_ear_ocp_int", + "cdc_hphr_ocp_int", + "cdc_hphl_ocp_det", + "cdc_ear_cnp_int", + "cdc_hphr_cnp_int", + "cdc_hphl_cnp_int"; + VDD-CDC-IO-supply = <&pm8916_l5>; + VDD-CDC-TX-RX-CX-supply = <&pm8916_l5>; + VDD-MICBIAS-supply = <&pm8916_l13>; + #sound-dai-cells = <1>; + }; +}; diff --git 
a/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-digital.txt b/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-digital.txt new file mode 100644 index 000000000000..1c8e4cb25176 --- /dev/null +++ b/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-digital.txt @@ -0,0 +1,20 @@ +msm8916 digital audio CODEC + +## Bindings for codec core in lpass: + +Required properties + - compatible = "qcom,msm8916-wcd-digital-codec"; + - reg: address space for lpass codec. + - clocks: Handle to mclk and ahbclk + - clock-names: should be "mclk", "ahbix-clk". + +Example: + +audio-codec@771c000{ + compatible = "qcom,msm8916-wcd-digital-codec"; + reg = <0x0771c000 0x400>; + clocks = <&gcc GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_CLK>, + <&gcc GCC_CODEC_DIGCODEC_CLK>; + clock-names = "ahbix-clk", "mclk"; + #sound-dai-cells = <1>; +}; diff --git a/Documentation/gpu/drm-internals.rst b/Documentation/gpu/drm-internals.rst index 37284bcc7764..25ee92c5df65 100644 --- a/Documentation/gpu/drm-internals.rst +++ b/Documentation/gpu/drm-internals.rst @@ -350,6 +350,23 @@ how the ioctl is allowed to be called. .. kernel-doc:: drivers/gpu/drm/drm_ioctl.c :export: + +Misc Utilities +============== + +Printer +------- + +.. kernel-doc:: include/drm/drm_print.h + :doc: print + +.. kernel-doc:: include/drm/drm_print.h + :internal: + +.. 
kernel-doc:: include/drm/drm_print.h + :export: + + Legacy Support Code =================== diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt index 81c7f2bb7daf..08244bea5048 100644 --- a/Documentation/ioctl/ioctl-number.txt +++ b/Documentation/ioctl/ioctl-number.txt @@ -321,6 +321,7 @@ Code Seq#(hex) Include File Comments 0xB1 00-1F PPPoX <mailto:mostrows@styx.uwaterloo.ca> 0xB3 00 linux/mmc/ioctl.h 0xB4 00-0F linux/gpio.h <mailto:linux-gpio@vger.kernel.org> +0xB5 00-0F uapi/linux/rpmsg.h <mailto:linux-remoteproc@vger.kernel.org> 0xC0 00-0F linux/usb/iowarrior.h 0xCA 00-0F uapi/misc/cxl.h 0xCA 80-8F uapi/scsi/cxlflash_ioctl.h diff --git a/Documentation/media/uapi/v4l/pixfmt-rgb.rst b/Documentation/media/uapi/v4l/pixfmt-rgb.rst index 9cc980882e80..81412f76acb0 100644 --- a/Documentation/media/uapi/v4l/pixfmt-rgb.rst +++ b/Documentation/media/uapi/v4l/pixfmt-rgb.rst @@ -18,3 +18,4 @@ RGB Formats pixfmt-srggb10alaw8 pixfmt-srggb10dpcm8 pixfmt-srggb12 + pixfmt-srggb12p diff --git a/Documentation/media/uapi/v4l/pixfmt-srggb12p.rst b/Documentation/media/uapi/v4l/pixfmt-srggb12p.rst new file mode 100644 index 000000000000..f8e99127f02d --- /dev/null +++ b/Documentation/media/uapi/v4l/pixfmt-srggb12p.rst @@ -0,0 +1,108 @@ +.. -*- coding: utf-8; mode: rst -*- + +.. _V4L2-PIX-FMT-SRGGB12P: +.. _v4l2-pix-fmt-sbggr12p: +.. _v4l2-pix-fmt-sgbrg12p: +.. 
_v4l2-pix-fmt-sgrbg12p: + +******************************************************************************************************************************* +V4L2_PIX_FMT_SRGGB12P ('pRAA'), V4L2_PIX_FMT_SGRBG12P ('pgAA'), V4L2_PIX_FMT_SGBRG12P ('pGAA'), V4L2_PIX_FMT_SBGGR12P ('pBAA'), +******************************************************************************************************************************* + +*man V4L2_PIX_FMT_SRGGB12P(2)* + +V4L2_PIX_FMT_SGRBG12P +V4L2_PIX_FMT_SGBRG12P +V4L2_PIX_FMT_SBGGR12P +12-bit packed Bayer formats + + +Description +=========== + +These four pixel formats are packed raw sRGB / Bayer formats with 12 +bits per colour. Every two consecutive samples are packed into three +bytes. Each of the first two bytes contains the 8 high order bits of +the pixels, and the third byte contains the four least significant +bits of each pixel, in the same order. + +Each n-pixel row contains n/2 green samples and n/2 blue or red +samples, with alternating green-red and green-blue rows. They are +conventionally described as GRGR... BGBG..., RGRG... GBGB..., etc. +Below is an example of one of these formats: + +**Byte Order.** +Each cell is one byte. + + + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + :widths: 2 1 1 1 1 1 1 + + + - .. row 1 + + - start + 0: + + - B\ :sub:`00high` + + - G\ :sub:`01high` + + - G\ :sub:`01low`\ (bits 7--4) B\ :sub:`00low`\ (bits 3--0) + + - B\ :sub:`02high` + + - G\ :sub:`03high` + + - G\ :sub:`03low`\ (bits 7--4) B\ :sub:`02low`\ (bits 3--0) + + - .. row 2 + + - start + 6: + + - G\ :sub:`10high` + + - R\ :sub:`11high` + + - R\ :sub:`11low`\ (bits 7--4) G\ :sub:`10low`\ (bits 3--0) + + - G\ :sub:`12high` + + - R\ :sub:`13high` + + - R\ :sub:`13low`\ (bits 7--4) G\ :sub:`12low`\ (bits 3--0) + + - .. 
row 3 + + - start + 12: + + - B\ :sub:`20high` + + - G\ :sub:`21high` + + - G\ :sub:`21low`\ (bits 7--4) B\ :sub:`20low`\ (bits 3--0) + + - B\ :sub:`22high` + + - G\ :sub:`23high` + + - G\ :sub:`23low`\ (bits 7--4) B\ :sub:`22low`\ (bits 3--0) + + - .. row 4 + + - start + 18: + + - G\ :sub:`30high` + + - R\ :sub:`31high` + + - R\ :sub:`31low`\ (bits 7--4) G\ :sub:`30low`\ (bits 3--0) + + - G\ :sub:`32high` + + - R\ :sub:`33high` + + - R\ :sub:`33low`\ (bits 7--4) G\ :sub:`32low`\ (bits 3--0) + diff --git a/Documentation/media/uapi/v4l/vidioc-cropcap.rst b/Documentation/media/uapi/v4l/vidioc-cropcap.rst index f21a69b554e1..0f80d5ca2643 100644 --- a/Documentation/media/uapi/v4l/vidioc-cropcap.rst +++ b/Documentation/media/uapi/v4l/vidioc-cropcap.rst @@ -39,17 +39,10 @@ structure. Drivers fill the rest of the structure. The results are constant except when switching the video standard. Remember this switch can occur implicit when switching the video input or output. -Do not use the multiplanar buffer types. Use -``V4L2_BUF_TYPE_VIDEO_CAPTURE`` instead of -``V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE`` and use -``V4L2_BUF_TYPE_VIDEO_OUTPUT`` instead of -``V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE``. - This ioctl must be implemented for video capture or output devices that support cropping and/or scaling and/or have non-square pixels, and for overlay devices. - .. c:type:: v4l2_cropcap .. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}| @@ -62,9 +55,9 @@ overlay devices. * - __u32 - ``type`` - Type of the data stream, set by the application. Only these types - are valid here: ``V4L2_BUF_TYPE_VIDEO_CAPTURE``, - ``V4L2_BUF_TYPE_VIDEO_OUTPUT`` and - ``V4L2_BUF_TYPE_VIDEO_OVERLAY``. See :c:type:`v4l2_buf_type`. + are valid here: ``V4L2_BUF_TYPE_VIDEO_CAPTURE``, ``V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE``, + ``V4L2_BUF_TYPE_VIDEO_OUTPUT``, ``V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE`` and + ``V4L2_BUF_TYPE_VIDEO_OVERLAY``. See :c:type:`v4l2_buf_type` and the note above. 
* - struct :ref:`v4l2_rect <v4l2-rect-crop>` - ``bounds`` - Defines the window within capturing or output is possible, this @@ -90,6 +83,16 @@ overlay devices. ``pixelaspect`` to 1/1. Other common values are 54/59 for PAL and SECAM, 11/10 for NTSC sampled according to [:ref:`itu601`]. +.. note:: + Unfortunately in the case of multiplanar buffer types + (``V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE`` and ``V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE``) + this API was messed up with regards to how the :c:type:`v4l2_cropcap` ``type`` field + should be filled in. Some drivers only accepted the ``_MPLANE`` buffer type while + other drivers only accepted a non-multiplanar buffer type (i.e. without the + ``_MPLANE`` at the end). + + Starting with kernel 4.13 both variations are allowed. + .. _v4l2-rect-crop: diff --git a/Documentation/media/uapi/v4l/vidioc-g-crop.rst b/Documentation/media/uapi/v4l/vidioc-g-crop.rst index 56a36340f565..13771ee3e94a 100644 --- a/Documentation/media/uapi/v4l/vidioc-g-crop.rst +++ b/Documentation/media/uapi/v4l/vidioc-g-crop.rst @@ -45,12 +45,6 @@ and struct :c:type:`v4l2_rect` substructure named ``c`` of a v4l2_crop structure and call the :ref:`VIDIOC_S_CROP <VIDIOC_G_CROP>` ioctl with a pointer to this structure. -Do not use the multiplanar buffer types. Use -``V4L2_BUF_TYPE_VIDEO_CAPTURE`` instead of -``V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE`` and use -``V4L2_BUF_TYPE_VIDEO_OUTPUT`` instead of -``V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE``. - The driver first adjusts the requested dimensions against hardware limits, i. e. the bounds given by the capture/output window, and it rounds to the closest possible values of horizontal and vertical offset, @@ -87,14 +81,24 @@ When cropping is not supported then no parameters are changed and * - __u32 - ``type`` - Type of the data stream, set by the application. Only these types - are valid here: ``V4L2_BUF_TYPE_VIDEO_CAPTURE``, - ``V4L2_BUF_TYPE_VIDEO_OUTPUT`` and - ``V4L2_BUF_TYPE_VIDEO_OVERLAY``. See :c:type:`v4l2_buf_type`. 
+ are valid here: ``V4L2_BUF_TYPE_VIDEO_CAPTURE``, ``V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE``, + ``V4L2_BUF_TYPE_VIDEO_OUTPUT``, ``V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE`` and + ``V4L2_BUF_TYPE_VIDEO_OVERLAY``. See :c:type:`v4l2_buf_type` and the note above. * - struct :c:type:`v4l2_rect` - ``c`` - Cropping rectangle. The same co-ordinate system as for struct :c:type:`v4l2_cropcap` ``bounds`` is used. +.. note:: + Unfortunately in the case of multiplanar buffer types + (``V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE`` and ``V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE``) + this API was messed up with regards to how the :c:type:`v4l2_crop` ``type`` field + should be filled in. Some drivers only accepted the ``_MPLANE`` buffer type while + other drivers only accepted a non-multiplanar buffer type (i.e. without the + ``_MPLANE`` at the end). + + Starting with kernel 4.13 both variations are allowed. + Return Value ============ diff --git a/Documentation/media/uapi/v4l/vidioc-g-selection.rst b/Documentation/media/uapi/v4l/vidioc-g-selection.rst index 3145a9166bad..f8abf52948ff 100644 --- a/Documentation/media/uapi/v4l/vidioc-g-selection.rst +++ b/Documentation/media/uapi/v4l/vidioc-g-selection.rst @@ -42,11 +42,7 @@ The ioctls are used to query and configure selection rectangles. To query the cropping (composing) rectangle set struct :c:type:`v4l2_selection` ``type`` field to the -respective buffer type. Do not use the multiplanar buffer types. Use -``V4L2_BUF_TYPE_VIDEO_CAPTURE`` instead of -``V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE`` and use -``V4L2_BUF_TYPE_VIDEO_OUTPUT`` instead of -``V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE``. The next step is setting the +respective buffer type. The next step is setting the value of struct :c:type:`v4l2_selection` ``target`` field to ``V4L2_SEL_TGT_CROP`` (``V4L2_SEL_TGT_COMPOSE``). Please refer to table :ref:`v4l2-selections-common` or :ref:`selection-api` for @@ -64,11 +60,7 @@ pixels. 
To change the cropping (composing) rectangle set the struct :c:type:`v4l2_selection` ``type`` field to the -respective buffer type. Do not use multiplanar buffers. Use -``V4L2_BUF_TYPE_VIDEO_CAPTURE`` instead of -``V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE``. Use -``V4L2_BUF_TYPE_VIDEO_OUTPUT`` instead of -``V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE``. The next step is setting the +respective buffer type. The next step is setting the value of struct :c:type:`v4l2_selection` ``target`` to ``V4L2_SEL_TGT_CROP`` (``V4L2_SEL_TGT_COMPOSE``). Please refer to table :ref:`v4l2-selections-common` or :ref:`selection-api` for additional @@ -169,6 +161,16 @@ Selection targets and flags are documented in - Reserved fields for future use. Drivers and applications must zero this array. +.. note:: + Unfortunately in the case of multiplanar buffer types + (``V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE`` and ``V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE``) + this API was messed up with regards to how the :c:type:`v4l2_selection` ``type`` field + should be filled in. Some drivers only accepted the ``_MPLANE`` buffer type while + other drivers only accepted a non-multiplanar buffer type (i.e. without the + ``_MPLANE`` at the end). + + Starting with kernel 4.13 both variations are allowed. + Return Value ============ diff --git a/Documentation/media/v4l-drivers/index.rst b/Documentation/media/v4l-drivers/index.rst index aac566f88833..ba2aaeb4d126 100644 --- a/Documentation/media/v4l-drivers/index.rst +++ b/Documentation/media/v4l-drivers/index.rst @@ -45,6 +45,7 @@ For more details see the file COPYING in the source distribution of Linux. omap4_camera pvrusb2 pxa_camera + qcom_camss radiotrack saa7134 sh_mobile_ceu_camera diff --git a/Documentation/media/v4l-drivers/qcom_camss.rst b/Documentation/media/v4l-drivers/qcom_camss.rst new file mode 100644 index 000000000000..9e66b7b5770f --- /dev/null +++ b/Documentation/media/v4l-drivers/qcom_camss.rst @@ -0,0 +1,156 @@ +.. 
include:: <isonum.txt> + +Qualcomm Camera Subsystem driver +================================ + +Introduction +------------ + +This file documents the Qualcomm Camera Subsystem driver located under +drivers/media/platform/qcom/camss-8x16. + +The current version of the driver supports the Camera Subsystem found on +Qualcomm MSM8916 and APQ8016 processors. + +The driver implements V4L2, Media controller and V4L2 subdev interfaces. +Camera sensor using V4L2 subdev interface in the kernel is supported. + +The driver is implemented using as a reference the Qualcomm Camera Subsystem +driver for Android as found in Code Aurora [#f1]_. + + +Qualcomm Camera Subsystem hardware +---------------------------------- + +The Camera Subsystem hardware found on 8x16 processors and supported by the +driver consists of: + +- 2 CSIPHY modules. They handle the Physical layer of the CSI2 receivers. + A separate camera sensor can be connected to each of the CSIPHY module; +- 2 CSID (CSI Decoder) modules. They handle the Protocol and Application layer + of the CSI2 receivers. A CSID can decode data stream from any of the CSIPHY. + Each CSID also contains a TG (Test Generator) block which can generate + artificial input data for test purposes; +- ISPIF (ISP Interface) module. Handles the routing of the data streams from + the CSIDs to the inputs of the VFE; +- VFE (Video Front End) module. Contains a pipeline of image processing hardware + blocks. The VFE has different input interfaces. The PIX (Pixel) input + interface feeds the input data to the image processing pipeline. The image + processing pipeline contains also a scale and crop module at the end. Three + RDI (Raw Dump Interface) input interfaces bypass the image processing + pipeline. The VFE also contains the AXI bus interface which writes the output + data to memory. 
+ + +Supported functionality +----------------------- + +The current version of the driver supports: + +- Input from camera sensor via CSIPHY; +- Generation of test input data by the TG in CSID; +- RDI interface of VFE - raw dump of the input data to memory. + + Supported formats: + + - YUYV/UYVY/YVYU/VYUY (packed YUV 4:2:2 - V4L2_PIX_FMT_YUYV / + V4L2_PIX_FMT_UYVY / V4L2_PIX_FMT_YVYU / V4L2_PIX_FMT_VYUY); + - MIPI RAW8 (8bit Bayer RAW - V4L2_PIX_FMT_SRGGB8 / + V4L2_PIX_FMT_SGRBG8 / V4L2_PIX_FMT_SGBRG8 / V4L2_PIX_FMT_SBGGR8); + - MIPI RAW10 (10bit packed Bayer RAW - V4L2_PIX_FMT_SBGGR10P / + V4L2_PIX_FMT_SGBRG10P / V4L2_PIX_FMT_SGRBG10P / V4L2_PIX_FMT_SRGGB10P); + - MIPI RAW12 (12bit packed Bayer RAW - V4L2_PIX_FMT_SRGGB12P / + V4L2_PIX_FMT_SGBRG12P / V4L2_PIX_FMT_SGRBG12P / V4L2_PIX_FMT_SBGGR12P). + +- PIX interface of VFE + + - Format conversion of the input data. + + Supported input formats: + + - YUYV/UYVY/YVYU/VYUY (packed YUV 4:2:2 - V4L2_PIX_FMT_YUYV / + V4L2_PIX_FMT_UYVY / V4L2_PIX_FMT_YVYU / V4L2_PIX_FMT_VYUY). + + Supported output formats: + + - NV12/NV21 (two plane YUV 4:2:0 - V4L2_PIX_FMT_NV12 / V4L2_PIX_FMT_NV21); + - NV16/NV61 (two plane YUV 4:2:2 - V4L2_PIX_FMT_NV16 / V4L2_PIX_FMT_NV61). + + - Scaling support. Configuration of the VFE Encoder Scale module + for downscaling with ratio up to 16x. + + - Cropping support. Configuration of the VFE Encoder Crop module. + +- Concurrent and independent usage of two data inputs - could be camera sensors + and/or TG. + + +Driver Architecture and Design +------------------------------ + +The driver implements the V4L2 subdev interface. 
With the goal to model the +hardware links between the modules and to expose a clean, logical and usable +interface, the driver is split into V4L2 sub-devices as follows: + +- 2 CSIPHY sub-devices - each CSIPHY is represented by a single sub-device; +- 2 CSID sub-devices - each CSID is represented by a single sub-device; +- 2 ISPIF sub-devices - ISPIF is represented by a number of sub-devices equal + to the number of CSID sub-devices; +- 4 VFE sub-devices - VFE is represented by a number of sub-devices equal to + the number of the input interfaces (3 RDI and 1 PIX). + +The considerations to split the driver in this particular way are as follows: + +- representing CSIPHY and CSID modules by a separate sub-device for each module + allows to model the hardware links between these modules; +- representing VFE by a separate sub-device for each input interface allows + to use the input interfaces concurrently and independently as this is + supported by the hardware; +- representing ISPIF by a number of sub-devices equal to the number of CSID + sub-devices allows to create linear media controller pipelines when using two + cameras simultaneously. This avoids branches in the pipelines which otherwise + will require a) userspace and b) media framework (e.g. power on/off + operations) to make assumptions about the data flow from a sink pad to a + source pad on a single media entity. + +Each VFE sub-device is linked to a separate video device node. + +The media controller pipeline graph is as follows (with connected two OV5645 +camera sensors): + +.. _qcom_camss_graph: + +.. kernel-figure:: qcom_camss_graph.dot + :alt: qcom_camss_graph.dot + :align: center + + Media pipeline graph + + +Implementation +-------------- + +Runtime configuration of the hardware (updating settings while streaming) is +not required to implement the currently supported functionality. 
The complete +configuration on each hardware module is applied on STREAMON ioctl based on +the current active media links, formats and controls set. + +The output size of the scaler module in the VFE is configured with the actual +compose selection rectangle on the sink pad of the 'msm_vfe0_pix' entity. + +The crop output area of the crop module in the VFE is configured with the actual +crop selection rectangle on the source pad of the 'msm_vfe0_pix' entity. + + +Documentation +------------- + +APQ8016 Specification: +https://developer.qualcomm.com/download/sd410/snapdragon-410-processor-device-specification.pdf +Referenced 2016-11-24. + + +References +---------- + +.. [#f1] https://source.codeaurora.org/quic/la/kernel/msm-3.10/ diff --git a/Documentation/media/v4l-drivers/qcom_camss_graph.dot b/Documentation/media/v4l-drivers/qcom_camss_graph.dot new file mode 100644 index 000000000000..827fc7112c1e --- /dev/null +++ b/Documentation/media/v4l-drivers/qcom_camss_graph.dot @@ -0,0 +1,41 @@ +digraph board { + rankdir=TB + n00000001 [label="{{<port0> 0} | msm_csiphy0\n/dev/v4l-subdev0 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green] + n00000001:port1 -> n00000007:port0 [style=dashed] + n00000001:port1 -> n0000000a:port0 [style=dashed] + n00000004 [label="{{<port0> 0} | msm_csiphy1\n/dev/v4l-subdev1 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green] + n00000004:port1 -> n00000007:port0 [style=dashed] + n00000004:port1 -> n0000000a:port0 [style=dashed] + n00000007 [label="{{<port0> 0} | msm_csid0\n/dev/v4l-subdev2 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green] + n00000007:port1 -> n0000000d:port0 [style=dashed] + n00000007:port1 -> n00000010:port0 [style=dashed] + n0000000a [label="{{<port0> 0} | msm_csid1\n/dev/v4l-subdev3 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green] + n0000000a:port1 -> n0000000d:port0 [style=dashed] + n0000000a:port1 -> n00000010:port0 [style=dashed] + n0000000d [label="{{<port0> 0} | 
msm_ispif0\n/dev/v4l-subdev4 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green] + n0000000d:port1 -> n00000013:port0 [style=dashed] + n0000000d:port1 -> n0000001c:port0 [style=dashed] + n0000000d:port1 -> n00000025:port0 [style=dashed] + n0000000d:port1 -> n0000002e:port0 [style=dashed] + n00000010 [label="{{<port0> 0} | msm_ispif1\n/dev/v4l-subdev5 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green] + n00000010:port1 -> n00000013:port0 [style=dashed] + n00000010:port1 -> n0000001c:port0 [style=dashed] + n00000010:port1 -> n00000025:port0 [style=dashed] + n00000010:port1 -> n0000002e:port0 [style=dashed] + n00000013 [label="{{<port0> 0} | msm_vfe0_rdi0\n/dev/v4l-subdev6 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green] + n00000013:port1 -> n00000016 [style=bold] + n00000016 [label="msm_vfe0_video0\n/dev/video0", shape=box, style=filled, fillcolor=yellow] + n0000001c [label="{{<port0> 0} | msm_vfe0_rdi1\n/dev/v4l-subdev7 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green] + n0000001c:port1 -> n0000001f [style=bold] + n0000001f [label="msm_vfe0_video1\n/dev/video1", shape=box, style=filled, fillcolor=yellow] + n00000025 [label="{{<port0> 0} | msm_vfe0_rdi2\n/dev/v4l-subdev8 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green] + n00000025:port1 -> n00000028 [style=bold] + n00000028 [label="msm_vfe0_video2\n/dev/video2", shape=box, style=filled, fillcolor=yellow] + n0000002e [label="{{<port0> 0} | msm_vfe0_pix\n/dev/v4l-subdev9 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green] + n0000002e:port1 -> n00000031 [style=bold] + n00000031 [label="msm_vfe0_video3\n/dev/video3", shape=box, style=filled, fillcolor=yellow] + n00000057 [label="{{} | ov5645 1-0076\n/dev/v4l-subdev10 | {<port0> 0}}", shape=Mrecord, style=filled, fillcolor=green] + n00000057:port0 -> n00000001:port0 [style=bold] + n00000059 [label="{{} | ov5645 1-0074\n/dev/v4l-subdev11 | {<port0> 0}}", shape=Mrecord, style=filled, fillcolor=green] 
+ n00000059:port0 -> n00000004:port0 [style=bold] +} diff --git a/MAINTAINERS b/MAINTAINERS index 63cefa62324c..bde9ae7b632b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9977,6 +9977,14 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git S: Supported F: drivers/net/wireless/ath/ath10k/ +QUALCOMM CAMERA SUBSYSTEM DRIVER +M: Todor Tomov <todor.tomov@linaro.org> +L: linux-media@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/media/qcom,camss.txt +F: Documentation/media/v4l-drivers/qcom_camss.rst +F: drivers/media/platform/qcom/camss/ + QUALCOMM EMAC GIGABIT ETHERNET DRIVER M: Timur Tabi <timur@codeaurora.org> L: netdev@vger.kernel.org @@ -9990,6 +9998,14 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/rkuo/linux-hexagon-kernel.g S: Supported F: arch/hexagon/ +QUALCOMM VENUS VIDEO ACCELERATOR DRIVER +M: Stanimir Varbanov <stanimir.varbanov@linaro.org> +L: linux-media@vger.kernel.org +L: linux-arm-msm@vger.kernel.org +T: git git://linuxtv.org/media_tree.git +S: Maintained +F: drivers/media/platform/qcom/venus/ + QUALCOMM WCN36XX WIRELESS DRIVER M: Eugene Krasnikov <k.eugene.e@gmail.com> L: wcn36xx@lists.infradead.org diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile index 7037201c5e3a..545a1636a4b9 100644 --- a/arch/arm/boot/dts/Makefile +++ b/arch/arm/boot/dts/Makefile @@ -609,6 +609,7 @@ dtb-$(CONFIG_ARCH_OXNAS) += \ dtb-$(CONFIG_ARCH_QCOM) += \ qcom-apq8060-dragonboard.dtb \ qcom-apq8064-arrow-sd-600eval.dtb \ + qcom-apq8064-eI_ERAGON600.dtb \ qcom-apq8064-cm-qs600.dtb \ qcom-apq8064-ifc6410.dtb \ qcom-apq8064-sony-xperia-yuga.dtb \ diff --git a/arch/arm/boot/dts/qcom-apq8064-arrow-sd-600eval.dts b/arch/arm/boot/dts/qcom-apq8064-arrow-sd-600eval.dts index 39ae2bc8cb08..4e908afdb738 100644 --- a/arch/arm/boot/dts/qcom-apq8064-arrow-sd-600eval.dts +++ b/arch/arm/boot/dts/qcom-apq8064-arrow-sd-600eval.dts @@ -39,6 +39,17 @@ }; + hdmi-out { + compatible = "hdmi-connector"; + type = "a"; + + port { + 
hdmi_con: endpoint { + remote-endpoint = <&hdmi_out>; + }; + }; + }; + soc { rpm@108000 { regulators { @@ -347,5 +358,38 @@ cd-gpios = <&tlmm_pinmux 26 GPIO_ACTIVE_HIGH>; }; }; + + hdmi-tx@4a00000 { + status = "okay"; + core-vdda-supply = <&pm8921_hdmi_switch>; + hdmi-mux-supply = <&vcc3v3>; + + hpd-gpio = <&tlmm_pinmux 72 GPIO_ACTIVE_HIGH>; + + ports { + port@1 { + endpoint { + remote-endpoint = <&hdmi_con>; + }; + }; + }; + }; + + hdmi-phy@4a00400 { + status = "okay"; + core-vdda-supply = <&pm8921_hdmi_switch>; + }; + + mdp@5100000 { + status = "okay"; + + ports { + port@3 { + endpoint { + remote-endpoint = <&hdmi_in>; + }; + }; + }; + }; }; }; diff --git a/arch/arm/boot/dts/qcom-apq8064-asus-nexus7-flo.dts b/arch/arm/boot/dts/qcom-apq8064-asus-nexus7-flo.dts index b72e09506448..e39440a86739 100644 --- a/arch/arm/boot/dts/qcom-apq8064-asus-nexus7-flo.dts +++ b/arch/arm/boot/dts/qcom-apq8064-asus-nexus7-flo.dts @@ -15,6 +15,20 @@ stdout-path = "serial0:115200n8"; }; + reserved-memory { + #address-cells = <1>; + #size-cells = <1>; + ranges; + + ramoops@88d00000{ + compatible = "ramoops"; + reg = <0x88d00000 0x100000>; + record-size = <0x00020000>; + console-size = <0x00020000>; + ftrace-size = <0x00020000>; + }; + }; + ext_3p3v: regulator-fixed@1 { compatible = "regulator-fixed"; regulator-min-microvolt = <3300000>; @@ -99,6 +113,7 @@ l2 { regulator-min-microvolt = <1200000>; regulator-max-microvolt = <1200000>; + regulator-always-on; }; /* msm_otg-HSUSB_3p3 */ @@ -133,13 +148,14 @@ regulator-min-microvolt = <3000000>; regulator-max-microvolt = <3000000>; bias-pull-down; + regulator-always-on; }; /* pwm_power for backlight */ l17 { regulator-min-microvolt = <3000000>; - regulator-max-microvolt = <3600000>; - bias-pull-down; + regulator-max-microvolt = <3000000>; + regulator-always-on; }; /* camera, qdsp6 */ @@ -184,6 +200,63 @@ }; }; + mdp@5100000 { + status = "okay"; + ports { + port@1 { + mdp_dsi1_out: endpoint { + remote-endpoint = <&dsi0_in>; + }; + }; + }; + 
}; + + dsi0: mdss_dsi@4700000 { + status = "okay"; + vdda-supply = <&pm8921_l2>;/*VDD_MIPI1 to 4*/ + vdd-supply = <&pm8921_l8>; + vddio-supply = <&pm8921_lvs7>; + avdd-supply = <&pm8921_l11>; + vcss-supply = <&ext_3p3v>; + + panel@0 { + reg = <0>; + compatible = "jdi,lt070me05000"; + + vddp-supply = <&pm8921_l17>; + iovcc-supply = <&pm8921_lvs7>; + + enable-gpios = <&pm8921_gpio 36 GPIO_ACTIVE_HIGH>; + reset-gpios = <&tlmm_pinmux 54 GPIO_ACTIVE_LOW>; + dcdc-en-gpios = <&pm8921_gpio 23 GPIO_ACTIVE_HIGH>; + + port { + panel_in: endpoint { + remote-endpoint = <&dsi0_out>; + }; + }; + }; + ports { + port@0 { + dsi0_in: endpoint { + remote-endpoint = <&mdp_dsi1_out>; + }; + }; + + port@1 { + dsi0_out: endpoint { + remote-endpoint = <&panel_in>; + data-lanes = <0 1 2 3>; + }; + }; + }; + }; + + dsi-phy@4700200 { + status = "okay"; + vddio-supply = <&pm8921_lvs7>;/*VDD_PLL2_1 to 7*/ + }; + gsbi@16200000 { status = "okay"; qcom,mode = <GSBI_PROT_I2C>; diff --git a/arch/arm/boot/dts/qcom-apq8064-cm-qs600.dts b/arch/arm/boot/dts/qcom-apq8064-cm-qs600.dts index 35f1d46edded..17d619fd919a 100644 --- a/arch/arm/boot/dts/qcom-apq8064-cm-qs600.dts +++ b/arch/arm/boot/dts/qcom-apq8064-cm-qs600.dts @@ -28,6 +28,17 @@ }; }; + hdmi-out { + compatible = "hdmi-connector"; + type = "d"; + + port { + hdmi_con: endpoint { + remote-endpoint = <&hdmi_out>; + }; + }; + }; + soc { pinctrl@800000 { card_detect: card_detect { @@ -246,5 +257,36 @@ mmc-pwrseq = <&sdcc4_pwrseq>; }; }; + + hdmi-tx@4a00000 { + status = "okay"; + core-vdda-supply = <&pm8921_hdmi_switch>; + hdmi-mux-supply = <&v3p3_fixed>; + + ports { + port@1 { + endpoint { + remote-endpoint = <&hdmi_con>; + }; + }; + }; + }; + + hdmi-phy@4a00400 { + status = "okay"; + core-vdda-supply = <&pm8921_hdmi_switch>; + }; + + mdp@5100000 { + status = "okay"; + + ports { + port@3 { + endpoint { + remote-endpoint = <&hdmi_in>; + }; + }; + }; + }; }; }; diff --git a/arch/arm/boot/dts/qcom-apq8064-coresight.dtsi 
b/arch/arm/boot/dts/qcom-apq8064-coresight.dtsi new file mode 100644 index 000000000000..9395fddb1bf0 --- /dev/null +++ b/arch/arm/boot/dts/qcom-apq8064-coresight.dtsi @@ -0,0 +1,196 @@ +/* + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +&soc { + + etb@1a01000 { + compatible = "coresight-etb10", "arm,primecell"; + reg = <0x1a01000 0x1000>; + + clocks = <&rpmcc RPM_QDSS_CLK>; + clock-names = "apb_pclk"; + + port { + etb_in: endpoint { + slave-mode; + remote-endpoint = <&replicator_out0>; + }; + }; + }; + + tpiu@1a03000 { + compatible = "arm,coresight-tpiu", "arm,primecell"; + reg = <0x1a03000 0x1000>; + + clocks = <&rpmcc RPM_QDSS_CLK>; + clock-names = "apb_pclk"; + + port { + tpiu_in: endpoint { + slave-mode; + remote-endpoint = <&replicator_out1>; + }; + }; + }; + + replicator { + compatible = "arm,coresight-replicator"; + + clocks = <&rpmcc RPM_QDSS_CLK>; + clock-names = "apb_pclk"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + replicator_out0: endpoint { + remote-endpoint = <&etb_in>; + }; + }; + port@1 { + reg = <1>; + replicator_out1: endpoint { + remote-endpoint = <&tpiu_in>; + }; + }; + port@2 { + reg = <0>; + replicator_in: endpoint { + slave-mode; + remote-endpoint = <&funnel_out>; + }; + }; + }; + }; + + funnel@1a04000 { + compatible = "arm,coresight-funnel", "arm,primecell"; + reg = <0x1a04000 0x1000>; + + clocks = <&rpmcc RPM_QDSS_CLK>; + clock-names = "apb_pclk"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + /* + * 
Not described input ports: + * 2 - connected to STM component + * 3 - not-connected + * 6 - not-connected + * 7 - not-connected + */ + port@0 { + reg = <0>; + funnel_in0: endpoint { + slave-mode; + remote-endpoint = <&etm0_out>; + }; + }; + port@1 { + reg = <1>; + funnel_in1: endpoint { + slave-mode; + remote-endpoint = <&etm1_out>; + }; + }; + port@4 { + reg = <4>; + funnel_in4: endpoint { + slave-mode; + remote-endpoint = <&etm2_out>; + }; + }; + port@5 { + reg = <5>; + funnel_in5: endpoint { + slave-mode; + remote-endpoint = <&etm3_out>; + }; + }; + port@8 { + reg = <0>; + funnel_out: endpoint { + remote-endpoint = <&replicator_in>; + }; + }; + }; + }; + + etm@1a1c000 { + compatible = "arm,coresight-etm3x", "arm,primecell"; + reg = <0x1a1c000 0x1000>; + + clocks = <&rpmcc RPM_QDSS_CLK>; + clock-names = "apb_pclk"; + + cpu = <&CPU0>; + + port { + etm0_out: endpoint { + remote-endpoint = <&funnel_in0>; + }; + }; + }; + + etm@1a1d000 { + compatible = "arm,coresight-etm3x", "arm,primecell"; + reg = <0x1a1d000 0x1000>; + + clocks = <&rpmcc RPM_QDSS_CLK>; + clock-names = "apb_pclk"; + + cpu = <&CPU1>; + + port { + etm1_out: endpoint { + remote-endpoint = <&funnel_in1>; + }; + }; + }; + + etm@1a1e000 { + compatible = "arm,coresight-etm3x", "arm,primecell"; + reg = <0x1a1e000 0x1000>; + + clocks = <&rpmcc RPM_QDSS_CLK>; + clock-names = "apb_pclk"; + + cpu = <&CPU2>; + + port { + etm2_out: endpoint { + remote-endpoint = <&funnel_in4>; + }; + }; + }; + + etm@1a1f000 { + compatible = "arm,coresight-etm3x", "arm,primecell"; + reg = <0x1a1f000 0x1000>; + + clocks = <&rpmcc RPM_QDSS_CLK>; + clock-names = "apb_pclk"; + + cpu = <&CPU3>; + + port { + etm3_out: endpoint { + remote-endpoint = <&funnel_in5>; + }; + }; + }; +}; diff --git a/arch/arm/boot/dts/qcom-apq8064-eI_ERAGON600.dts b/arch/arm/boot/dts/qcom-apq8064-eI_ERAGON600.dts new file mode 100644 index 000000000000..86d84f912229 --- /dev/null +++ b/arch/arm/boot/dts/qcom-apq8064-eI_ERAGON600.dts @@ -0,0 +1,458 @@ 
+#include "qcom-apq8064-v2.0.dtsi" +#include <dt-bindings/gpio/gpio.h> +#include <dt-bindings/pinctrl/qcom,pmic-gpio.h> + +/ { + model = "Qualcomm APQ8064/ERAGON600"; + compatible = "qcom,apq8064-eragon600", "qcom,apq8064"; + + aliases { + serial0 = &gsbi7_serial; + serial1 = &gsbi6_serial; + }; + + hdmi-out { + compatible = "hdmi-connector"; + type = "d"; + + port { + hdmi_con: endpoint { + remote-endpoint = <&hdmi_out>; + }; + }; + }; + + soc { + pinctrl@800000 { + card_detect: card_detect { + mux { + pins = "gpio26"; + function = "gpio"; + bias-disable; + }; + }; + }; + + rpm@108000 { + regulators { + vin_lvs1_3_6-supply = <&pm8921_s4>; + vin_lvs2-supply = <&pm8921_s1>; + vin_lvs4_5_7-supply = <&pm8921_s4>; + + vdd_l1_l2_l12_l18-supply = <&pm8921_s4>; + vdd_l24-supply = <&pm8921_s1>; + vdd_l25-supply = <&pm8921_s1>; + vdd_l26-supply = <&pm8921_s7>; + vdd_l27-supply = <&pm8921_s7>; + vdd_l28-supply = <&pm8921_s7>; + + + /* Buck SMPS */ + pm8921_s1: s1 { + regulator-always-on; + regulator-min-microvolt = <1225000>; + regulator-max-microvolt = <1225000>; + qcom,switch-mode-frequency = <3200000>; + bias-pull-down; + }; + + pm8921_s3: s3 { + regulator-min-microvolt = <1000000>; + regulator-max-microvolt = <1400000>; + qcom,switch-mode-frequency = <4800000>; + }; + + pm8921_s4: s4 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + qcom,switch-mode-frequency = <3200000>; + qcom,force-mode = <3>; + }; + + pm8921_s7: s7 { + regulator-min-microvolt = <1300000>; + regulator-max-microvolt = <1300000>; + qcom,switch-mode-frequency = <3200000>; + }; + + pm8921_l2: l2 { + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + bias-pull-down; + }; + + pm8921_l3: l3 { + regulator-min-microvolt = <3050000>; + regulator-max-microvolt = <3300000>; + bias-pull-down; + }; + + pm8921_l4: l4 { + regulator-min-microvolt = <1000000>; + regulator-max-microvolt = <1800000>; + bias-pull-down; + }; + + pm8921_l5: l5 { + 
regulator-min-microvolt = <2750000>; + regulator-max-microvolt = <3000000>; + bias-pull-down; + }; + + pm8921_l6: l6 { + regulator-min-microvolt = <2950000>; + regulator-max-microvolt = <2950000>; + bias-pull-down; + }; + + pm8921_l23: l23 { + regulator-min-microvolt = <1700000>; + regulator-max-microvolt = <1900000>; + bias-pull-down; + }; + + pm8921_lvs1: lvs1 { + bias-pull-down; + }; + + pm8921_lvs6: lvs6 { + bias-pull-down; + }; + + pm8921_lvs7: lvs7 { + bias-pull-down; + }; + }; + }; + + ext_3p3v: regulator-fixed@1 { + compatible = "regulator-fixed"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-name = "ext_3p3v"; + regulator-type = "voltage"; + startup-delay-us = <0>; + gpio = <&tlmm_pinmux 77 GPIO_ACTIVE_HIGH>; + enable-active-high; + regulator-boot-on; + }; + + hdmi-tx@4a00000 { + status = "okay"; + core-vdda-supply = <&pm8921_hdmi_switch>; + hdmi-mux-supply = <&ext_3p3v>; + + hpd-gpio = <&tlmm_pinmux 72 GPIO_ACTIVE_HIGH>; + + ports { + port@0 { + endpoint { + remote-endpoint = <&mdp_dtv_out>; + }; + }; + + port@1 { + endpoint { + remote-endpoint = <&hdmi_con>; + }; + }; + }; + }; + + hdmi-phy@4a00400 { + status = "okay"; + core-vdda-supply = <&pm8921_hdmi_switch>; + }; + + mdp@5100000 { + status = "okay"; + lvds-vccs-3p3v-supply = <&ext_3p3v>; + lvds-pll-vdda-supply = <&pm8921_l2>; + lvds-vdda-supply = <&pm8921_lvs7>; + + port { + lvds_out: endpoint { + remote-endpoint = <&data_image_in>; + }; + }; + }; + + panel_3p3v: panel_3p3v { + pinctrl-0 = <&pwm_en_gpios>; + pinctrl-names = "default"; + compatible = "regulator-fixed"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-name = "panel_en_3p3v"; + regulator-type = "voltage"; + startup-delay-us = <0>; + gpio = <&pm8921_gpio 36 GPIO_ACTIVE_HIGH>; + enable-active-high; + regulator-boot-on; + }; + + backlight: backlight{ + pinctrl-0 = <&pwm_bl_gpios>; + pinctrl-names = "default"; + compatible = "gpio-backlight"; + gpios = 
<&pm8921_gpio 26 GPIO_ACTIVE_HIGH>; + default-on; + }; + + levelshifter: levelshifter{ + pinctrl-0 = <&pwm_bl_gpios>; + pinctrl-names = "default"; + compatible = "gpio-backlight"; + gpios = <&tlmm_pinmux 85 GPIO_ACTIVE_HIGH>; + startup-delay-us = <2000>; + default-on; + }; + + panel: data_image,scf0700C48ggu21 { + status = "okay"; + compatible = "data_image,scf0700C48ggu21"; + ddc-i2c-bus = <&i2c3>; + backlight = <&backlight>; + power-supply = <&panel_3p3v>; + port { + data_image_in: endpoint { + remote-endpoint = <&lvds_out>; + }; + }; + }; + + gsbi3: gsbi@16200000 { + status = "okay"; + qcom,mode = <GSBI_PROT_I2C>; + i2c3: i2c@16280000 { + status = "okay"; + pinctrl-0 = <&i2c3_pins>; + pinctrl-names = "default"; + }; + }; + + gsbi@12440000 { + status = "okay"; + qcom,mode = <GSBI_PROT_I2C>; + + i2c@12460000 { + status = "okay"; + clock-frequency = <200000>; + pinctrl-0 = <&i2c1_pins>; + pinctrl-names = "default"; + + eeprom: eeprom@52 { + compatible = "atmel,24c128"; + reg = <0x52>; + pagesize = <32>; + }; + }; + }; + + gsbi@16500000 { + status = "ok"; + qcom,mode = <GSBI_PROT_UART_W_FC>; + + serial@16540000 { + status = "ok"; + + pinctrl-names = "default"; + pinctrl-0 = <&gsbi6_uart_4pins>; + }; + }; + + gsbi@16600000 { + status = "ok"; + qcom,mode = <GSBI_PROT_I2C_UART>; + serial@16640000 { + status = "ok"; + }; + }; + + adm: dma@18320000 { + status = "okay"; + }; + + sata_phy0: phy@1b400000 { + status = "okay"; + }; + + sata0: sata@29000000 { + status = "okay"; + target-supply = <&pm8921_s4>; + }; + + /* OTG */ + usb1_phy: phy@12500000 { + status = "okay"; + vddcx-supply = <&pm8921_s3>; + v3p3-supply = <&pm8921_l3>; + v1p8-supply = <&pm8921_l4>; + }; + + usb3_phy: phy@12520000 { + status = "okay"; + vddcx-supply = <&pm8921_s3>; + v3p3-supply = <&pm8921_l3>; + v1p8-supply = <&pm8921_l23>; + }; + + usb4_phy: phy@12530000 { + status = "okay"; + vddcx-supply = <&pm8921_s3>; + v3p3-supply = <&pm8921_l3>; + v1p8-supply = <&pm8921_l23>; + }; + + gadget1: 
gadget@12500000 { + status = "okay"; + }; + + /* OTG */ + usb1: usb@12500000 { + status = "okay"; + }; + + usb3: usb@12520000 { + status = "okay"; + }; + + usb4: usb@12530000 { + status = "okay"; + }; + + /* on board fixed 3.3v supply */ + v3p3_pcieclk: v3p3-pcieclk { + compatible = "regulator-fixed"; + regulator-name = "PCIE V3P3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + }; + + pci@1b500000 { + status = "ok"; + pcie-clk-supply = <&v3p3_pcieclk>; + avdd-supply = <&pm8921_s3>; + vdd-supply = <&pm8921_lvs6>; + ext-3p3v-supply = <&ext_3p3v>; + qcom,external-phy-refclk; + reset-gpio = <&tlmm_pinmux 27 GPIO_ACTIVE_LOW>; + }; + + leds { + compatible = "gpio-leds"; + pinctrl-names = "default"; + pinctrl-0 = <¬ify_led>; + + led@1 { + label = "apq8064:green:user1"; + gpios = <&pm8921_gpio 18 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "heartbeat"; + default-state = "on"; + }; + }; + + qcom,ssbi@500000 { + pmicintc: pmic@0 { + pm8921_gpio: gpio@150 { + pinctrl-names = "default"; + pinctrl-0 = <&wlan_default_gpios &bt_gpios>; + + pwm_bl_gpios: pwm-bl-gpios { + pios { + pins = "gpio26"; + bias-disable; + function = "normal"; + qcom,drive-strength = <1>; + power-source = <PM8921_GPIO_S4>; + }; + }; + + pwm_en_gpios: pwm-en-gpios { + pios { + pins = "gpio36"; + bias-disable; + function = "normal"; + qcom,drive-strength = <1>; + power-source = <PM8921_GPIO_S4>; + }; + }; + + wlan_default_gpios: wlan-gpios { + pios { + pins = "gpio43"; + function = "normal"; + bias-disable; + power-source = <PM8921_GPIO_S4>; + }; + }; + + bt_gpios: bt-gpio { + pios { + pins = "gpio44"; + function = "normal"; + bias-disable; + power-source = <PM8921_GPIO_S4>; + }; + }; + + notify_led: nled { + pios { + pins = "gpio18"; + function = "normal"; + bias-disable; + power-source = <PM8921_GPIO_S4>; + }; + }; + }; + }; + }; + sdcc4_pwrseq:pwrseq { + compatible = "mmc-pwrseq-simple"; + reset-gpios = <&pm8921_gpio 43 GPIO_ACTIVE_LOW>, + 
<&pm8921_gpio 44 GPIO_ACTIVE_LOW>; + }; + + amba { + /* eMMC */ + sdcc1: sdcc@12400000 { + status = "okay"; + vmmc-supply = <&pm8921_l5>; + vqmmc-supply = <&pm8921_s4>; + }; + + /* External micro SD card */ + sdcc3: sdcc@12180000 { + status = "okay"; + vmmc-supply = <&pm8921_l6>; + pinctrl-names = "default"; + pinctrl-0 = <&card_detect>; + cd-gpios = <&tlmm_pinmux 26 GPIO_ACTIVE_LOW>; + }; + /* WLAN */ + sdcc4: sdcc@121c0000 { + status = "okay"; + vmmc-supply = <&ext_3p3v>; + vqmmc-supply = <&pm8921_lvs1>; + mmc-pwrseq = <&sdcc4_pwrseq>; + }; + }; + }; +}; + +&CPU0 { + cpu-supply = <&saw0>; +}; + +&CPU1 { + cpu-supply = <&saw1>; +}; + +&CPU2 { + cpu-supply = <&saw2>; +}; + +&CPU3 { + cpu-supply = <&saw3>; +}; diff --git a/arch/arm/boot/dts/qcom-apq8064-ifc6410.dts b/arch/arm/boot/dts/qcom-apq8064-ifc6410.dts index 2eeb0904eaa7..cb714cbb4f74 100644 --- a/arch/arm/boot/dts/qcom-apq8064-ifc6410.dts +++ b/arch/arm/boot/dts/qcom-apq8064-ifc6410.dts @@ -25,9 +25,10 @@ sdcc4_pwrseq: sdcc4_pwrseq { pinctrl-names = "default"; - pinctrl-0 = <&wlan_default_gpios>; + pinctrl-0 = <&wlan_default_gpios &bt_gpios>; compatible = "mmc-pwrseq-simple"; - reset-gpios = <&pm8921_gpio 43 GPIO_ACTIVE_LOW>; + reset-gpios = <&pm8921_gpio 43 GPIO_ACTIVE_LOW>, + <&pm8921_gpio 44 GPIO_ACTIVE_LOW>; }; }; @@ -43,6 +44,22 @@ }; }; + hdmi-out { + compatible = "hdmi-connector"; + type = "d"; + + port { + hdmi_con: endpoint { + remote-endpoint = <&hdmi_out>; + }; + }; + }; + + smd { + q6@1 { + status = "okay"; + }; + }; soc { pinctrl@800000 { card_detect: card_detect { @@ -137,6 +154,12 @@ bias-pull-down; }; + l26 { + regulator-min-microvolt = < 375000>; + regulator-max-microvolt = <1050000>; + bias-pull-down; + }; + lvs1 { bias-pull-down; }; @@ -144,6 +167,10 @@ lvs6 { bias-pull-down; }; + + lvs7 { + bias-pull-down; + }; }; }; @@ -159,6 +186,11 @@ regulator-boot-on; }; + pil_q6v4: pil@28800000 { + qcom,pll-supply = <&pm8921_l26>; + qcom,pll-uV = <1050000>; + }; + gsbi3: gsbi@16200000 { status = 
"okay"; qcom,mode = <GSBI_PROT_I2C>; @@ -223,6 +255,10 @@ }; }; + adm: dma@18320000 { + status = "okay"; + }; + sata_phy0: phy@1b400000 { status = "okay"; }; @@ -293,6 +329,15 @@ }; }; + bt_gpios: bt-gpio { + pios { + pins = "gpio44"; + function = "normal"; + bias-disable; + power-source = <PM8921_GPIO_S4>; + }; + }; + notify_led: nled { pios { pins = "gpio18"; @@ -329,5 +374,46 @@ mmc-pwrseq = <&sdcc4_pwrseq>; }; }; + + hdmi-tx@4a00000 { + status = "okay"; + + core-vdda-supply = <&pm8921_hdmi_switch>; + hdmi-mux-supply = <&ext_3p3v>; + + hpd-gpios = <&tlmm_pinmux 72 GPIO_ACTIVE_HIGH>; + + ports { + port@0 { + endpoint { + remote-endpoint = <&mdp_dtv_out>; + }; + }; + + port@1 { + endpoint { + remote-endpoint = <&hdmi_con>; + }; + }; + }; + }; + + hdmi-phy@4a00400 { + status = "okay"; + + core-vdda-supply = <&pm8921_hdmi_switch>; + }; + + mdp@5100000 { + status = "okay"; + + ports { + port@3 { + endpoint { + remote-endpoint = <&hdmi_in>; + }; + }; + }; + }; }; }; diff --git a/arch/arm/boot/dts/qcom-apq8064-pins.dtsi b/arch/arm/boot/dts/qcom-apq8064-pins.dtsi index 6b801e7e57a2..390028b57bfa 100644 --- a/arch/arm/boot/dts/qcom-apq8064-pins.dtsi +++ b/arch/arm/boot/dts/qcom-apq8064-pins.dtsi @@ -284,4 +284,43 @@ bias-disable = <0>; }; }; + + hdmi_pinctrl: hdmi-pinctrl { + mux { + pins = "gpio70", "gpio71", "gpio72"; + function = "hdmi"; + }; + + pinconf_ddc { + pins = "gpio70", "gpio71"; + bias-pull-up; + drive-strength = <2>; + }; + + pinconf_hpd { + pins = "gpio72"; + bias-pull-down; + drive-strength = <16>; + }; + }; + + wcnss_pin_a: wcnss-pins-active { + fm { + pins = "gpio14", "gpio15"; + function = "riva_fm"; + }; + + bt { + pins = "gpio16", "gpio17"; + function = "riva_bt"; + }; + + wlan { + pins = "gpio64", "gpio65", "gpio66", "gpio67", "gpio68"; + function = "riva_wlan"; + + drive-strength = <6>; + bias-pull-down; + }; + }; }; diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi index 1dbe697b2e90..1e2c76e0ff7e 100644 --- 
a/arch/arm/boot/dts/qcom-apq8064.dtsi +++ b/arch/arm/boot/dts/qcom-apq8064.dtsi @@ -4,9 +4,12 @@ #include <dt-bindings/clock/qcom,gcc-msm8960.h> #include <dt-bindings/reset/qcom,gcc-msm8960.h> #include <dt-bindings/clock/qcom,mmcc-msm8960.h> +#include <dt-bindings/clock/qcom,rpmcc.h> #include <dt-bindings/soc/qcom,gsbi.h> #include <dt-bindings/interrupt-controller/irq.h> #include <dt-bindings/interrupt-controller/arm-gic.h> +#include <dt-bindings/thermal/thermal.h> + / { model = "Qualcomm APQ8064"; compatible = "qcom,apq8064"; @@ -21,13 +24,18 @@ reg = <0x80000000 0x200000>; no-map; }; + + wcnss_mem: wcnss@8f000000 { + reg = <0x8f000000 0x700000>; + no-map; + }; }; cpus { #address-cells = <1>; #size-cells = <0>; - cpu@0 { + CPU0: cpu@0 { compatible = "qcom,krait"; enable-method = "qcom,kpss-acc-v1"; device_type = "cpu"; @@ -36,9 +44,16 @@ qcom,acc = <&acc0>; qcom,saw = <&saw0>; cpu-idle-states = <&CPU_SPC>; + clocks = <&kraitcc 0>, <&kraitcc 4>; + clock-names = "cpu", "l2"; + clock-latency = <100000>; + cpu-supply = <&saw0_regulator>; + cooling-min-level = <0>; + cooling-max-level = <7>; + #cooling-cells = <2>; }; - cpu@1 { + CPU1: cpu@1 { compatible = "qcom,krait"; enable-method = "qcom,kpss-acc-v1"; device_type = "cpu"; @@ -47,9 +62,16 @@ qcom,acc = <&acc1>; qcom,saw = <&saw1>; cpu-idle-states = <&CPU_SPC>; + clocks = <&kraitcc 1>, <&kraitcc 4>; + clock-names = "cpu", "l2"; + clock-latency = <100000>; + cpu-supply = <&saw1_regulator>; + cooling-min-level = <0>; + cooling-max-level = <7>; + #cooling-cells = <2>; }; - cpu@2 { + CPU2: cpu@2 { compatible = "qcom,krait"; enable-method = "qcom,kpss-acc-v1"; device_type = "cpu"; @@ -58,9 +80,16 @@ qcom,acc = <&acc2>; qcom,saw = <&saw2>; cpu-idle-states = <&CPU_SPC>; + clocks = <&kraitcc 2>, <&kraitcc 4>; + clock-names = "cpu", "l2"; + clock-latency = <100000>; + cpu-supply = <&saw2_regulator>; + cooling-min-level = <0>; + cooling-max-level = <7>; + #cooling-cells = <2>; }; - cpu@3 { + CPU3: cpu@3 { compatible = 
"qcom,krait"; enable-method = "qcom,kpss-acc-v1"; device_type = "cpu"; @@ -69,6 +98,13 @@ qcom,acc = <&acc3>; qcom,saw = <&saw3>; cpu-idle-states = <&CPU_SPC>; + clocks = <&kraitcc 3>, <&kraitcc 4>; + clock-names = "cpu", "l2"; + clock-latency = <100000>; + cpu-supply = <&saw3_regulator>; + cooling-min-level = <0>; + cooling-max-level = <7>; + #cooling-cells = <2>; }; L2: l2-cache { @@ -76,6 +112,10 @@ cache-level = <2>; }; + qcom,l2 { + qcom,l2-rates = <384000000 972000000 1188000000>; + }; + idle-states { CPU_SPC: spc { compatible = "qcom,idle-state-spc", @@ -107,6 +147,13 @@ type = "critical"; }; }; + + cooling-maps { + map0 { + trip = <&cpu_alert0>; + cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; }; cpu-thermal1 { @@ -128,6 +175,13 @@ type = "critical"; }; }; + + cooling-maps { + map0 { + trip = <&cpu_alert1>; + cooling-device = <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; }; cpu-thermal2 { @@ -149,6 +203,13 @@ type = "critical"; }; }; + + cooling-maps { + map0 { + trip = <&cpu_alert2>; + cooling-device = <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; }; cpu-thermal3 { @@ -170,6 +231,13 @@ type = "critical"; }; }; + + cooling-maps { + map0 { + trip = <&cpu_alert3>; + cooling-device = <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; }; }; @@ -178,6 +246,11 @@ interrupts = <1 10 0x304>; }; + kraitcc: clock-controller { + compatible = "qcom,krait-cc-v1"; + #clock-cells = <1>; + }; + clocks { cxo_board { compatible = "fixed-clock"; @@ -211,6 +284,459 @@ hwlocks = <&sfpb_mutex 3>; }; + smsm { + compatible = "qcom,smsm"; + + #address-cells = <1>; + #size-cells = <0>; + + qcom,ipc-1 = <&l2cc 8 4>; + qcom,ipc-2 = <&l2cc 8 14>; + qcom,ipc-3 = <&l2cc 8 23>; + qcom,ipc-4 = <&sps_sic_non_secure 0x4094 0>; + + apps_smsm: apps@0 { + reg = <0>; + #qcom,smem-state-cells = <1>; + }; + + modem_smsm: modem@1 { + reg = <1>; + interrupts = <0 38 IRQ_TYPE_EDGE_RISING>; + + interrupt-controller; + #interrupt-cells = <2>; + }; + + q6_smsm: 
q6@2 { + reg = <2>; + interrupts = <0 89 IRQ_TYPE_EDGE_RISING>; + + interrupt-controller; + #interrupt-cells = <2>; + }; + + wcnss_smsm: wcnss@3 { + reg = <3>; + interrupts = <0 204 IRQ_TYPE_EDGE_RISING>; + + interrupt-controller; + #interrupt-cells = <2>; + }; + + dsps_smsm: dsps@4 { + reg = <4>; + interrupts = <0 137 IRQ_TYPE_EDGE_RISING>; + + interrupt-controller; + #interrupt-cells = <2>; + }; + }; + + qcom,pvs { + qcom,pvs-format-a; + qcom,speed0-pvs0-bin-v0 = + < 384000000 950000 >, + < 486000000 975000 >, + < 594000000 1000000 >, + < 702000000 1025000 >, + < 810000000 1075000 >, + < 918000000 1100000 >, + < 1026000000 1125000 >, + < 1080000000 1175000 >, + < 1134000000 1175000 >, + < 1188000000 1200000 >, + < 1242000000 1200000 >, + < 1296000000 1225000 >, + < 1350000000 1225000 >, + < 1404000000 1237500 >, + < 1458000000 1237500 >, + < 1512000000 1250000 >; + + qcom,speed0-pvs1-bin-v0 = + < 384000000 900000 >, + < 486000000 925000 >, + < 594000000 950000 >, + < 702000000 975000 >, + < 810000000 1025000 >, + < 918000000 1050000 >, + < 1026000000 1075000 >, + < 1080000000 1125000 >, + < 1134000000 1125000 >, + < 1188000000 1150000 >, + < 1242000000 1150000 >, + < 1296000000 1175000 >, + < 1350000000 1175000 >, + < 1404000000 1187500 >, + < 1458000000 1187500 >, + < 1512000000 1200000 >; + + qcom,speed0-pvs3-bin-v0 = + < 384000000 850000 >, + < 486000000 875000 >, + < 594000000 900000 >, + < 702000000 925000 >, + < 810000000 975000 >, + < 918000000 1000000 >, + < 1026000000 1025000 >, + < 1080000000 1075000 >, + < 1134000000 1075000 >, + < 1188000000 1100000 >, + < 1242000000 1100000 >, + < 1296000000 1125000 >, + < 1350000000 1125000 >, + < 1404000000 1137500 >, + < 1458000000 1137500 >, + < 1512000000 1150000 >; + + qcom,speed0-pvs4-bin-v0 = + < 384000000 850000 >, + < 486000000 875000 >, + < 594000000 900000 >, + < 702000000 925000 >, + < 810000000 962500 >, + < 918000000 975000 >, + < 1026000000 1000000 >, + < 1080000000 1050000 >, + < 1134000000 1050000 
>, + < 1188000000 1075000 >, + < 1242000000 1075000 >, + < 1296000000 1100000 >, + < 1350000000 1100000 >, + < 1404000000 1112500 >, + < 1458000000 1112500 >, + < 1512000000 1125000 >; + + qcom,speed1-pvs0-bin-v0 = + < 384000000 950000 >, + < 486000000 950000 >, + < 594000000 950000 >, + < 702000000 962500 >, + < 810000000 1000000 >, + < 918000000 1025000 >, + < 1026000000 1037500 >, + < 1134000000 1075000 >, + < 1242000000 1087500 >, + < 1350000000 1125000 >, + < 1458000000 1150000 >, + < 1566000000 1175000 >, + < 1674000000 1225000 >, + < 1728000000 1250000 >; + + qcom,speed1-pvs1-bin-v0 = + < 384000000 950000 >, + < 486000000 950000 >, + < 594000000 950000 >, + < 702000000 962500 >, + < 810000000 975000 >, + < 918000000 1000000 >, + < 1026000000 1012500 >, + < 1134000000 1037500 >, + < 1242000000 1050000 >, + < 1350000000 1087500 >, + < 1458000000 1112500 >, + < 1566000000 1150000 >, + < 1674000000 1187500 >, + < 1728000000 1200000 >; + + qcom,speed1-pvs2-bin-v0 = + < 384000000 925000 >, + < 486000000 925000 >, + < 594000000 925000 >, + < 702000000 925000 >, + < 810000000 937500 >, + < 918000000 950000 >, + < 1026000000 975000 >, + < 1134000000 1000000 >, + < 1242000000 1012500 >, + < 1350000000 1037500 >, + < 1458000000 1075000 >, + < 1566000000 1100000 >, + < 1674000000 1137500 >, + < 1728000000 1162500 >; + + qcom,speed1-pvs3-bin-v0 = + < 384000000 900000 >, + < 486000000 900000 >, + < 594000000 900000 >, + < 702000000 900000 >, + < 810000000 900000 >, + < 918000000 925000 >, + < 1026000000 950000 >, + < 1134000000 975000 >, + < 1242000000 987500 >, + < 1350000000 1000000 >, + < 1458000000 1037500 >, + < 1566000000 1062500 >, + < 1674000000 1100000 >, + < 1728000000 1125000 >; + + qcom,speed1-pvs4-bin-v0 = + < 384000000 875000 >, + < 486000000 875000 >, + < 594000000 875000 >, + < 702000000 875000 >, + < 810000000 887500 >, + < 918000000 900000 >, + < 1026000000 925000 >, + < 1134000000 950000 >, + < 1242000000 962500 >, + < 1350000000 975000 >, + < 
1458000000 1000000 >, + < 1566000000 1037500 >, + < 1674000000 1075000 >, + < 1728000000 1100000 >; + + qcom,speed1-pvs5-bin-v0 = + < 384000000 875000 >, + < 486000000 875000 >, + < 594000000 875000 >, + < 702000000 875000 >, + < 810000000 887500 >, + < 918000000 900000 >, + < 1026000000 925000 >, + < 1134000000 937500 >, + < 1242000000 950000 >, + < 1350000000 962500 >, + < 1458000000 987500 >, + < 1566000000 1012500 >, + < 1674000000 1050000 >, + < 1728000000 1075000 >; + + qcom,speed1-pvs6-bin-v0 = + < 384000000 875000 >, + < 486000000 875000 >, + < 594000000 875000 >, + < 702000000 875000 >, + < 810000000 887500 >, + < 918000000 900000 >, + < 1026000000 925000 >, + < 1134000000 937500 >, + < 1242000000 950000 >, + < 1350000000 962500 >, + < 1458000000 975000 >, + < 1566000000 1000000 >, + < 1674000000 1025000 >, + < 1728000000 1050000 >; + + qcom,speed2-pvs0-bin-v0 = + < 384000000 950000 >, + < 486000000 950000 >, + < 594000000 950000 >, + < 702000000 950000 >, + < 810000000 962500 >, + < 918000000 975000 >, + < 1026000000 1000000 >, + < 1134000000 1025000 >, + < 1242000000 1037500 >, + < 1350000000 1062500 >, + < 1458000000 1100000 >, + < 1566000000 1125000 >, + < 1674000000 1175000 >, + < 1782000000 1225000 >, + < 1890000000 1287500 >; + + qcom,speed2-pvs1-bin-v0 = + < 384000000 925000 >, + < 486000000 925000 >, + < 594000000 925000 >, + < 702000000 925000 >, + < 810000000 937500 >, + < 918000000 950000 >, + < 1026000000 975000 >, + < 1134000000 1000000 >, + < 1242000000 1012500 >, + < 1350000000 1037500 >, + < 1458000000 1075000 >, + < 1566000000 1100000 >, + < 1674000000 1137500 >, + < 1782000000 1187500 >, + < 1890000000 1250000 >; + + qcom,speed2-pvs2-bin-v0 = + < 384000000 900000 >, + < 486000000 900000 >, + < 594000000 900000 >, + < 702000000 900000 >, + < 810000000 912500 >, + < 918000000 925000 >, + < 1026000000 950000 >, + < 1134000000 975000 >, + < 1242000000 987500 >, + < 1350000000 1012500 >, + < 1458000000 1050000 >, + < 1566000000 1075000 >, + < 
1674000000 1112500 >, + < 1782000000 1162500 >, + < 1890000000 1212500 >; + + qcom,speed2-pvs3-bin-v0 = + < 384000000 900000 >, + < 486000000 900000 >, + < 594000000 900000 >, + < 702000000 900000 >, + < 810000000 900000 >, + < 918000000 912500 >, + < 1026000000 937500 >, + < 1134000000 962500 >, + < 1242000000 975000 >, + < 1350000000 1000000 >, + < 1458000000 1025000 >, + < 1566000000 1050000 >, + < 1674000000 1087500 >, + < 1782000000 1137500 >, + < 1890000000 1175000 >; + + qcom,speed2-pvs4-bin-v0 = + < 384000000 875000 >, + < 486000000 875000 >, + < 594000000 875000 >, + < 702000000 875000 >, + < 810000000 887500 >, + < 918000000 900000 >, + < 1026000000 925000 >, + < 1134000000 950000 >, + < 1242000000 962500 >, + < 1350000000 975000 >, + < 1458000000 1000000 >, + < 1566000000 1037500 >, + < 1674000000 1075000 >, + < 1782000000 1112500 >, + < 1890000000 1150000 >; + + qcom,speed2-pvs5-bin-v0 = + < 384000000 875000 >, + < 486000000 875000 >, + < 594000000 875000 >, + < 702000000 875000 >, + < 810000000 887500 >, + < 918000000 900000 >, + < 1026000000 925000 >, + < 1134000000 937500 >, + < 1242000000 950000 >, + < 1350000000 962500 >, + < 1458000000 987500 >, + < 1566000000 1012500 >, + < 1674000000 1050000 >, + < 1782000000 1087500 >, + < 1890000000 1125000 >; + + qcom,speed2-pvs6-bin-v0 = + < 384000000 875000 >, + < 486000000 875000 >, + < 594000000 875000 >, + < 702000000 875000 >, + < 810000000 887500 >, + < 918000000 900000 >, + < 1026000000 925000 >, + < 1134000000 937500 >, + < 1242000000 950000 >, + < 1350000000 962500 >, + < 1458000000 975000 >, + < 1566000000 1000000 >, + < 1674000000 1025000 >, + < 1782000000 1062500 >, + < 1890000000 1100000 >; + + qcom,speed14-pvs0-bin-v0 = + < 384000000 950000 >, + < 486000000 950000 >, + < 594000000 950000 >, + < 702000000 962500 >, + < 810000000 1000000 >, + < 918000000 1025000 >, + < 1026000000 1037500 >, + < 1134000000 1075000 >, + < 1242000000 1087500 >, + < 1350000000 1125000 >, + < 1458000000 1150000 >, + < 
1512000000 1162500 >; + + qcom,speed14-pvs1-bin-v0 = + < 384000000 950000 >, + < 486000000 950000 >, + < 594000000 950000 >, + < 702000000 962500 >, + < 810000000 975000 >, + < 918000000 1000000 >, + < 1026000000 1012500 >, + < 1134000000 1037500 >, + < 1242000000 1050000 >, + < 1350000000 1087500 >, + < 1458000000 1112500 >, + < 1512000000 1125000 >; + + qcom,speed14-pvs2-bin-v0 = + < 384000000 925000 >, + < 486000000 925000 >, + < 594000000 925000 >, + < 702000000 925000 >, + < 810000000 937500 >, + < 918000000 950000 >, + < 1026000000 975000 >, + < 1134000000 1000000 >, + < 1242000000 1012500 >, + < 1350000000 1037500 >, + < 1458000000 1075000 >, + < 1512000000 1087500 >; + + qcom,speed14-pvs3-bin-v0 = + < 384000000 900000 >, + < 486000000 900000 >, + < 594000000 900000 >, + < 702000000 900000 >, + < 810000000 900000 >, + < 918000000 925000 >, + < 1026000000 950000 >, + < 1134000000 975000 >, + < 1242000000 987500 >, + < 1350000000 1000000 >, + < 1458000000 1037500 >, + < 1512000000 1050000 >; + + qcom,speed14-pvs4-bin-v0 = + < 384000000 875000 >, + < 486000000 875000 >, + < 594000000 875000 >, + < 702000000 875000 >, + < 810000000 887500 >, + < 918000000 900000 >, + < 1026000000 925000 >, + < 1134000000 950000 >, + < 1242000000 962500 >, + < 1350000000 975000 >, + < 1458000000 1000000 >, + < 1512000000 1012500 >; + + qcom,speed14-pvs5-bin-v0 = + < 384000000 875000 >, + < 486000000 875000 >, + < 594000000 875000 >, + < 702000000 875000 >, + < 810000000 887500 >, + < 918000000 900000 >, + < 1026000000 925000 >, + < 1134000000 937500 >, + < 1242000000 950000 >, + < 1350000000 962500 >, + < 1458000000 987500 >, + < 1512000000 1000000 >; + + qcom,speed14-pvs6-bin-v0 = + < 384000000 875000 >, + < 486000000 875000 >, + < 594000000 875000 >, + < 702000000 875000 >, + < 810000000 887500 >, + < 918000000 900000 >, + < 1026000000 925000 >, + < 1134000000 937500 >, + < 1242000000 950000 >, + < 1350000000 962500 >, + < 1458000000 975000 >, + < 1512000000 987500 >; + }; + 
smd { compatible = "qcom,smd"; @@ -230,6 +756,12 @@ qcom,smd-edge = <1>; status = "disabled"; + + apr { + compatible = "qcom,apr"; + qcom,smd-channels = "apr_audio_svc"; + rproc = <&pil_q6v4>; + }; }; dsps@3 { @@ -248,7 +780,31 @@ qcom,smd-edge = <6>; status = "disabled"; + + wcnss { + compatible = "qcom,wcnss"; + qcom,smd-channels = "WCNSS_CTRL"; + + bt { + compatible = "qcom,btqcomsmd"; + }; + + wifi { + compatible = "qcom,wcn3660-wlan"; + + interrupts = <0 203 0>, <0 202 0>; + interrupt-names = "tx", "rx"; + + qcom,wcnss-mmio = <0x03000000 0x204000>; + + qcom,state = <&apps_smsm 10>, <&apps_smsm 9>; + qcom,state-names = "tx-enable", "tx-rings-empty"; + + local-mac-address = [ 18 00 2d 88 9c a9 ]; + }; + }; }; + }; smsm { @@ -303,6 +859,17 @@ firmware { scm { compatible = "qcom,scm-apq8064"; + + clocks = <&rpmcc RPM_DAYTONA_FABRIC_CLK>; + clock-names = "core"; + }; + }; + + clocks { + sleep_clk: sleep_clk { + compatible = "fixed-clock"; + clock-frequency = <32768>; + #clock-cells = <0>; }; }; @@ -351,6 +918,13 @@ cpu-offset = <0x80000>; }; + watchdog@208a038 { + compatible = "qcom,kpss-wdt-apq8064"; + reg = <0x0208a038 0x40>; + clocks = <&sleep_clk>; + timeout-sec = <10>; + }; + acc0: clock-controller@2088000 { compatible = "qcom,kpss-acc-v1"; reg = <0x02088000 0x1000>, <0x02008000 0x1000>; @@ -371,28 +945,60 @@ reg = <0x020b8000 0x1000>, <0x02008000 0x1000>; }; - saw0: power-controller@2089000 { - compatible = "qcom,apq8064-saw2-v1.1-cpu", "qcom,saw2"; + saw0: power-controller@2089000 { + compatible = "qcom,apq8064-saw2-v1.1-cpu", "qcom,saw2", "syscon", "simple-mfd"; reg = <0x02089000 0x1000>, <0x02009000 0x1000>; - regulator; + #address-cells = <1>; + #size-cells = <1>; + + saw0_regulator: regulator@2089000 { + compatible = "qcom,apq8064-saw2-v1.1-regulator"; + regulator-always-on; + regulator-min-microvolt = <825000>; + regulator-max-microvolt = <1250000>; + }; }; saw1: power-controller@2099000 { - compatible = "qcom,apq8064-saw2-v1.1-cpu", "qcom,saw2"; + 
compatible = "qcom,apq8064-saw2-v1.1-cpu", "qcom,saw2", "syscon", "simple-mfd"; reg = <0x02099000 0x1000>, <0x02009000 0x1000>; - regulator; + #address-cells = <1>; + #size-cells = <1>; + + saw1_regulator: regulator@2099000 { + compatible = "qcom,apq8064-saw2-v1.1-regulator"; + regulator-always-on; + regulator-min-microvolt = <825000>; + regulator-max-microvolt = <1250000>; + }; }; saw2: power-controller@20a9000 { - compatible = "qcom,apq8064-saw2-v1.1-cpu", "qcom,saw2"; + compatible = "qcom,apq8064-saw2-v1.1-cpu", "qcom,saw2", "syscon", "simple-mfd"; reg = <0x020a9000 0x1000>, <0x02009000 0x1000>; - regulator; + #address-cells = <1>; + #size-cells = <1>; + + saw2_regulator: regulator@20a9000 { + compatible = "qcom,apq8064-saw2-v1.1-regulator"; + regulator-always-on; + regulator-min-microvolt = <825000>; + regulator-max-microvolt = <1250000>; + }; }; saw3: power-controller@20b9000 { - compatible = "qcom,apq8064-saw2-v1.1-cpu", "qcom,saw2"; + compatible = "qcom,apq8064-saw2-v1.1-cpu", "qcom,saw2", "syscon", "simple-mfd"; reg = <0x020b9000 0x1000>, <0x02009000 0x1000>; - regulator; + #address-cells = <1>; + #size-cells = <1>; + + saw3_regulator: regulator@20b9000 { + compatible = "qcom,apq8064-saw2-v1.1-regulator"; + regulator-always-on; + regulator-min-microvolt = <825000>; + regulator-max-microvolt = <1250000>; + }; }; sps_sic_non_secure: sps-sic-non-secure@12100000 { @@ -560,6 +1166,7 @@ #address-cells = <1>; #size-cells = <1>; ranges; + syscon-tcsr = <&tcsr>; gsbi6_serial: serial@16540000 { compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm"; @@ -568,6 +1175,13 @@ interrupts = <0 156 0x0>; clocks = <&gcc GSBI6_UART_CLK>, <&gcc GSBI6_H_CLK>; clock-names = "core", "iface"; + + qcom,rx-crci = <11>; + qcom,tx-crci = <6>; + + dmas = <&adm 6>, <&adm 7>; + dma-names = "rx", "tx"; + status = "disabled"; }; @@ -848,6 +1462,37 @@ }; }; + wcnss-rproc@3204000 { + compatible = "qcom,riva-pil"; + reg = <0x03204000 0x100>; + + interrupts-extended = <&intc 0 199 
IRQ_TYPE_EDGE_RISING>, + <&wcnss_smsm 6 IRQ_TYPE_EDGE_RISING>; + interrupt-names = "wdog", "fatal"; + + memory-region = <&wcnss_mem>; + + vddcx-supply = <&pm8921_s3>; + vddmx-supply = <&pm8921_l24>; + vddpx-supply = <&pm8921_s4>; + + pinctrl-names = "default"; + pinctrl-0 = <&wcnss_pin_a>; + + iris { + compatible = "qcom,wcn3660"; + + clocks = <&rpmcc 9>; + clock-names = "xo"; + + vddxo-supply = <&pm8921_l4>; + vddrfa-supply = <&pm8921_s2>; + vddpa-supply = <&pm8921_l10>; + vdddig-supply = <&pm8921_lvs2>; + }; + }; + + usb1_phy: phy@12500000 { compatible = "qcom,usb-otg-ci"; reg = <0x12500000 0x400>; @@ -1055,11 +1700,256 @@ }; }; + adm: dma@18320000 { + compatible = "qcom,adm"; + reg = <0x18320000 0xE0000>; + interrupts = <GIC_SPI 171 IRQ_TYPE_NONE>; + #dma-cells = <1>; + + clocks = <&gcc ADM0_CLK>, <&gcc ADM0_PBUS_CLK>; + clock-names = "core", "iface"; + + resets = <&gcc ADM0_RESET>, + <&gcc ADM0_PBUS_RESET>, + <&gcc ADM0_C0_RESET>, + <&gcc ADM0_C1_RESET>, + <&gcc ADM0_C2_RESET>; + reset-names = "clk", "pbus", "c0", "c1", "c2"; + qcom,ee = <1>; + + status = "disabled"; + }; + tcsr: syscon@1a400000 { compatible = "qcom,tcsr-apq8064", "syscon"; reg = <0x1a400000 0x100>; }; + gpu: adreno-3xx@4300000 { + compatible = "qcom,adreno-3xx"; + reg = <0x04300000 0x20000>; + reg-names = "kgsl_3d0_reg_memory"; + interrupts = <GIC_SPI 80 0>; + interrupt-names = "kgsl_3d0_irq"; + clock-names = + "core_clk", + "iface_clk", + "mem_clk", + "mem_iface_clk"; + clocks = + <&mmcc GFX3D_CLK>, + <&mmcc GFX3D_AHB_CLK>, + <&mmcc GFX3D_AXI_CLK>, + <&mmcc MMSS_IMEM_AHB_CLK>; + qcom,chipid = <0x03020002>; + + iommus = <&gfx3d 0 + &gfx3d 1 + &gfx3d 2 + &gfx3d 3 + &gfx3d 4 + &gfx3d 5 + &gfx3d 6 + &gfx3d 7 + &gfx3d 8 + &gfx3d 9 + &gfx3d 10 + &gfx3d 11 + &gfx3d 12 + &gfx3d 13 + &gfx3d 14 + &gfx3d 15 + &gfx3d 16 + &gfx3d 17 + &gfx3d 18 + &gfx3d 19 + &gfx3d 20 + &gfx3d 21 + &gfx3d 22 + &gfx3d 23 + &gfx3d 24 + &gfx3d 25 + &gfx3d 26 + &gfx3d 27 + &gfx3d 28 + &gfx3d 29 + &gfx3d 30 + &gfx3d 31 + 
&gfx3d1 0 + &gfx3d1 1 + &gfx3d1 2 + &gfx3d1 3 + &gfx3d1 4 + &gfx3d1 5 + &gfx3d1 6 + &gfx3d1 7 + &gfx3d1 8 + &gfx3d1 9 + &gfx3d1 10 + &gfx3d1 11 + &gfx3d1 12 + &gfx3d1 13 + &gfx3d1 14 + &gfx3d1 15 + &gfx3d1 16 + &gfx3d1 17 + &gfx3d1 18 + &gfx3d1 19 + &gfx3d1 20 + &gfx3d1 21 + &gfx3d1 22 + &gfx3d1 23 + &gfx3d1 24 + &gfx3d1 25 + &gfx3d1 26 + &gfx3d1 27 + &gfx3d1 28 + &gfx3d1 29 + &gfx3d1 30 + &gfx3d1 31>; + + qcom,gpu-pwrlevels { + compatible = "qcom,gpu-pwrlevels"; + qcom,gpu-pwrlevel@0 { + qcom,gpu-freq = <450000000>; + }; + qcom,gpu-pwrlevel@1 { + qcom,gpu-freq = <27000000>; + }; + }; + }; + + mmss_sfpb: syscon@5700000 { + compatible = "syscon"; + reg = <0x5700000 0x70>; + }; + + dsi0: mdss_dsi@4700000 { + compatible = "qcom,mdss-dsi-ctrl"; + label = "MDSS DSI CTRL->0"; + #address-cells = <1>; + #size-cells = <0>; + interrupts = <GIC_SPI 82 0>; + reg = <0x04700000 0x200>; + reg-names = "dsi_ctrl"; + + clocks = <&mmcc DSI_M_AHB_CLK>, + <&mmcc DSI_S_AHB_CLK>, + <&mmcc AMP_AHB_CLK>, + <&mmcc DSI_CLK>, + <&mmcc DSI1_BYTE_CLK>, + <&mmcc DSI_PIXEL_CLK>, + <&mmcc DSI1_ESC_CLK>; + clock-names = "iface_clk", "bus_clk", "core_mmss_clk", + "src_clk", "byte_clk", "pixel_clk", + "core_clk"; + + assigned-clocks = <&mmcc DSI1_BYTE_SRC>, + <&mmcc DSI1_ESC_SRC>, + <&mmcc DSI_SRC>, + <&mmcc DSI_PIXEL_SRC>; + assigned-clock-parents = <&dsi0_phy 0>, + <&dsi0_phy 0>, + <&dsi0_phy 1>, + <&dsi0_phy 1>; + syscon-sfpb = <&mmss_sfpb>; + phys = <&dsi0_phy>; + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + dsi0_in: endpoint { + }; + }; + + port@1 { + reg = <1>; + dsi0_out: endpoint { + }; + }; + }; + }; + + + dsi0_phy: dsi-phy@4700200 { + compatible = "qcom,dsi-phy-28nm-8960"; + #clock-cells = <1>; + + reg = <0x04700200 0x100>, + <0x04700300 0x200>, + <0x04700500 0x5c>; + reg-names = "dsi_pll", "dsi_phy", "dsi_phy_regulator"; + clock-names = "iface_clk"; + clocks = <&mmcc DSI_M_AHB_CLK>; + }; + + + mdp_port0: iommu@7500000 { + compatible = 
"qcom,apq8064-iommu"; + #iommu-cells = <1>; + clock-names = + "smmu_pclk", + "iommu_clk"; + clocks = + <&mmcc SMMU_AHB_CLK>, + <&mmcc MDP_AXI_CLK>; + reg = <0x07500000 0x100000>; + interrupts = + <GIC_SPI 63 0>, + <GIC_SPI 64 0>; + qcom,ncb = <2>; + }; + + mdp_port1: iommu@7600000 { + compatible = "qcom,apq8064-iommu"; + #iommu-cells = <1>; + clock-names = + "smmu_pclk", + "iommu_clk"; + clocks = + <&mmcc SMMU_AHB_CLK>, + <&mmcc MDP_AXI_CLK>; + reg = <0x07600000 0x100000>; + interrupts = + <GIC_SPI 61 0>, + <GIC_SPI 62 0>; + qcom,ncb = <2>; + }; + + gfx3d: iommu@7c00000 { + compatible = "qcom,apq8064-iommu"; + #iommu-cells = <1>; + clock-names = + "smmu_pclk", + "iommu_clk"; + clocks = + <&mmcc SMMU_AHB_CLK>, + <&mmcc GFX3D_AXI_CLK>; + reg = <0x07c00000 0x100000>; + interrupts = + <GIC_SPI 69 0>, + <GIC_SPI 70 0>; + qcom,ncb = <3>; + }; + + gfx3d1: iommu@7d00000 { + compatible = "qcom,apq8064-iommu"; + #iommu-cells = <1>; + clock-names = + "smmu_pclk", + "iommu_clk"; + clocks = + <&mmcc SMMU_AHB_CLK>, + <&mmcc GFX3D_AXI_CLK>; + reg = <0x07d00000 0x100000>; + interrupts = + <GIC_SPI 210 0>, + <GIC_SPI 211 0>; + qcom,ncb = <3>; + }; + pcie: pci@1b500000 { compatible = "qcom,pcie-apq8064", "snps,dw-pcie"; reg = <0x1b500000 0x1000 @@ -1095,6 +1985,142 @@ reset-names = "axi", "ahb", "por", "pci", "phy"; status = "disabled"; }; + + pil_q6v4: pil@28800000 { + compatible = "qcom,tz-pil", "qcom,apq8064-tz-pil"; + qcom,firmware-name = "q6"; + reg = <0x28800000 0x100>; + reg-names = "qdsp6_base"; + qcom,pas-id = <1>; /* PAS_Q6 */ + }; + + dai_fe: dai_fe { + compatible = "qcom,msm-dai-fe"; + #sound-dai-cells = <0>; + }; + + hdmi_dai: dai_hdmi { + compatible = "qcom,msm-dai-q6-hdmi"; + #sound-dai-cells = <0>; + }; + + hdmi_codec: codec_hdmi { + compatible = "linux,hdmi-audio"; + #sound-dai-cells = <0>; + }; + + q6_pcm: msm_pcm { + compatible = "qcom,msm-pcm-dsp"; + #sound-dai-cells = <0>; + }; + + q6_route: msm_pcm_routing { + compatible = "qcom,msm-pcm-routing"; + 
#sound-dai-cells = <0>; + }; + + snd { + compatible = "qcom,snd-apq8064"; + }; + + hdmi: hdmi-tx@4a00000 { + compatible = "qcom,hdmi-tx-8960"; + pinctrl-names = "default"; + pinctrl-0 = <&hdmi_pinctrl>; + reg = <0x04a00000 0x2f0>; + reg-names = "core_physical"; + interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&mmcc HDMI_APP_CLK>, + <&mmcc HDMI_M_AHB_CLK>, + <&mmcc HDMI_S_AHB_CLK>; + clock-names = "core_clk", + "master_iface_clk", + "slave_iface_clk"; + + phys = <&hdmi_phy>; + phy-names = "hdmi-phy"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + hdmi_in: endpoint { + }; + }; + + port@1 { + reg = <1>; + hdmi_out: endpoint { + }; + }; + }; + }; + + hdmi_phy: hdmi-phy@4a00400 { + compatible = "qcom,hdmi-phy-8960"; + reg = <0x4a00400 0x60>, + <0x4a00500 0x100>; + reg-names = "hdmi_phy", + "hdmi_pll"; + + clocks = <&mmcc HDMI_S_AHB_CLK>; + clock-names = "slave_iface_clk"; + }; + + mdp: mdp@5100000 { + compatible = "qcom,mdp4"; + reg = <0x05100000 0xf0000>; + interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&mmcc MDP_CLK>, + <&mmcc MDP_AHB_CLK>, + <&mmcc MDP_AXI_CLK>, + <&mmcc MDP_LUT_CLK>, + <&mmcc HDMI_TV_CLK>, + <&mmcc MDP_TV_CLK>; + clock-names = "core_clk", + "iface_clk", + "bus_clk", + "lut_clk", + "hdmi_clk", + "tv_clk"; + + iommus = <&mdp_port0 0 + &mdp_port0 2 + &mdp_port1 0 + &mdp_port1 2>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + mdp_lvds_out: endpoint { + }; + }; + + port@1 { + reg = <1>; + mdp_dsi1_out: endpoint { + }; + }; + + port@2 { + reg = <2>; + mdp_dsi2_out: endpoint { + }; + }; + + port@3 { + reg = <3>; + mdp_dtv_out: endpoint { + }; + }; + }; + }; }; }; #include "qcom-apq8064-pins.dtsi" +#include "qcom-apq8064-coresight.dtsi" diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig index 9353184d730d..aea77fdce19b 100644 --- a/arch/arm/common/Kconfig +++ b/arch/arm/common/Kconfig @@ -9,6 +9,9 @@ config DMABOUNCE bool select ZONE_DMA +config 
KRAIT_L2_ACCESSORS + bool + config SHARP_LOCOMO bool diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile index 27f23b15b1ea..3137e2ce44ce 100644 --- a/arch/arm/common/Makefile +++ b/arch/arm/common/Makefile @@ -7,6 +7,7 @@ obj-y += firmware.o obj-$(CONFIG_ICST) += icst.o obj-$(CONFIG_SA1111) += sa1111.o obj-$(CONFIG_DMABOUNCE) += dmabounce.o +obj-$(CONFIG_KRAIT_L2_ACCESSORS) += krait-l2-accessors.o obj-$(CONFIG_SHARP_LOCOMO) += locomo.o obj-$(CONFIG_SHARP_PARAM) += sharpsl_param.o obj-$(CONFIG_SHARP_SCOOP) += scoop.o diff --git a/arch/arm/common/krait-l2-accessors.c b/arch/arm/common/krait-l2-accessors.c new file mode 100644 index 000000000000..5d514bbc88a6 --- /dev/null +++ b/arch/arm/common/krait-l2-accessors.c @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/spinlock.h> +#include <linux/export.h> + +#include <asm/barrier.h> +#include <asm/krait-l2-accessors.h> + +static DEFINE_RAW_SPINLOCK(krait_l2_lock); + +void krait_set_l2_indirect_reg(u32 addr, u32 val) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&krait_l2_lock, flags); + /* + * Select the L2 window by poking l2cpselr, then write to the window + * via l2cpdr. 
+ */ + asm volatile ("mcr p15, 3, %0, c15, c0, 6 @ l2cpselr" : : "r" (addr)); + isb(); + asm volatile ("mcr p15, 3, %0, c15, c0, 7 @ l2cpdr" : : "r" (val)); + isb(); + + raw_spin_unlock_irqrestore(&krait_l2_lock, flags); +} +EXPORT_SYMBOL(krait_set_l2_indirect_reg); + +u32 krait_get_l2_indirect_reg(u32 addr) +{ + u32 val; + unsigned long flags; + + raw_spin_lock_irqsave(&krait_l2_lock, flags); + /* + * Select the L2 window by poking l2cpselr, then read from the window + * via l2cpdr. + */ + asm volatile ("mcr p15, 3, %0, c15, c0, 6 @ l2cpselr" : : "r" (addr)); + isb(); + asm volatile ("mrc p15, 3, %0, c15, c0, 7 @ l2cpdr" : "=r" (val)); + + raw_spin_unlock_irqrestore(&krait_l2_lock, flags); + + return val; +} +EXPORT_SYMBOL(krait_get_l2_indirect_reg); diff --git a/arch/arm/configs/qcom_defconfig b/arch/arm/configs/qcom_defconfig index 9f6d2a69a6f7..72c8e5567f42 100644 --- a/arch/arm/configs/qcom_defconfig +++ b/arch/arm/configs/qcom_defconfig @@ -1,5 +1,4 @@ CONFIG_SYSVIPC=y -CONFIG_FHANDLE=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_IKCONFIG=y @@ -24,14 +23,24 @@ CONFIG_ARCH_MSM8X60=y CONFIG_ARCH_MSM8960=y CONFIG_ARCH_MSM8974=y CONFIG_ARCH_MDM9615=y +CONFIG_PCI=y +CONFIG_PCI_MSI=y +CONFIG_PCIE_QCOM=y CONFIG_SMP=y -CONFIG_HAVE_ARM_ARCH_TIMER=y CONFIG_PREEMPT=y CONFIG_AEABI=y CONFIG_HIGHMEM=y CONFIG_CLEANCACHE=y CONFIG_ARM_APPENDED_DTB=y CONFIG_ARM_ATAG_DTB_COMPAT=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_STAT_DETAILS=y +CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +CONFIG_CPUFREQ_DT=y +CONFIG_ARM_QCOM_CPUFREQ=y CONFIG_CPU_IDLE=y CONFIG_ARM_CPUIDLE=y CONFIG_VFP=y @@ -49,7 +58,6 @@ CONFIG_IP_PNP_DHCP=y # CONFIG_INET_XFRM_MODE_TRANSPORT is not set # CONFIG_INET_XFRM_MODE_TUNNEL is not set # CONFIG_INET_XFRM_MODE_BEET is not set -# CONFIG_INET_LRO is not set # CONFIG_IPV6 is not set CONFIG_CFG80211=y CONFIG_RFKILL=y @@ -68,8 +76,12 @@ CONFIG_CHR_DEV_SCH=y 
CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_LOGGING=y CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_ATA=y +CONFIG_SATA_AHCI=y +CONFIG_SATA_AHCI_PLATFORM=y CONFIG_NETDEVICES=y CONFIG_DUMMY=y +CONFIG_ATL1C=y CONFIG_KS8851=y CONFIG_MDIO_BITBANG=y CONFIG_MDIO_GPIO=y @@ -95,8 +107,6 @@ CONFIG_SERIO_LIBPS2=y CONFIG_SERIAL_MSM=y CONFIG_SERIAL_MSM_CONSOLE=y CONFIG_HW_RANDOM=y -CONFIG_HW_RANDOM_MSM=y -CONFIG_I2C=y CONFIG_I2C_CHARDEV=y CONFIG_I2C_QUP=y CONFIG_SPI=y @@ -114,20 +124,20 @@ CONFIG_PINCTRL_QCOM_SSBI_PMIC=y CONFIG_GPIOLIB=y CONFIG_DEBUG_GPIO=y CONFIG_GPIO_SYSFS=y -CONFIG_CHARGER_QCOM_SMBB=y CONFIG_POWER_RESET=y CONFIG_POWER_RESET_MSM=y CONFIG_THERMAL=y -CONFIG_MFD_PM8XXX=y CONFIG_MFD_PM8921_CORE=y CONFIG_MFD_QCOM_RPM=y CONFIG_MFD_SPMI_PMIC=y -CONFIG_REGULATOR=y CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_REGULATOR_QCOM_RPM=y +CONFIG_REGULATOR_QCOM_SAW=y CONFIG_REGULATOR_QCOM_SMD_RPM=y CONFIG_MEDIA_SUPPORT=y -CONFIG_FB=y +CONFIG_DRM=y +# CONFIG_DRM_MSM_HDMI_HDCP is not set +CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_SOUND=y CONFIG_SND=y CONFIG_SND_DYNAMIC_MINORS=y @@ -138,17 +148,22 @@ CONFIG_SND_SOC=y CONFIG_HID_BATTERY_STRENGTH=y CONFIG_USB=y CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_OTG=y CONFIG_USB_MON=y CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_MSM=y CONFIG_USB_ACM=y +CONFIG_USB_CHIPIDEA=y +CONFIG_USB_CHIPIDEA_UDC=y +CONFIG_USB_CHIPIDEA_HOST=y CONFIG_USB_SERIAL=y +CONFIG_USB_MSM_OTG=y CONFIG_USB_GADGET=y CONFIG_USB_GADGET_DEBUG_FILES=y CONFIG_USB_GADGET_VBUS_DRAW=500 CONFIG_MMC=y CONFIG_MMC_BLOCK_MINORS=32 CONFIG_MMC_ARMMMCI=y -CONFIG_MMC_QCOM_DML=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_MSM=y @@ -158,6 +173,8 @@ CONFIG_DMADEVICES=y CONFIG_QCOM_BAM_DMA=y CONFIG_STAGING=y CONFIG_COMMON_CLK_QCOM=y +CONFIG_QCOM_CLK_RPM=y +CONFIG_QCOM_CLK_SMD_RPM=y CONFIG_APQ_MMCC_8084=y CONFIG_IPQ_LCC_806X=y CONFIG_MSM_GCC_8660=y @@ -166,7 +183,11 @@ CONFIG_MDM_GCC_9615=y CONFIG_MDM_LCC_9615=y CONFIG_MSM_MMCC_8960=y CONFIG_MSM_MMCC_8974=y +CONFIG_QCOM_HFPLL=y +CONFIG_KPSS_XCC=y 
+CONFIG_KRAITCC=y CONFIG_HWSPINLOCK_QCOM=y +CONFIG_MSM_IOMMU=y CONFIG_QCOM_GSBI=y CONFIG_QCOM_PM=y CONFIG_QCOM_SMEM=y @@ -179,7 +200,6 @@ CONFIG_EXT2_FS_XATTR=y CONFIG_EXT3_FS=y CONFIG_FUSE_FS=y CONFIG_VFAT_FS=y -CONFIG_TMPFS=y CONFIG_JFFS2_FS=y CONFIG_NFS_FS=y CONFIG_NFS_V3_ACL=y diff --git a/arch/arm/include/asm/krait-l2-accessors.h b/arch/arm/include/asm/krait-l2-accessors.h new file mode 100644 index 000000000000..48fe5527bc01 --- /dev/null +++ b/arch/arm/include/asm/krait-l2-accessors.h @@ -0,0 +1,20 @@ +/* + * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __ASMARM_KRAIT_L2_ACCESSORS_H +#define __ASMARM_KRAIT_L2_ACCESSORS_H + +extern void krait_set_l2_indirect_reg(u32 addr, u32 val); +extern u32 krait_get_l2_indirect_reg(u32 addr); + +#endif diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc-pmic-pins.dtsi b/arch/arm64/boot/dts/qcom/apq8016-sbc-pmic-pins.dtsi index f881437d53c5..d94640812194 100644 --- a/arch/arm64/boot/dts/qcom/apq8016-sbc-pmic-pins.dtsi +++ b/arch/arm64/boot/dts/qcom/apq8016-sbc-pmic-pins.dtsi @@ -1,4 +1,5 @@ #include <dt-bindings/pinctrl/qcom,pmic-gpio.h> +#include <dt-bindings/pinctrl/qcom,pmic-mpp.h> &pm8916_gpios { @@ -30,6 +31,18 @@ &pm8916_mpps { + pinctrl-names = "default"; + pinctrl-0 = <&ls_exp_gpio_f>; + + ls_exp_gpio_f: pm8916_mpp4 { + pinconf { + pins = "mpp4"; + function = "digital"; + output-low; + power-source = <PM8916_MPP_L5>; // 1.8V + }; + }; + pm8916_mpps_leds: pm8916_mpps_leds { pinconf { pins = "mpp2", "mpp3"; diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc-soc-pins.dtsi b/arch/arm64/boot/dts/qcom/apq8016-sbc-soc-pins.dtsi index e1e6c6b5c489..185388de914c 100644 --- a/arch/arm64/boot/dts/qcom/apq8016-sbc-soc-pins.dtsi +++ b/arch/arm64/boot/dts/qcom/apq8016-sbc-soc-pins.dtsi @@ -72,4 +72,17 @@ bias-disable; }; }; + + msm_key_volp_n_default: msm_key_volp_n_default { + pinmux { + function = "gpio"; + pins = "gpio107"; + }; + pinconf { + pins = "gpio107"; + drive-strength = <8>; + input-enable; + bias-pull-up; + }; + }; }; diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi index bb062b547110..012844132973 100644 --- a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi +++ b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi @@ -15,6 +15,9 @@ #include "pm8916.dtsi" #include "apq8016-sbc-soc-pins.dtsi" #include "apq8016-sbc-pmic-pins.dtsi" +#include <dt-bindings/gpio/gpio.h> +#include <dt-bindings/input/input.h> +#include <dt-bindings/sound/apq8016-lpass.h> / { aliases { @@ -84,6 +87,7 @@ pinctrl-names = "default","sleep"; pinctrl-0 = 
<&adv7533_int_active &adv7533_switch_active>; pinctrl-1 = <&adv7533_int_suspend &adv7533_switch_suspend>; + #sound-dai-cells = <1>; ports { #address-cells = <1>; @@ -163,14 +167,14 @@ led@5 { label = "apq8016-sbc:yellow:wlan"; gpios = <&pm8916_mpps 2 GPIO_ACTIVE_HIGH>; - linux,default-trigger = "wlan"; + linux,default-trigger = "phy0tx"; default-state = "off"; }; led@6 { label = "apq8016-sbc:blue:bt"; gpios = <&pm8916_mpps 3 GPIO_ACTIVE_HIGH>; - linux,default-trigger = "bt"; + linux,default-trigger = "hci0-power"; default-state = "off"; }; }; @@ -251,6 +255,97 @@ vddio-supply = <&pm8916_l6>; }; }; + + lpass_codec: codec{ + status = "okay"; + }; + + /* + Internal Codec + playback - Primary MI2S + capture - Ter MI2S + + External Primary: + playback - secondary MI2S + capture - Quat MI2S + + External Secondary: + playback - Quat MI2S + capture - Quat MI2S + + */ + + sound: sound { + compatible = "qcom,apq8016-sbc-sndcard"; + reg = <0x07702000 0x4>, <0x07702004 0x4>; + reg-names = "mic-iomux", "spkr-iomux"; + + status = "okay"; + pinctrl-0 = <&cdc_pdm_lines_act &ext_sec_tlmm_lines_act &ext_mclk_tlmm_lines_act>; + pinctrl-1 = <&cdc_pdm_lines_sus &ext_sec_tlmm_lines_sus &ext_mclk_tlmm_lines_sus>; + pinctrl-names = "default", "sleep"; + qcom,model = "DB410c"; + qcom,audio-routing = + "AMIC2", "MIC BIAS Internal2", + "AMIC3", "MIC BIAS External1"; + external-dai-link@0 { + link-name = "ADV7533"; + cpu { /* QUAT */ + sound-dai = <&lpass MI2S_QUATERNARY>; + }; + codec { + sound-dai = <&adv_bridge 0>; + }; + }; + + internal-codec-playback-dai-link@0 { /* I2S - Internal codec */ + link-name = "WCD"; + cpu { /* PRIMARY */ + sound-dai = <&lpass MI2S_PRIMARY>; + }; + codec { + sound-dai = <&lpass_codec 0>, <&wcd_codec 0>; + }; + }; + + internal-codec-capture-dai-link@0 { /* I2S - Internal codec */ + link-name = "WCD-Capture"; + cpu { /* PRIMARY */ + sound-dai = <&lpass MI2S_TERTIARY>; + }; + codec { + sound-dai = <&lpass_codec 1>, <&wcd_codec 1>; + }; + }; + }; + + wcnss@a21b000 
{ + status = "okay"; + }; + + camera_vdddo_1v8: fixedregulator@0 { + compatible = "regulator-fixed"; + regulator-name = "camera_vdddo"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-always-on; + }; + + camera_vdda_2v8: fixedregulator@1 { + compatible = "regulator-fixed"; + regulator-name = "camera_vdda"; + regulator-min-microvolt = <2800000>; + regulator-max-microvolt = <2800000>; + regulator-always-on; + }; + + camera_vddd_1v5: fixedregulator@2 { + compatible = "regulator-fixed"; + regulator-name = "camera_vddd"; + regulator-min-microvolt = <1500000>; + regulator-max-microvolt = <1500000>; + regulator-always-on; + }; }; usb2513 { @@ -276,6 +371,120 @@ }; }; }; + + gpio_keys { + compatible = "gpio-keys"; + #address-cells = <1>; + #size-cells = <0>; + autorepeat; + + pinctrl-names = "default"; + pinctrl-0 = <&msm_key_volp_n_default>; + + button@0 { + label = "Volume Up"; + linux,code = <KEY_VOLUMEUP>; + gpios = <&msmgpio 107 GPIO_ACTIVE_LOW>; + }; + }; +}; + +&camss { + status = "ok"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + port@0 { + reg = <0>; + csiphy0_ep: endpoint { + clock-lanes = <1>; + data-lanes = <0 2>; + remote-endpoint = <&ov5645_ep>; + status = "disabled"; + }; + }; + port@1 { + reg = <1>; + csiphy1_ep: endpoint { + clock-lanes = <1>; + data-lanes = <0 2>; + remote-endpoint = <&ov5645_2_ep>; + status = "disabled"; + }; + }; + }; +}; + +&cci { + status = "ok"; + + camera_rear@76 { + compatible = "ovti,ov5645"; + reg = <0x76>; + + enable-gpios = <&msmgpio 34 GPIO_ACTIVE_HIGH>; + reset-gpios = <&msmgpio 35 GPIO_ACTIVE_LOW>; + pinctrl-names = "default"; + pinctrl-0 = <&camera_rear_default>; + + clocks = <&gcc GCC_CAMSS_MCLK0_CLK>; + clock-names = "xclk"; + clock-frequency = <23880000>; + + vdddo-supply = <&camera_vdddo_1v8>; + vdda-supply = <&camera_vdda_2v8>; + vddd-supply = <&camera_vddd_1v5>; + + status = "disabled"; + + port { + ov5645_ep: endpoint { + clock-lanes = <1>; + data-lanes = <0 2>; 
+ remote-endpoint = <&csiphy0_ep>; + }; + }; + }; + + camera_front@74 { + compatible = "ovti,ov5645"; + reg = <0x74>; + + enable-gpios = <&msmgpio 33 GPIO_ACTIVE_HIGH>; + reset-gpios = <&msmgpio 28 GPIO_ACTIVE_LOW>; + pinctrl-names = "default"; + pinctrl-0 = <&camera_front_default>; + + clocks = <&gcc GCC_CAMSS_MCLK1_CLK>; + clock-names = "xclk"; + clock-frequency = <23880000>; + + vdddo-supply = <&camera_vdddo_1v8>; + vdda-supply = <&camera_vdda_2v8>; + vddd-supply = <&camera_vddd_1v5>; + + status = "disabled"; + + port { + ov5645_2_ep: endpoint { + clock-lanes = <1>; + data-lanes = <0 2>; + remote-endpoint = <&csiphy1_ep>; + }; + }; + }; +}; + +&wcd_codec { + status = "okay"; + clocks = <&gcc GCC_CODEC_DIGCODEC_CLK>; + clock-names = "mclk"; +}; + +&spmi_pon { + // Overwrite RESETIN_N keyboard scan code + linux,code = <KEY_VOLUMEDOWN>; }; &smd_rpm_regulators { @@ -308,8 +517,8 @@ }; l2 { - regulator-min-microvolt = <375000>; - regulator-max-microvolt = <1525000>; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; }; l3 { @@ -328,8 +537,8 @@ }; l6 { - regulator-min-microvolt = <1750000>; - regulator-max-microvolt = <3337000>; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; }; l7 { @@ -388,8 +597,8 @@ }; l17 { - regulator-min-microvolt = <1750000>; - regulator-max-microvolt = <3337000>; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; }; l18 { @@ -397,3 +606,7 @@ regulator-max-microvolt = <3337000>; }; }; + +&venus { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi index afb218cffc60..559f172306cd 100644 --- a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi +++ b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi @@ -17,7 +17,7 @@ / { aliases { serial0 = &blsp2_uart1; - serial1 = &blsp2_uart2; + //serial1 = &blsp2_uart2; i2c0 = &blsp1_i2c2; i2c1 = &blsp2_i2c1; i2c2 = &blsp2_i2c0; @@ -38,13 +38,13 @@ pinctrl-1 = 
<&blsp2_uart1_2pins_sleep>; }; - serial@75b1000 { - label = "LS-UART0"; - status = "okay"; - pinctrl-names = "default", "sleep"; - pinctrl-0 = <&blsp2_uart2_4pins_default>; - pinctrl-1 = <&blsp2_uart2_4pins_sleep>; - }; + //serial@75b1000 { + // label = "LS-UART0"; + // status = "okay"; + // pinctrl-names = "default", "sleep"; + // pinctrl-0 = <&blsp2_uart2_4pins_default>; + // pinctrl-1 = <&blsp2_uart2_4pins_sleep>; + //}; i2c@07577000 { /* On Low speed expansion */ diff --git a/arch/arm64/boot/dts/qcom/msm-iommu-v2.dtsi b/arch/arm64/boot/dts/qcom/msm-iommu-v2.dtsi new file mode 100644 index 000000000000..66d55913a10d --- /dev/null +++ b/arch/arm64/boot/dts/qcom/msm-iommu-v2.dtsi @@ -0,0 +1,259 @@ +/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +&soc { + gfx_iommu: qcom,iommu@1f00000 { + compatible = "qcom,msm-smmu-v2", "qcom,msm-mmu-500"; + #address-cells = <1>; + #size-cells = <1>; + ranges; + reg = <0x1f00000 0x10000>; + reg-names = "iommu_base"; + interrupts = <0 43 0>, <0 42 0>; + interrupt-names = "global_cfg_NS_irq", "global_cfg_S_irq"; + label = "gfx_iommu"; + qcom,iommu-secure-id = <18>; + clocks = <&gcc GCC_SMMU_CFG_CLK>, + <&gcc GCC_GFX_TCU_CLK>; + clock-names = "iface_clk", "core_clk"; + status = "disabled"; + + qcom,iommu-ctx@1f09000 { + compatible = "qcom,msm-smmu-v2-ctx"; + reg = <0x1f09000 0x1000>; + interrupts = <0 241 0>; + qcom,iommu-ctx-sids = <0>; + label = "gfx3d_user"; + }; + + qcom,iommu-ctx@1f0a000 { + compatible = "qcom,msm-smmu-v2-ctx"; + reg = <0x1f0a000 0x1000>; + interrupts = <0 242 0>; + qcom,iommu-ctx-sids = <1>; + label = "gfx3d_priv"; + }; + }; + + apps_iommu: qcom,iommu@1e00000 { + compatible = "qcom,msm-smmu-v2", "qcom,msm-mmu-500"; + #address-cells = <1>; + #size-cells = <1>; + ranges; + reg = <0x1e00000 0x40000 + 0x1ef0000 0x3000>; + reg-names = "iommu_base", "smmu_local_base"; + interrupts = <0 43 0>, <0 42 0>; + interrupt-names = "global_cfg_NS_irq", "global_cfg_S_irq"; + label = "apps_iommu"; + qcom,iommu-secure-id = <17>; + clocks = <&gcc GCC_SMMU_CFG_CLK>, + <&gcc GCC_APSS_TCU_CLK>; + clock-names = "iface_clk", "core_clk"; + qcom,cb-base-offset = <0x20000>; + #iommu-cells = <1>; + status = "disabled"; + + qcom,iommu-ctx@1e22000 { + compatible = "qcom,msm-smmu-v2-ctx"; + reg = <0x1e22000 0x1000>; + interrupts = <0 70 0>; + qcom,ctx-num = <2>; + qcom,iommu-ctx-sids = <0x2000>; + label = "jpeg_enc0"; + }; + + qcom,iommu-ctx@1e23000 { + compatible = "qcom,msm-smmu-v2-ctx"; + reg = <0x1e23000 0x1000>; + interrupts = <0 70 0>; + qcom,ctx-num = <3>; + qcom,iommu-ctx-sids = <0x400>; + label = "vfe"; + }; + + qcom,iommu-ctx@1e24000 { + compatible = "qcom,msm-smmu-v2-ctx"; + reg = <0x1e24000 0x1000>; + interrupts = <0 70 0>; + qcom,ctx-num = <4>; + 
qcom,iommu-ctx-sids = <0xc00>; + label = "mdp_0"; + }; + + venus_ns: venus_ns@1e25000 { + compatible = "qcom,msm-smmu-v2-ctx"; + reg = <0x1e25000 0x1000>; + interrupts = <0 70 0>; + qcom,ctx-num = <5>; + qcom,iommu-ctx-sids = <0x800 0x801 0x802 0x803 + 0x804 0x805 0x807>; + label = "venus_ns"; + }; + + qcom,iommu-ctx@1e26000 { + compatible = "qcom,msm-smmu-v2-ctx"; + reg = <0x1e26000 0x1000>; + interrupts = <0 70 0>; + qcom,ctx-num = <6>; + qcom,iommu-ctx-sids = <0x402>; + label = "cpp"; + }; + + qcom,iommu-ctx@1e27000 { + compatible = "qcom,msm-smmu-v2-ctx"; + reg = <0x1e27000 0x1000>; + interrupts = <0 70 0>; + qcom,ctx-num = <7>; + qcom,iommu-ctx-sids = <0x1000>; + label = "mDSP"; + }; + + qcom,iommu-ctx@1e28000 { + compatible = "qcom,msm-smmu-v2-ctx"; + reg = <0x1e28000 0x1000>; + interrupts = <0 70 0>; + qcom,ctx-num = <8>; + qcom,iommu-ctx-sids = <0x1400>; + label = "gss"; + }; + + qcom,iommu-ctx@1e29000 { + compatible = "qcom,msm-smmu-v2-ctx"; + reg = <0x1e29000 0x1000>; + interrupts = <0 70 0>; + qcom,ctx-num= <9>; + qcom,iommu-ctx-sids = <0x1800>; + label = "a2"; + }; + + qcom,iommu-ctx@1e32000 { + compatible = "qcom,msm-smmu-v2-ctx"; + qcom,secure-context; + reg = <0x1e32000 0x1000>; + interrupts = <0 70 0>, <0 70 0>; + qcom,ctx-num = <18>; + qcom,iommu-ctx-sids = <0xc01>; + label = "mdp_1"; + }; + + venus_sec_pixel: venus_sec_pixel@1e33000 { + compatible = "qcom,msm-smmu-v2-ctx"; + qcom,secure-context; + reg = <0x1e33000 0x1000>; + interrupts = <0 70 0>, <0 70 0>; + qcom,ctx-num = <19>; + qcom,iommu-ctx-sids = <0x885>; + label = "venus_sec_pixel"; + }; + + venus_sec_bitstream: venus_sec_bitstream@1e34000 { + compatible = "qcom,msm-smmu-v2-ctx"; + qcom,secure-context; + reg = <0x1e34000 0x1000>; + interrupts = <0 70 0>, <0 70 0>; + qcom,ctx-num= <20>; + qcom,iommu-ctx-sids = <0x880 0x881 0x882 0x883 0x884>; + label = "venus_sec_bitstream"; + }; + + venus_sec_non_pixel: venus_sec_non_pixel@1e35000 { + compatible = "qcom,msm-smmu-v2-ctx"; + 
qcom,secure-context; + reg = <0x1e35000 0x1000>; + interrupts = <0 70 0>, <0 70 0>; + qcom,ctx-num = <21>; + qcom,iommu-ctx-sids = <0x887 0x8a0>; + label = "venus_sec_non_pixel"; + }; + + venus_fw: qcom,iommu-ctx@1e36000 { + compatible = "qcom,msm-smmu-v2-ctx"; + qcom,secure-context; + reg = <0x1e36000 0x1000>; + interrupts = <0 70 0>, <0 70 0>; + qcom,ctx-num = <22>; + qcom,iommu-ctx-sids = <0x8c0 0x8c6>; + label = "venus_fw"; + }; + + periph_rpm: qcom,iommu-ctx@1e37000 { + compatible = "qcom,msm-smmu-v2-ctx"; + qcom,secure-context; + reg = <0x1e37000 0x1000>; + interrupts = <0 70 0>, <0 70 0>; + qcom,ctx-num = <23>; + qcom,iommu-ctx-sids = <0x40>; + label = "periph_rpm"; + }; + + qcom,iommu-ctx@1e38000 { + compatible = "qcom,msm-smmu-v2-ctx"; + reg = <0x1e38000 0x1000>; + interrupts = <0 70 0>; + qcom,ctx-num = <24>; + qcom,iommu-ctx-sids = <0xC0 0xC4 0xC8 0xCC 0xD0 0xD3 + 0xD4 0xD7 0xD8 0xDB 0xDC 0xDF + 0xF0 0xF3 0xF4 0xF7 0xF8 0xFB + 0xFC 0xFF>; + label = "periph_CE"; + }; + + qcom,iommu-ctx@1e39000 { + compatible = "qcom,msm-smmu-v2-ctx"; + reg = <0x1e39000 0x1000>; + interrupts = <0 70 0>; + qcom,ctx-num = <25>; + qcom,iommu-ctx-sids = <0x280 0x283 0x284 0x287 0x288 + 0x28B 0x28C 0x28F 0x290 0x293 + 0x294 0x297 0x298 0x29B 0x29C + 0x29F>; + label = "periph_BLSP"; + }; + + qcom,iommu-ctx@1e3a000 { + compatible = "qcom,msm-smmu-v2-ctx"; + reg = <0x1e3a000 0x1000>; + interrupts = <0 70 0>; + qcom,ctx-num = <26>; + qcom,iommu-ctx-sids = <0x100>; + label = "periph_SDC1"; + }; + + qcom,iommu-ctx@1e3b000 { + compatible = "qcom,msm-smmu-v2-ctx"; + reg = <0x1e3b000 0x1000>; + interrupts = <0 70 0>; + qcom,ctx-num = <27>; + qcom,iommu-ctx-sids = <0x140>; + label = "periph_SDC2"; + }; + + qcom,iommu-ctx@1e3c000 { + compatible = "qcom,msm-smmu-v2-ctx"; + reg = <0x1e3c000 0x1000>; + interrupts = <0 70 0>; + qcom,ctx-num = <28>; + qcom,iommu-ctx-sids = <0x1c0>; + label = "periph_audio"; + }; + + qcom,iommu-ctx@1e3d000 { + compatible = "qcom,msm-smmu-v2-ctx"; + reg = 
<0x1e3d000 0x1000>; + interrupts = <0 70 0>; + qcom,ctx-num = <29>; + qcom,iommu-ctx-sids = <0x2c0>; + label = "periph_USB_HS1"; + }; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/msm8916-bus.dtsi b/arch/arm64/boot/dts/qcom/msm8916-bus.dtsi new file mode 100644 index 000000000000..11e707cef476 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/msm8916-bus.dtsi @@ -0,0 +1,858 @@ +/* Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <dt-bindings/soc/msm-bus-rule-ops.h> + +&soc { + ad_hoc_bus: ad-hoc-bus { }; + + static-rules { + compatible = "qcom,msm-bus-static-bw-rules"; + + rule0 { + qcom,src-nodes = <&mas_apss>; + qcom,src-field = <FLD_IB>; + qcom,src-op = <OP_LE>; + qcom,thresh = <1600000>; + qcom,mode = <THROTTLE_ON>; + qcom,dest-node = <&mas_apss>; + qcom,dest-bw = <600000>; + }; + + + rule1 { + qcom,src-nodes = <&mas_apss>; + qcom,src-field = <FLD_IB>; + qcom,src-op = <OP_LE>; + qcom,thresh = <3200000>; + qcom,mode = <THROTTLE_ON>; + qcom,dest-node = <&mas_apss>; + qcom,dest-bw = <1200000>; + }; + + rule2 { + qcom,src-nodes = <&mas_apss>; + qcom,src-field = <FLD_IB>; + qcom,src-op = <OP_GT>; + qcom,thresh = <3200000>; + qcom,mode = <THROTTLE_OFF>; + qcom,dest-node = <&mas_apss>; + }; + + rule3 { + qcom,src-nodes = <&mas_gfx>; + qcom,src-field = <FLD_IB>; + qcom,src-op = <OP_LE>; + qcom,thresh = <1600000>; + qcom,mode = <THROTTLE_ON>; + qcom,dest-node = <&mas_gfx>; + qcom,dest-bw = <600000>; + }; + + rule4 { + qcom,src-nodes = <&mas_gfx>; + qcom,src-field = <FLD_IB>; + 
qcom,src-op = <OP_LE>; + qcom,thresh = <3200000>; + qcom,mode = <THROTTLE_ON>; + qcom,dest-node = <&mas_gfx>; + qcom,dest-bw = <1200000>; + }; + + rule5 { + qcom,src-nodes = <&mas_gfx>; + qcom,src-field = <FLD_IB>; + qcom,src-op = <OP_GT>; + qcom,thresh = <3200000>; + qcom,mode = <THROTTLE_OFF>; + qcom,dest-node = <&mas_gfx>; + }; + }; +}; + +&ad_hoc_bus { + compatible = "qcom,msm-bus-device"; + reg = <0x580000 0x14000>, + <0x400000 0x62000>, + <0x500000 0x11000>; + reg-names = "snoc-base", "bimc-base", "pnoc-base"; + + fab_snoc: fab-snoc { + cell-id = <1024>; + label = "fab-snoc"; + qcom,fab-dev; + qcom,base-name = "snoc-base"; + qcom,base-offset = <0x7000>; + qcom,qos-off = <0x1000>; + qcom,bus-type = <1>; + clock-names = "bus_clk", "bus_a_clk"; + clocks = <&rpmcc RPM_SMD_SNOC_CLK>, + <&rpmcc RPM_SMD_SNOC_A_CLK>; + }; + + fab_bimc: fab-bimc { + cell-id = <0>; + label = "fab-bimc"; + qcom,fab-dev; + qcom,base-name = "bimc-base"; + qcom,bus-type = <2>; + clock-names = "bus_clk", "bus_a_clk"; + clocks = <&rpmcc RPM_SMD_BIMC_CLK>, + <&rpmcc RPM_SMD_BIMC_A_CLK>; + }; + + fab_pnoc: fab-pnoc { + cell-id = <4096>; + label = "fab-pnoc"; + qcom,fab-dev; + qcom,base-name = "pnoc-base"; + qcom,base-offset = <0x7000>; + qcom,qos-delta = <0x1000>; + qcom,bus-type = <1>; + clock-names = "bus_clk", "bus_a_clk"; + clocks = <&rpmcc RPM_SMD_PCNOC_CLK>, + <&rpmcc RPM_SMD_PCNOC_A_CLK>; + }; + + /* SNOC Devices */ + mas_video: mas-video { + cell-id = <63>; + label = "mas-video"; + qcom,qport = <8>; + qcom,ap-owned; + qcom,connections = <&mm_int_0 &mm_int_2>; + qcom,bus-dev = <&fab_snoc>; + qcom,qos-mode = "bypass"; + qcom,buswidth = <16>; + }; + + mas_jpeg: mas-jpeg { + cell-id = <62>; + label = "mas-jpeg"; + qcom,ap-owned; + qcom,qport = <6>; + qcom,connections = <&mm_int_0 &mm_int_2>; + qcom,bus-dev = <&fab_snoc>; + qcom,qos-mode = "bypass"; + qcom,buswidth = <16>; + }; + + mas_vfe: mas-vfe { + cell-id = <29>; + label = "mas-vfe"; + qcom,ap-owned; + qcom,qport = <9>; + 
qcom,connections = <&mm_int_1 &mm_int_2>; + qcom,bus-dev = <&fab_snoc>; + qcom,qos-mode = "bypass"; + qcom,buswidth = <16>; + }; + + mas_mdp: mas-mdp { + cell-id = <22>; + label = "mas-mdp"; + qcom,ap-owned; + qcom,connections = <&mm_int_0 &mm_int_2>; + qcom,qport = <7>; + qcom,bus-dev = <&fab_snoc>; + qcom,qos-mode = "bypass"; + qcom,buswidth = <16>; + }; + + mas_qdss_bam: mas-qdss-bam { + cell-id = <53>; + label = "mas-qdss-bam"; + qcom,connections = <&qdss_int>; + qcom,qport = <11>; + qcom,bus-dev = <&fab_snoc>; + qom,buswidth = <4>; + qcom,ap-owned; + qcom,qos-mode = "fixed"; + qcom,prio1 = <1>; + qcom,prio0 = <1>; + }; + + mas_snoc_cfg: mas-snoc-cfg { + cell-id = <54>; + label = "mas-snoc-cfg"; + qcom,connections = <&qdss_int>; + qcom,bus-dev = <&fab_snoc>; + qcom,qos-mode = "bypass"; + qom,buswidth = <4>; + qcom,mas-rpm-id = <20>; + }; + + mas_qdss_etr: mas-qdss-etr { + cell-id = <60>; + label = "mas-qdss-etr"; + qcom,connections = <&qdss_int>; + qcom,qport = <10>; + qcom,bus-dev = <&fab_snoc>; + qcom,qos-mode = "fixed"; + qcom,prio1 = <1>; + qcom,prio0 = <1>; + qom,buswidth = <8>; + qcom,ap-owned; + }; + + mm_int_0: mm-int-0 { + cell-id = <10000>; + label = "mm-int-0"; + qcom,ap-owned; + qcom,connections = <&mm_int_bimc>; + qcom,bus-dev = <&fab_snoc>; + qcom,buswidth = <16>; + }; + + mm_int_1: mm-int-1 { + cell-id = <10001>; + label = "mm-int1"; + qcom,ap-owned; + qcom,connections = <&mm_int_bimc>; + qcom,bus-dev = <&fab_snoc>; + qcom,buswidth = <16>; + }; + + mm_int_2: mm-int-2 { + cell-id = <10002>; + label = "mm-int2"; + qcom,ap-owned; + qcom,connections = <&snoc_int_0>; + qcom,bus-dev = <&fab_snoc>; + qcom,buswidth = <16>; + }; + + mm_int_bimc: mm-int-bimc { + cell-id = <10003>; + label = "mm-int-bimc"; + qcom,ap-owned; + qcom,connections = <&snoc_bimc_1_mas>; + qcom,bus-dev = <&fab_snoc>; + qcom,buswidth = <16>; + }; + + snoc_int_0: snoc-int-0 { + cell-id = <10004>; + label = "snoc-int-0"; + qcom,connections = <&slv_qdss_stm &slv_imem &snoc_pnoc_mas>; + 
qcom,bus-dev = <&fab_snoc>; + qcom,mas-rpm-id = <99>; + qcom,slv-rpm-id = <130>; + qcom,buswidth = <8>; + }; + + snoc_int_1: snoc-int-1 { + cell-id = <10005>; + label = "snoc-int-1"; + qcom,connections = <&slv_apss &slv_cats_0 &slv_cats_1>; + qcom,bus-dev = <&fab_snoc>; + qcom,mas-rpm-id = <100>; + qcom,slv-rpm-id = <131>; + qcom,buswidth = <8>; + }; + + snoc_int_bimc: snoc-int-bmc { + cell-id = <10006>; + label = "snoc-bimc"; + qcom,connections = <&snoc_bimc_0_mas>; + qcom,bus-dev = <&fab_snoc>; + qcom,mas-rpm-id = <101>; + qcom,slv-rpm-id = <132>; + qcom,buswidth = <8>; + }; + + snoc_bimc_0_mas: snoc-bimc-0-mas { + cell-id = <10007>; + label = "snoc-bimc-0-mas"; + qcom,connections = <&snoc_bimc_0_slv>; + qcom,bus-dev = <&fab_snoc>; + qcom,mas-rpm-id = <3>; + qcom,buswidth = <8>; + }; + + snoc_bimc_1_mas: snoc-bimc-1-mas { + cell-id = <10008>; + label = "snoc-bimc-1-mas"; + qcom,connections = <&snoc_bimc_1_slv>; + qcom,bus-dev = <&fab_snoc>; + qcom,ap-owned; + qcom,buswidth = <16>; + }; + + qdss_int: qdss-int { + cell-id = <10009>; + label = "qdss-int"; + qcom,ap-owned; + qcom,connections = <&snoc_int_0 &snoc_int_bimc>; + qcom,bus-dev = <&fab_snoc>; + qcom,buswidth = <8>; + }; + + bimc_snoc_slv: bimc-snoc-slv { + cell-id = <10017>; + label = "bimc_snoc_slv"; + qcom,ap-owned; + qcom,connections = <&snoc_int_0 &snoc_int_1>; + qcom,bus-dev = <&fab_snoc>; + qcom,buswidth = <8>; + }; + + snoc_pnoc_mas: snoc-pnoc-mas { + cell-id = <10027>; + label = "snoc-pnoc-mas"; + qcom,connections = <&snoc_pnoc_slv>; + qcom,bus-dev = <&fab_snoc>; + qcom,buswidth = <8>; + }; + + pnoc_snoc_slv: pnoc-snoc-slv { + cell-id = <10011>; + label = "snoc-pnoc"; + qcom,connections = <&snoc_int_0 &snoc_int_bimc &snoc_int_1>; + qcom,bus-dev = <&fab_snoc>; + qcom,slv-rpm-id = <45>; + qcom,buswidth = <8>; + }; + + slv_srvc_snoc: slv-srvc-snoc { + cell-id = <587>; + label = "snoc-srvc-snoc"; + qcom,bus-dev = <&fab_snoc>; + qcom,slv-rpm-id = <29>; + qcom,buswidth = <8>; + }; + + slv_qdss_stm: 
slv-qdss-stm { + cell-id = <588>; + label = "snoc-qdss-stm"; + qcom,bus-dev = <&fab_snoc>; + qcom,buswidth = <4>; + qcom,slv-rpm-id = <30>; + }; + + slv_imem: slv-imem { + cell-id = <519>; + label = "slv_imem"; + qcom,bus-dev = <&fab_snoc>; + qcom,buswidth = <8>; + qcom,slv-rpm-id = <26>; + }; + + slv_apss: slv-apss { + cell-id = <517>; + label = "slv_apss"; + qcom,bus-dev = <&fab_snoc>; + qcom,slv-rpm-id = <20>; + qcom,buswidth = <4>; + }; + + slv_cats_0: slv-cats-0 { + cell-id = <663>; + label = "slv-cats-0"; + qcom,bus-dev = <&fab_snoc>; + qcom,slv-rpm-id = <106>; + qcom,buswidth = <16>; + }; + + slv_cats_1: slv-cats-1 { + cell-id = <664>; + label = "slv-cats-1"; + qcom,bus-dev = <&fab_snoc>; + qcom,slv-rpm-id = <107>; + qcom,buswidth = <8>; + }; + + /* BIMC nodes */ + mas_apss: mas-apss { + cell-id = <1>; + label = "mas-apss"; + qcom,ap-owned; + qcom,connections = <&slv_ebi_ch0 &bimc_snoc_mas &slv_apps_l2>; + qcom,qport = <0>; + qcom,bus-dev = <&fab_bimc>; + qcom,qos-mode = "fixed"; + qcom,prio-lvl = <0>; + qcom,prio-rd = <0>; + qcom,prio-wr = <0>; + qcom,ws = <10000>; + qcom,gp = <5000>; + qcom,thmp = <50>; + qom,buswidth = <8>; + }; + + mas_tcu0: mas-tcu0 { + cell-id = <104>; + label = "mas-tcu0"; + qcom,ap-owned; + qcom,connections = <&slv_ebi_ch0 &bimc_snoc_mas &slv_apps_l2>; + qcom,qport = <5>; + qcom,bus-dev = <&fab_bimc>; + qcom,qos-mode = "fixed"; + qcom,prio-lvl = <2>; + qcom,prio-rd = <2>; + qcom,prio-wr = <2>; + qom,buswidth = <8>; + }; + + mas_tcu1: mas-tcu1 { + cell-id = <105>; + label = "mas-tcu1"; + qcom,ap-owned; + qcom,connections = <&slv_ebi_ch0 &bimc_snoc_mas &slv_apps_l2>; + qcom,qport = <6>; + qcom,bus-dev = <&fab_bimc>; + qcom,qos-mode = "fixed"; + qcom,prio-lvl = <2>; + qcom,prio-rd = <2>; + qcom,prio-wr = <2>; + qom,buswidth = <8>; + }; + + mas_gfx: mas-gfx { + cell-id = <26>; + label = "mas-gfx"; + qcom,ap-owned; + qcom,connections = <&slv_ebi_ch0 &bimc_snoc_mas &slv_apps_l2>; + qcom,qport = <2>; + qcom,bus-dev = <&fab_bimc>; + 
qcom,qos-mode = "fixed"; + qcom,prio-lvl = <0>; + qcom,prio-rd = <0>; + qcom,prio-wr = <0>; + qcom,buswidth = <8>; + qcom,ws = <10000>; + qcom,gp = <5000>; + qcom,thmp = <50>; + }; + + bimc_snoc_mas: bimc-snoc-mas { + cell-id = <10016>; + label = "bimc_snoc_mas"; + qcom,ap-owned; + qcom,bus-dev = <&fab_bimc>; + qcom,connections = <&bimc_snoc_slv>; + qcom,buswidth = <8>; + }; + + snoc_bimc_0_slv: snoc-bimc-0-slv { + cell-id = <10025>; + label = "snoc_bimc_0_slv"; + qcom,connections = <&slv_ebi_ch0>; + qcom,bus-dev = <&fab_bimc>; + qcom,slv-rpm-id = <24>; + qcom,buswidth = <8>; + }; + + snoc_bimc_1_slv: snoc_bimc_1_slv { + cell-id = <10026>; + label = "snoc_bimc_1_slv"; + qcom,connections = <&slv_ebi_ch0>; + qcom,ap-owned; + qcom,bus-dev = <&fab_bimc>; + qcom,buswidth = <8>; + }; + + slv_ebi_ch0: slv-ebi-ch0 { + cell-id = <512>; + label = "slv-ebi-ch0"; + qcom,bus-dev = <&fab_bimc>; + qcom,slv-rpm-id = <0>; + qcom,buswidth = <8>; + }; + + slv_apps_l2: slv-apps-l2 { + cell-id = <514>; + label = "slv-apps-l2"; + qcom,bus-dev = <&fab_bimc>; + qcom,buswidth = <8>; + }; + + /* PNOC nodes */ + snoc_pnoc_slv: snoc-pnoc-slv { + cell-id = <10028>; + label = "snoc-pnoc-slv"; + qcom,connections = <&pnoc_int_0>; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <8>; + }; + + pnoc_int_0: pnoc-int-0 { + cell-id = <10012>; + label = "pnoc-int-0"; + qcom,connections = <&pnoc_snoc_mas &pnoc_s_0 &pnoc_s_1 &pnoc_s_2 + &pnoc_s_3 &pnoc_s_4 &pnoc_s_8 &pnoc_s_9>; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <8>; + }; + + pnoc_int_1: pnoc-int-1 { + cell-id = <10013>; + label = "pnoc-int-1"; + qcom,connections = <&pnoc_snoc_mas>; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <8>; + }; + + pnoc_m_0: pnoc-m-0 { + cell-id = <10014>; + label = "pnoc-m-0"; + qcom,connections = <&pnoc_int_0>; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <8>; + }; + + pnoc_m_1: pnoc-m-1 { + cell-id = <10015>; + label = "pnoc-m-1"; + qcom,connections = <&pnoc_snoc_mas>; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = 
<8>; + }; + + pnoc_s_0: pnoc-s-0 { + cell-id = <10018>; + label = "pnoc-s-0"; + qcom,connections = <&slv_clk_ctl &slv_tlmm &slv_tcsr + &slv_security &slv_mss>; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <4>; + }; + + pnoc_s_1: pnoc-s-1 { + cell-id = <10019>; + label = "pnoc-s-1"; + qcom,connections = <&slv_imem_cfg &slv_crypto_0_cfg + &slv_msg_ram &slv_pdm &slv_prng>; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <4>; + }; + + pnoc_s_2: pnoc-s-2 { + cell-id = <10020>; + label = "pnoc-s-2"; + qcom,connections = <&slv_spdm &slv_boot_rom &slv_bimc_cfg + &slv_pnoc_cfg &slv_pmic_arb>; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <4>; + }; + + pnoc_s_3: pnoc-s-3 { + cell-id = <10021>; + label = "pnoc-s-3"; + qcom,connections = <&slv_mpm &slv_snoc_cfg &slv_rbcpr_cfg + &slv_qdss_cfg &slv_dehr_cfg>; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <4>; + }; + + pnoc_s_4: pnoc-s-4 { + cell-id = <10022>; + label = "pnoc-s-4"; + qcom,connections = <&slv_venus_cfg &slv_camera_cfg + &slv_display_cfg>; + qcom,bus-dev = <&fab_pnoc>; + }; + + pnoc_s_8: pnoc-s-8 { + cell-id = <10023>; + label = "pnoc-s-8"; + qcom,connections = <&slv_usb_hs &slv_sdcc_1 &slv_blsp_1>; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <4>; + }; + + pnoc_s_9: pnoc-s-9 { + cell-id = <10024>; + label = "pnoc-s-9"; + qcom,connections = <&slv_sdcc_2 &slv_audio &slv_gfx_cfg>; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <4>; + }; + + slv_imem_cfg: slv-imem-cfg { + cell-id = <627>; + label = "slv-imem-cfg"; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <4>; + }; + + slv_crypto_0_cfg: slv-crypto-0-cfg { + cell-id = <625>; + label = "slv-crypto-0-cfg"; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <4>; + }; + + slv_msg_ram: slv-msg-ram { + cell-id = <535>; + label = "slv-msg-ram"; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <4>; + }; + + slv_pdm: slv-pdm { + cell-id = <577>; + label = "slv-pdm"; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <4>; + }; + + slv_prng: slv-prng { + cell-id = <618>; + 
label = "slv-prng"; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <4>; + }; + + slv_clk_ctl: slv-clk-ctl { + cell-id = <620>; + label = "slv-clk-ctl"; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <4>; + }; + + slv_mss: slv-mss { + cell-id = <521>; + label = "slv-mss"; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <4>; + }; + + slv_tlmm: slv-tlmm { + cell-id = <624>; + label = "slv-tlmm"; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <4>; + }; + + slv_tcsr: slv-tcsr { + cell-id = <579>; + label = "slv-tcsr"; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <4>; + }; + + slv_security: slv-security { + cell-id = <622>; + label = "slv-security"; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <4>; + }; + + slv_spdm: slv-spdm { + cell-id = <533>; + label = "slv-spdm"; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <4>; + }; + + slv_pnoc_cfg: slv-pnoc-cfg { + cell-id = <641>; + label = "slv-pnoc-cfg"; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <4>; + }; + + slv_pmic_arb: slv-pmic-arb { + cell-id = <632>; + label = "slv-pmic-arb"; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <4>; + }; + + slv_bimc_cfg: slv-bimc-cfg { + cell-id = <629>; + label = "slv-bimc-cfg"; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <4>; + }; + + slv_boot_rom: slv-boot-rom { + cell-id = <630>; + label = "slv-boot-rom"; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <4>; + }; + + slv_mpm: slv-mpm { + cell-id = <536>; + label = "slv-mpm"; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <4>; + }; + + slv_qdss_cfg: slv-qdss-cfg { + cell-id = <635>; + label = "slv-qdss-cfg"; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <4>; + }; + + slv_rbcpr_cfg: slv-rbcpr-cfg { + cell-id = <636>; + label = "slv-rbcpr-cfg"; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <4>; + }; + + slv_snoc_cfg: slv-snoc-cfg { + cell-id = <647>; + label = "slv-snoc-cfg"; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <4>; + }; + + slv_dehr_cfg: slv-dehr-cfg { + cell-id = <634>; + label = "slv-dehr-cfg"; + 
qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <4>; + }; + + slv_venus_cfg: slv-venus-cfg { + cell-id = <596>; + label = "slv-venus-cfg"; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <4>; + }; + + slv_display_cfg: slv-display-cfg { + cell-id = <590>; + label = "slv-display-cfg"; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <4>; + }; + + slv_camera_cfg: slv-camera-cfg { + cell-id = <589>; + label = "slv-camer-cfg"; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <4>; + }; + + slv_usb_hs: slv-usb-hs { + cell-id = <614>; + label = "slv-usb-hs"; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <4>; + }; + + slv_sdcc_1: slv-sdcc-1 { + cell-id = <606>; + label = "slv-sdcc-1"; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <4>; + }; + + slv_blsp_1: slv-blsp-1 { + cell-id = <613>; + label = "slv-blsp-1"; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <4>; + }; + + slv_sdcc_2: slv-sdcc-2 { + cell-id = <609>; + label = "slv-sdcc-2"; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <4>; + }; + + slv_gfx_cfg: slv-gfx-cfg { + cell-id = <598>; + label = "slv-gfx-cfg"; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <4>; + }; + + slv_audio: slv-audio { + cell-id = <522>; + label = "slv-audio"; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <4>; + }; + + mas_blsp_1: mas-blsp_1 { + cell-id = <86>; + label = "mas-blsp-1"; + qcom,connections = <&pnoc_m_1>; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <4>; + }; + + mas_spdm: mas-spdm { + cell-id = <36>; + label = "mas-spdm"; + qcom,connections = <&pnoc_m_0>; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <4>; + }; + + mas_dehr: mas-dehr { + cell-id = <75>; + label = "mas-dehr"; + qcom,connections = <&pnoc_m_0>; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <4>; + }; + + mas_audio: mas-audio { + cell-id = <15>; + label = "mas-audio"; + qcom,connections = <&pnoc_m_0>; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <4>; + }; + + mas_usb_hs: mas-usb-hs { + cell-id = <87>; + label = "mas-usb-hs"; + qcom,connections = 
<&pnoc_m_1>; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <4>; + }; + + mas_pnoc_crypto_0: mas-pnoc-crypto-0 { + cell-id = <55>; + label = "mas-pnoc-crypto-0"; + qcom,connections = <&pnoc_int_1>; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <8>; + }; + + mas_pnoc_sdcc_1: mas-pnoc-sdcc-1 { + cell-id = <78>; + label = "mas-pnoc-sdcc-1"; + qcom,qport = <7>; + qcom,connections = <&pnoc_int_1>; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <8>; + }; + + mas_pnoc_sdcc_2: mas-pnoc-sdcc-2 { + cell-id = <81>; + label = "mas-pnoc-sdcc-2"; + qcom,qport = <8>; + qcom,connections = <&pnoc_int_1>; + qcom,bus-dev = <&fab_pnoc>; + qcom,buswidth = <8>; + }; + + pnoc_snoc_mas: pnoc-snoc-mas { + cell-id = <10010>; + label = "pnoc-snoc-mas"; + qcom,connections = <&pnoc_snoc_slv>; + qcom,bus-dev = <&fab_pnoc>; + qcom,mas-rpm-id = <29>; + qcom,buswidth = <8>; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/msm8916-coresight.dtsi b/arch/arm64/boot/dts/qcom/msm8916-coresight.dtsi new file mode 100644 index 000000000000..c008dc7a32bb --- /dev/null +++ b/arch/arm64/boot/dts/qcom/msm8916-coresight.dtsi @@ -0,0 +1,254 @@ +/* + * Copyright (c) 2013 - 2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +&soc { + + tpiu@820000 { + compatible = "arm,coresight-tpiu", "arm,primecell"; + reg = <0x820000 0x1000>; + + clocks = <&rpmcc RPM_QDSS_CLK>, <&rpmcc RPM_QDSS_A_CLK>; + clock-names = "apb_pclk", "atclk"; + + port { + tpiu_in: endpoint { + slave-mode; + remote-endpoint = <&replicator_out1>; + }; + }; + }; + + funnel@821000 { + compatible = "arm,coresight-funnel", "arm,primecell"; + reg = <0x821000 0x1000>; + + clocks = <&rpmcc RPM_QDSS_CLK>, <&rpmcc RPM_QDSS_A_CLK>; + clock-names = "apb_pclk", "atclk"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + /* + * Not described input ports: + * 0 - connected to Resource and Power Manger CPU ETM + * 1 - not-connected + * 2 - connected to Modem CPU ETM + * 3 - not-connected + * 5 - not-connected + * 6 - connected trought funnel to Wireless CPU ETM + * 7 - connected to STM component + */ + port@4 { + reg = <4>; + funnel0_in4: endpoint { + slave-mode; + remote-endpoint = <&funnel1_out>; + }; + }; + port@8 { + reg = <0>; + funnel0_out: endpoint { + remote-endpoint = <&etf_in>; + }; + }; + }; + }; + + replicator@824000 { + compatible = "qcom,coresight-replicator1x", "arm,primecell"; + reg = <0x824000 0x1000>; + + clocks = <&rpmcc RPM_QDSS_CLK>, <&rpmcc RPM_QDSS_A_CLK>; + clock-names = "apb_pclk", "atclk"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + replicator_out0: endpoint { + remote-endpoint = <&etr_in>; + }; + }; + port@1 { + reg = <1>; + replicator_out1: endpoint { + remote-endpoint = <&tpiu_in>; + }; + }; + port@2 { + reg = <0>; + replicator_in: endpoint { + slave-mode; + remote-endpoint = <&etf_out>; + }; + }; + }; + }; + + etf@825000 { + compatible = "arm,coresight-tmc", "arm,primecell"; + reg = <0x825000 0x1000>; + + clocks = <&rpmcc RPM_QDSS_CLK>, <&rpmcc RPM_QDSS_A_CLK>; + clock-names = "apb_pclk", "atclk"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + etf_out: endpoint { + remote-endpoint = <&replicator_in>; + }; + }; + 
port@1 { + reg = <0>; + etf_in: endpoint { + slave-mode; + remote-endpoint = <&funnel0_out>; + }; + }; + }; + }; + + etr@826000 { + compatible = "arm,coresight-tmc", "arm,primecell"; + reg = <0x826000 0x1000>; + + clocks = <&rpmcc RPM_QDSS_CLK>, <&rpmcc RPM_QDSS_A_CLK>; + clock-names = "apb_pclk", "atclk"; + + port { + etr_in: endpoint { + slave-mode; + remote-endpoint = <&replicator_out0>; + }; + }; + }; + + funnel@841000 { /* APSS funnel only 4 inputs are used */ + compatible = "arm,coresight-funnel", "arm,primecell"; + reg = <0x841000 0x1000>; + + clocks = <&rpmcc RPM_QDSS_CLK>, <&rpmcc RPM_QDSS_A_CLK>; + clock-names = "apb_pclk", "atclk"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + funnel1_in0: endpoint { + slave-mode; + remote-endpoint = <&etm0_out>; + }; + }; + port@1 { + reg = <1>; + funnel1_in1: endpoint { + slave-mode; + remote-endpoint = <&etm1_out>; + }; + }; + port@2 { + reg = <2>; + funnel1_in2: endpoint { + slave-mode; + remote-endpoint = <&etm2_out>; + }; + }; + port@3 { + reg = <3>; + funnel1_in3: endpoint { + slave-mode; + remote-endpoint = <&etm3_out>; + }; + }; + port@4 { + reg = <0>; + funnel1_out: endpoint { + remote-endpoint = <&funnel0_in4>; + }; + }; + }; + }; + + etm@85c000 { + compatible = "arm,coresight-etm4x", "arm,primecell"; + reg = <0x85c000 0x1000>; + + clocks = <&rpmcc RPM_QDSS_CLK>, <&rpmcc RPM_QDSS_A_CLK>; + clock-names = "apb_pclk", "atclk"; + + cpu = <&CPU0>; + + port { + etm0_out: endpoint { + remote-endpoint = <&funnel1_in0>; + }; + }; + }; + + etm@85d000 { + compatible = "arm,coresight-etm4x", "arm,primecell"; + reg = <0x85d000 0x1000>; + + clocks = <&rpmcc RPM_QDSS_CLK>, <&rpmcc RPM_QDSS_A_CLK>; + clock-names = "apb_pclk", "atclk"; + + cpu = <&CPU1>; + + port { + etm1_out: endpoint { + remote-endpoint = <&funnel1_in1>; + }; + }; + }; + + etm@85e000 { + compatible = "arm,coresight-etm4x", "arm,primecell"; + reg = <0x85e000 0x1000>; + + clocks = <&rpmcc RPM_QDSS_CLK>, <&rpmcc 
RPM_QDSS_A_CLK>; + clock-names = "apb_pclk", "atclk"; + + cpu = <&CPU2>; + + port { + etm2_out: endpoint { + remote-endpoint = <&funnel1_in2>; + }; + }; + }; + + etm@85f000 { + compatible = "arm,coresight-etm4x", "arm,primecell"; + reg = <0x85f000 0x1000>; + + clocks = <&rpmcc RPM_QDSS_CLK>, <&rpmcc RPM_QDSS_A_CLK>; + clock-names = "apb_pclk", "atclk"; + + cpu = <&CPU3>; + + port { + etm3_out: endpoint { + remote-endpoint = <&funnel1_in3>; + }; + }; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/msm8916-iommu.dtsi b/arch/arm64/boot/dts/qcom/msm8916-iommu.dtsi new file mode 100644 index 000000000000..82acb8df2a8a --- /dev/null +++ b/arch/arm64/boot/dts/qcom/msm8916-iommu.dtsi @@ -0,0 +1,21 @@ +/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "msm-iommu-v2.dtsi" + +&gfx_iommu { + status = "ok"; +}; + +&apps_iommu { + status = "ok"; +}; diff --git a/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi b/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi index 10c83e11c272..610668a37c4e 100644 --- a/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi @@ -720,4 +720,106 @@ }; }; }; + + wcnss_pin_a: wcnss-active { + pinmux { + pins = "gpio40", "gpio41", "gpio42", "gpio43", "gpio44"; + function = "wcss_wlan"; + }; + + pinconf { + pins = "gpio40", "gpio41", "gpio42", "gpio43", "gpio44"; + drive-strength = <6>; + bias-pull-up; + }; + }; + + cci_lines { + cci0_default: cci0_default { + pinmux { + function = "cci_i2c"; + pins = "gpio29", "gpio30"; + }; + pinconf { + pins = "gpio29", "gpio30"; + drive-strength = <16>; + bias-disable; + }; + }; + cci0_sleep: cci0_sleep { + pinmux { + function = "cci_i2c"; + pins = "gpio29", "gpio30"; + }; + pinconf { + pins = "gpio29", "gpio30"; + drive-strength = <16>; + bias-disable; + }; + }; + }; + + camera_front_default: camera_front_default { + pinmux_pwdn { + function = "gpio"; + pins = "gpio33"; + }; + pinconf_pwdn { + pins = "gpio33"; + drive-strength = <16>; + bias-disable; + }; + + pinmux_rst { + function = "gpio"; + pins = "gpio28"; + }; + pinconf_rst { + pins = "gpio28"; + drive-strength = <16>; + bias-disable; + }; + + pinmux_mclk1 { + function = "cam_mclk1"; + pins = "gpio27"; + }; + pinconf_mclk1 { + pins = "gpio27"; + drive-strength = <16>; + bias-disable; + }; + }; + + camera_rear_default: camera_rear_default { + pinmux_pwdn { + function = "gpio"; + pins = "gpio34"; + }; + pinconf_pwdn { + pins = "gpio34"; + drive-strength = <16>; + bias-disable; + }; + + pinmux_rst { + function = "gpio"; + pins = "gpio35"; + }; + pinconf_rst { + pins = "gpio35"; + drive-strength = <16>; + bias-disable; + }; + + pinmux_mclk0 { + function = "cam_mclk0"; + pins = "gpio26"; + }; + pinconf_mclk0 { + pins = "gpio26"; + drive-strength = <16>; + 
bias-disable; + }; + }; }; diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi index 466ca5705c99..6e5f57df492e 100644 --- a/arch/arm64/boot/dts/qcom/msm8916.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi @@ -14,6 +14,8 @@ #include <dt-bindings/interrupt-controller/arm-gic.h> #include <dt-bindings/clock/qcom,gcc-msm8916.h> #include <dt-bindings/reset/qcom,gcc-msm8916.h> +#include <dt-bindings/thermal/thermal.h> +#include <dt-bindings/clock/qcom,rpmcc.h> / { model = "Qualcomm Technologies, Inc. MSM8916"; @@ -77,12 +79,12 @@ no-map; }; - mpss@86800000 { + mpss_mem: mpss@86800000 { reg = <0x0 0x86800000 0x0 0x2b00000>; no-map; }; - wcnss@89300000 { + wcnss_mem: wcnss@89300000 { reg = <0x0 0x89300000 0x0 0x600000>; no-map; }; @@ -91,6 +93,11 @@ no-map; reg = <0 0x8ea00000 0 0x100000>; }; + + venus_mem: venus@89900000 { + reg = <0x0 0x89900000 0x0 0x600000>; + no-map; + }; }; cpus { @@ -102,8 +109,13 @@ compatible = "arm,cortex-a53", "arm,armv8"; reg = <0x0>; next-level-cache = <&L2_0>; - enable-method = "psci"; - cpu-idle-states = <&CPU_SPC>; + clocks = <&apcs 0>; + cpu-supply = <&pm8916_spmi_s2>; + operating-points-v2 = <&cpu_opp_table>; + /* cooling options */ + cooling-min-level = <0>; + cooling-max-level = <7>; + #cooling-cells = <2>; }; CPU1: cpu@1 { @@ -113,6 +125,13 @@ next-level-cache = <&L2_0>; enable-method = "psci"; cpu-idle-states = <&CPU_SPC>; + clocks = <&apcs 0>; + cpu-supply = <&pm8916_spmi_s2>; + operating-points-v2 = <&cpu_opp_table>; + /* cooling options */ + cooling-min-level = <0>; + cooling-max-level = <7>; + #cooling-cells = <2>; }; CPU2: cpu@2 { @@ -122,6 +141,13 @@ next-level-cache = <&L2_0>; enable-method = "psci"; cpu-idle-states = <&CPU_SPC>; + clocks = <&apcs 0>; + cpu-supply = <&pm8916_spmi_s2>; + operating-points-v2 = <&cpu_opp_table>; + /* cooling options */ + cooling-min-level = <0>; + cooling-max-level = <7>; + #cooling-cells = <2>; }; CPU3: cpu@3 { @@ -131,6 +157,13 @@ next-level-cache = 
<&L2_0>; enable-method = "psci"; cpu-idle-states = <&CPU_SPC>; + clocks = <&apcs 0>; + cpu-supply = <&pm8916_spmi_s2>; + operating-points-v2 = <&cpu_opp_table>; + /* cooling options */ + cooling-min-level = <0>; + cooling-max-level = <7>; + #cooling-cells = <2>; }; L2_0: l2-cache { @@ -155,6 +188,21 @@ method = "smc"; }; + /* + * The CPR driver reads the initial voltage settings in efuse + * and populates OPPs. + */ + cpu_opp_table: opp_table0 { + compatible = "operating-points-v2"; + opp-shared; + + opp00 { + opp-hz = /bits/ 64 <200000000>; + opp-microvolt = <1050000>; + clock-latency-ns = <200000>; + }; + }; + pmu { compatible = "arm,armv8-pmuv3"; interrupts = <GIC_PPI 7 GIC_CPU_MASK_SIMPLE(4)>; @@ -179,6 +227,13 @@ type = "critical"; }; }; + + cooling-maps { + map0 { + trip = <&cpu_alert0>; + cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; }; cpu-thermal1 { @@ -199,6 +254,13 @@ type = "critical"; }; }; + + cooling-maps { + map0 { + trip = <&cpu_alert1>; + cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; }; }; @@ -225,15 +287,6 @@ }; }; - smem { - compatible = "qcom,smem"; - - memory-region = <&smem_mem>; - qcom,rpm-msg-ram = <&rpm_msg_ram>; - - hwlocks = <&tcsr_mutex 3>; - }; - firmware { scm: scm { compatible = "qcom,scm"; @@ -254,6 +307,108 @@ reg = <0x4ab000 0x4>; }; + camss: camss@1b00000 { + compatible = "qcom,msm8916-camss"; + reg = <0x1b0ac00 0x200>, + <0x1b00030 0x4>, + <0x1b0b000 0x200>, + <0x1b00038 0x4>, + <0x1b08000 0x100>, + <0x1b08400 0x100>, + <0x1b0a000 0x500>, + <0x1b00020 0x10>, + <0x1b10000 0x1000>; + reg-names = "csiphy0", + "csiphy0_clk_mux", + "csiphy1", + "csiphy1_clk_mux", + "csid0", + "csid1", + "ispif", + "csi_clk_mux", + "vfe0"; + interrupts = <GIC_SPI 78 0>, + <GIC_SPI 79 0>, + <GIC_SPI 51 0>, + <GIC_SPI 52 0>, + <GIC_SPI 55 0>, + <GIC_SPI 57 0>; + interrupt-names = "csiphy0", + "csiphy1", + "csid0", + "csid1", + "ispif", + "vfe0"; + power-domains = <&gcc VFE_GDSC>; + clocks = <&gcc 
GCC_CAMSS_TOP_AHB_CLK>, + <&gcc GCC_CAMSS_ISPIF_AHB_CLK>, + <&gcc GCC_CAMSS_CSI0PHYTIMER_CLK>, + <&gcc GCC_CAMSS_CSI1PHYTIMER_CLK>, + <&gcc GCC_CAMSS_CSI0_AHB_CLK>, + <&gcc GCC_CAMSS_CSI0_CLK>, + <&gcc GCC_CAMSS_CSI0PHY_CLK>, + <&gcc GCC_CAMSS_CSI0PIX_CLK>, + <&gcc GCC_CAMSS_CSI0RDI_CLK>, + <&gcc GCC_CAMSS_CSI1_AHB_CLK>, + <&gcc GCC_CAMSS_CSI1_CLK>, + <&gcc GCC_CAMSS_CSI1PHY_CLK>, + <&gcc GCC_CAMSS_CSI1PIX_CLK>, + <&gcc GCC_CAMSS_CSI1RDI_CLK>, + <&gcc GCC_CAMSS_AHB_CLK>, + <&gcc GCC_CAMSS_VFE0_CLK>, + <&gcc GCC_CAMSS_CSI_VFE0_CLK>, + <&gcc GCC_CAMSS_VFE_AHB_CLK>, + <&gcc GCC_CAMSS_VFE_AXI_CLK>; + clock-names = "camss_top_ahb", + "ispif_ahb", + "csiphy0_timer", + "csiphy1_timer", + "csi0_ahb", + "csi0", + "csi0_phy", + "csi0_pix", + "csi0_rdi", + "csi1_ahb", + "csi1", + "csi1_phy", + "csi1_pix", + "csi1_rdi", + "camss_ahb", + "camss_vfe_vfe", + "camss_csi_vfe", + "iface", + "bus"; + vdda-supply = <&pm8916_l2>; + iommus = <&apps_iommu 3>; + status = "disabled"; + ports { + #address-cells = <1>; + #size-cells = <0>; + }; + }; + + cci: qcom,cci@1b0c000 { + compatible = "qcom,cci-v1.0.8"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x1b0c000 0x1000>; + reg-names = "cci"; + interrupts = <GIC_SPI 50 IRQ_TYPE_EDGE_RISING>; + interrupt-names = "cci"; + clocks = <&gcc GCC_CAMSS_TOP_AHB_CLK>, + <&gcc GCC_CAMSS_CCI_AHB_CLK>, + <&gcc GCC_CAMSS_CCI_CLK>, + <&gcc GCC_CAMSS_AHB_CLK>; + clock-names = "camss_top_ahb", + "cci_ahb", + "cci", + "camss_ahb"; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&cci0_default>; + pinctrl-1 = <&cci0_sleep>; + status = "disabled"; + }; + msmgpio: pinctrl@1000000 { compatible = "qcom,msm8916-pinctrl"; reg = <0x1000000 0x300000>; @@ -304,9 +459,15 @@ status = "disabled"; }; + a53pll: a53pll@0b016000 { + compatible = "qcom,a53pll-msm8916"; + reg = <0x0b016000 0x40>; + }; + apcs: syscon@b011000 { - compatible = "syscon"; + compatible = "qcom,a53cc-msm8916", "syscon"; reg = <0x0b011000 0x1000>; + #clock-cells = <1>; }; blsp1_uart2: 
serial@78b0000 { @@ -504,6 +665,15 @@ reg-names = "lpass-lpaif"; }; + lpass_codec: codec{ + compatible = "qcom,msm8916-wcd-digital-codec"; + reg = <0x0771c000 0x400>; + clocks = <&gcc GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_CLK>, + <&gcc GCC_CODEC_DIGCODEC_CLK>; + clock-names = "ahbix-clk", "mclk"; + #sound-dai-cells = <1>; + }; + sdhc_1: sdhci@07824000 { compatible = "qcom,sdhci-msm-v4"; reg = <0x07824900 0x11c>, <0x07824000 0x800>; @@ -512,8 +682,10 @@ interrupts = <0 123 0>, <0 138 0>; interrupt-names = "hc_irq", "pwr_irq"; clocks = <&gcc GCC_SDCC1_APPS_CLK>, - <&gcc GCC_SDCC1_AHB_CLK>; - clock-names = "core", "iface"; + <&gcc GCC_SDCC1_AHB_CLK>, + <&xo_board>; + clock-names = "core", "iface", "xo"; + mmc-ddr-1_8v; bus-width = <8>; non-removable; status = "disabled"; @@ -527,8 +699,9 @@ interrupts = <0 125 0>, <0 221 0>; interrupt-names = "hc_irq", "pwr_irq"; clocks = <&gcc GCC_SDCC2_APPS_CLK>, - <&gcc GCC_SDCC2_AHB_CLK>; - clock-names = "core", "iface"; + <&gcc GCC_SDCC2_AHB_CLK>, + <&xo_board>; + clock-names = "core", "iface", "xo"; bus-width = <4>; status = "disabled"; }; @@ -562,6 +735,14 @@ qcom,otg-control = <2>; // PMIC qcom,manual-pullup; + qcom,msm-bus,name = "usb2"; + qcom,msm-bus,num-cases = <3>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + <87 512 0 0>, + <87 512 80000 0>, + <87 512 6000 6000>; + clocks = <&gcc GCC_USB_HS_AHB_CLK>, <&gcc GCC_USB_HS_SYSTEM_CLK>, <&gcc GCC_USB2A_PHY_SLEEP_CLK>; @@ -685,6 +866,40 @@ #thermal-sensor-cells = <1>; }; + adreno-3xx@01c00000 { + compatible = "qcom,adreno-3xx"; + #stream-id-cells = <16>; + reg = <0x01c00000 0x20000>; + reg-names = "kgsl_3d0_reg_memory"; + interrupts = <0 33 0>; + interrupt-names = "kgsl_3d0_irq"; + clock-names = + "core_clk", + "iface_clk", + "mem_clk", + "mem_iface_clk", + "alt_mem_iface_clk", + "gfx3d_clk_src"; + clocks = + <&gcc GCC_OXILI_GFX3D_CLK>, + <&gcc GCC_OXILI_AHB_CLK>, + <&gcc GCC_OXILI_GMEM_CLK>, + <&gcc GCC_BIMC_GFX_CLK>, + <&gcc GCC_BIMC_GPU_CLK>, + <&gcc 
GFX3D_CLK_SRC>; + power-domains = <&gcc OXILI_GDSC>; + qcom,chipid = <0x03000600>; + qcom,gpu-pwrlevels { + compatible = "qcom,gpu-pwrlevels"; + qcom,gpu-pwrlevel@0 { + qcom,gpu-freq = <400000000>; + }; + qcom,gpu-pwrlevel@1 { + qcom,gpu-freq = <19200000>; + }; + }; + }; + mdss: mdss@1a00000 { compatible = "qcom,mdss"; reg = <0x1a00000 0x1000>, @@ -717,6 +932,13 @@ interrupt-parent = <&mdss>; interrupts = <0 0>; + qcom,msm-bus,name = "mdss_mdp"; + qcom,msm-bus,num-cases = <3>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = <22 512 0 0>, + <22 512 0 6400000>, + <22 512 0 6400000>; + clocks = <&gcc GCC_MDSS_AHB_CLK>, <&gcc GCC_MDSS_AXI_CLK>, <&gcc GCC_MDSS_MDP_CLK>, @@ -801,8 +1023,189 @@ clock-names = "iface_clk"; }; }; + + hexagon@4080000 { + compatible = "qcom,q6v5-pil"; + reg = <0x04080000 0x100>, + <0x04020000 0x040>; + + reg-names = "qdsp6", "rmb"; + + interrupts-extended = <&intc 0 24 1>, + <&hexagon_smp2p_in 0 0>, + <&hexagon_smp2p_in 1 0>, + <&hexagon_smp2p_in 2 0>, + <&hexagon_smp2p_in 3 0>; + interrupt-names = "wdog", "fatal", "ready", + "handover", "stop-ack"; + + clocks = <&gcc GCC_MSS_CFG_AHB_CLK>, + <&gcc GCC_MSS_Q6_BIMC_AXI_CLK>, + <&gcc GCC_BOOT_ROM_AHB_CLK>, + <&xo_board>; + clock-names = "iface", "bus", "mem", "xo"; + + qcom,smem-states = <&hexagon_smp2p_out 0>; + qcom,smem-state-names = "stop"; + + resets = <&scm 0>; + reset-names = "mss_restart"; + + cx-supply = <&pm8916_s1>; + mx-supply = <&pm8916_l3>; + pll-supply = <&pm8916_l7>; + + qcom,halt-regs = <&tcsr 0x18000 0x19000 0x1a000>; + + mba { + memory-region = <&mba_mem>; + }; + + mpss { + memory-region = <&mpss_mem>; + }; + + smd-edge { + interrupts = <0 25 IRQ_TYPE_EDGE_RISING>; + + qcom,smd-edge = <0>; + qcom,ipc = <&apcs 8 12>; + qcom,remote-pid = <1>; + + label = "hexagon"; + }; + }; + + uqfprom: eeprom@58000 { + compatible = "qcom,qfprom-msm8916"; + reg = <0x58000 0x7000>; + }; + + cpr@b018000 { + compatible = "qcom,cpr"; + reg = <0xb018000 0x1000>; + interrupts = <0 15 1>, 
<0 16 1>, <0 17 1>; + vdd-mx-supply = <&pm8916_l3>; + acc-syscon = <&tcsr>; + eeprom = <&uqfprom>; + + qcom,cpr-ref-clk = <19200>; + qcom,cpr-timer-delay-us = <5000>; + qcom,cpr-timer-cons-up = <0>; + qcom,cpr-timer-cons-down = <2>; + qcom,cpr-up-threshold = <0>; + qcom,cpr-down-threshold = <2>; + qcom,cpr-idle-clocks = <15>; + qcom,cpr-gcnt-us = <1>; + qcom,vdd-apc-step-up-limit = <1>; + qcom,vdd-apc-step-down-limit = <1>; + qcom,cpr-cpus = <&CPU0 &CPU1 &CPU2 &CPU3>; + }; + + pronto: wcnss@a21b000 { + compatible = "qcom,pronto-v2-pil", "qcom,pronto"; + reg = <0x0a204000 0x2000>, <0x0a202000 0x1000>, <0x0a21b000 0x3000>; + reg-names = "ccu", "dxe", "pmu"; + + memory-region = <&wcnss_mem>; + + interrupts-extended = <&intc 0 149 IRQ_TYPE_EDGE_RISING>, + <&wcnss_smp2p_in 0 IRQ_TYPE_EDGE_RISING>, + <&wcnss_smp2p_in 1 IRQ_TYPE_EDGE_RISING>, + <&wcnss_smp2p_in 2 IRQ_TYPE_EDGE_RISING>, + <&wcnss_smp2p_in 3 IRQ_TYPE_EDGE_RISING>; + interrupt-names = "wdog", "fatal", "ready", "handover", "stop-ack"; + + vddmx-supply = <&pm8916_l3>; + vddpx-supply = <&pm8916_l7>; + + qcom,state = <&wcnss_smp2p_out 0>; + qcom,state-names = "stop"; + + pinctrl-names = "default"; + pinctrl-0 = <&wcnss_pin_a>; + + status = "disabled"; + + iris { + compatible = "qcom,wcn3620"; + + clocks = <&rpmcc RPM_SMD_RF_CLK2>; + clock-names = "xo"; + + vddxo-supply = <&pm8916_l7>; + vddrfa-supply = <&pm8916_s3>; + vddpa-supply = <&pm8916_l9>; + vdddig-supply = <&pm8916_l5>; + }; + + smd-edge { + interrupts = <0 142 1>; + + qcom,ipc = <&apcs 8 17>; + qcom,smd-edge = <6>; + qcom,remote-pid = <4>; + + label = "pronto"; + + wcnss { + compatible = "qcom,wcnss"; + qcom,smd-channels = "WCNSS_CTRL"; + + qcom,mmio = <&pronto>; + + bt { + compatible = "qcom,wcnss-bt"; + }; + + wifi { + compatible = "qcom,wcnss-wlan"; + + interrupts = <0 145 IRQ_TYPE_LEVEL_HIGH>, + <0 146 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "tx", "rx"; + + qcom,smem-states = <&apps_smsm 10>, <&apps_smsm 9>; + qcom,smem-state-names = "tx-enable", 
"tx-rings-empty"; + }; + }; + }; + }; + + venus: video-codec@1d00000 { + compatible = "qcom,msm8916-venus"; + reg = <0x01d00000 0xff000>; + interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>; + power-domains = <&gcc VENUS_GDSC>; + clocks = <&gcc GCC_VENUS0_VCODEC0_CLK>, + <&gcc GCC_VENUS0_AHB_CLK>, + <&gcc GCC_VENUS0_AXI_CLK>; + clock-names = "core", "iface", "bus"; + iommus = <&apps_iommu 5>; + memory-region = <&venus_mem>; + status = "disabled"; + + video-decoder { + compatible = "venus-decoder"; + }; + + video-encoder { + compatible = "venus-encoder"; + }; + }; + }; + + + smem { + compatible = "qcom,smem"; + + memory-region = <&smem_mem>; + qcom,rpm-msg-ram = <&rpm_msg_ram>; + + hwlocks = <&tcsr_mutex 3>; }; + smd { compatible = "qcom,smd"; @@ -816,10 +1219,15 @@ qcom,smd-channels = "rpm_requests"; rpmcc: qcom,rpmcc { - compatible = "qcom,rpmcc-msm8916", "qcom,rpmcc"; + //compatible = "qcom,rpmcc-msm8916", "qcom,rpmcc"; + compatible = "qcom,rpmcc-msm8916"; #clock-cells = <1>; }; + msm-bus { + compatible = "qcom,rpm-msm-bus"; + }; + smd_rpm_regulators: pm8916-regulators { compatible = "qcom,rpm-pm8916-regulators"; @@ -934,3 +1342,6 @@ }; #include "msm8916-pins.dtsi" +#include "msm8916-coresight.dtsi" +#include "msm8916-iommu.dtsi" +#include "msm8916-bus.dtsi" diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi index 338f82a7fdc7..699f07360d2b 100644 --- a/arch/arm64/boot/dts/qcom/msm8996.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi @@ -192,14 +192,14 @@ }; clocks { - xo_board { + xo_board: xo_board { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <19200000>; clock-output-names = "xo_board"; }; - sleep_clk { + sleep_clk: sleep_clk { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <32764>; @@ -347,9 +347,10 @@ interrupts = <0 125 0>, <0 221 0>; interrupt-names = "hc_irq", "pwr_irq"; - clock-names = "iface", "core"; + clock-names = "iface", "core", "xo"; clocks = <&gcc GCC_SDCC2_AHB_CLK>, - 
<&gcc GCC_SDCC2_APPS_CLK>; + <&gcc GCC_SDCC2_APPS_CLK>, + <&xo_board>; bus-width = <4>; }; diff --git a/arch/arm64/boot/dts/qcom/pm8916.dtsi b/arch/arm64/boot/dts/qcom/pm8916.dtsi index f71679b15d54..7b0720fe307f 100644 --- a/arch/arm64/boot/dts/qcom/pm8916.dtsi +++ b/arch/arm64/boot/dts/qcom/pm8916.dtsi @@ -1,4 +1,5 @@ #include <dt-bindings/iio/qcom,spmi-vadc.h> +#include <dt-bindings/input/input.h> #include <dt-bindings/interrupt-controller/irq.h> #include <dt-bindings/spmi/spmi.h> @@ -17,12 +18,15 @@ interrupts = <0x0 0x61 0x1 IRQ_TYPE_EDGE_RISING>; }; - pwrkey@800 { + spmi_pon: pwrkey@800 { compatible = "qcom,pm8941-pwrkey"; reg = <0x800>; - interrupts = <0x0 0x8 0 IRQ_TYPE_EDGE_BOTH>; + interrupts = <0x0 0x8 0 IRQ_TYPE_EDGE_BOTH>, + <0x0 0x8 1 IRQ_TYPE_EDGE_BOTH>; debounce = <15625>; bias-pull-up; + resin-pull-up; + linux,code = <KEY_RESTART>; }; pm8916_gpios: gpios@c000 { @@ -91,9 +95,169 @@ }; pm8916_1: pm8916@1 { - compatible = "qcom,spmi-pmic"; + compatible = "qcom,pm8916", "qcom,spmi-pmic"; reg = <0x1 SPMI_USID>; #address-cells = <1>; #size-cells = <0>; + + wcd_codec: codec@f000 { + compatible = "qcom,pm8916-wcd-analog-codec"; + reg = <0xf000 0x200>; + reg-names = "pmic-codec-core"; + clocks = <&gcc GCC_CODEC_DIGCODEC_CLK>; + clock-names = "mclk"; + interrupt-parent = <&spmi_bus>; + interrupts = <0x1 0xf0 0x0 IRQ_TYPE_NONE>, + <0x1 0xf0 0x1 IRQ_TYPE_NONE>, + <0x1 0xf0 0x2 IRQ_TYPE_NONE>, + <0x1 0xf0 0x3 IRQ_TYPE_NONE>, + <0x1 0xf0 0x4 IRQ_TYPE_NONE>, + <0x1 0xf0 0x5 IRQ_TYPE_NONE>, + <0x1 0xf0 0x6 IRQ_TYPE_NONE>, + <0x1 0xf0 0x7 IRQ_TYPE_NONE>, + <0x1 0xf1 0x0 IRQ_TYPE_NONE>, + <0x1 0xf1 0x1 IRQ_TYPE_NONE>, + <0x1 0xf1 0x2 IRQ_TYPE_NONE>, + <0x1 0xf1 0x3 IRQ_TYPE_NONE>, + <0x1 0xf1 0x4 IRQ_TYPE_NONE>, + <0x1 0xf1 0x5 IRQ_TYPE_NONE>; + interrupt-names = "cdc_spk_cnp_int", + "cdc_spk_clip_int", + "cdc_spk_ocp_int", + "mbhc_ins_rem_det1", + "mbhc_but_rel_det", + "mbhc_but_press_det", + "mbhc_ins_rem_det", + "mbhc_switch_int", + "cdc_ear_ocp_int", + 
"cdc_hphr_ocp_int", + "cdc_hphl_ocp_det", + "cdc_ear_cnp_int", + "cdc_hphr_cnp_int", + "cdc_hphl_cnp_int"; + vdd-cdc-io-supply = <&pm8916_l5>; + vdd-cdc-tx-rx-cx-supply = <&pm8916_l5>; + vdd-micbias-supply = <&pm8916_l13>; + #sound-dai-cells = <1>; + + }; + + regulators { + compatible = "qcom,pm8916-regulators"; + #address-cells = <1>; + #size-cells = <1>; + + s1@1400 { + reg = <0x1400 0x300>; + status = "disabled"; + }; + + pm8916_spmi_s2: s2@1700 { + reg = <0x1700 0x300>; + status = "ok"; + regulator-min-microvolt = <1050000>; + regulator-max-microvolt = <1350000>; + }; + + s3@1a00 { + reg = <0x1a00 0x300>; + status = "disabled"; + }; + + s4@1d00 { + reg = <0x1d00 0x300>; + status = "disabled"; + }; + + l1@4000 { + reg = <0x4000 0x100>; + status = "disabled"; + }; + + l2@4100 { + reg = <0x4100 0x100>; + status = "disabled"; + }; + + l3@4200 { + reg = <0x4200 0x100>; + status = "disabled"; + }; + + l4@4300 { + reg = <0x4300 0x100>; + status = "disabled"; + }; + + l5@4400 { + reg = <0x4400 0x100>; + status = "disabled"; + }; + + l6@4500 { + reg = <0x4500 0x100>; + status = "disabled"; + }; + + l7@4600 { + reg = <0x4600 0x100>; + status = "disabled"; + }; + + l8@4700 { + reg = <0x4700 0x100>; + status = "disabled"; + }; + + l9@4800 { + reg = <0x4800 0x100>; + status = "disabled"; + }; + + l10@4900 { + reg = <0x4900 0x100>; + status = "disabled"; + }; + + l11@4a00 { + reg = <0x4a00 0x100>; + status = "disabled"; + }; + + l12@4b00 { + reg = <0x4b00 0x100>; + status = "disabled"; + }; + + l13@4c00 { + reg = <0x4c00 0x100>; + status = "disabled"; + }; + + l14@4d00 { + reg = <0x4d00 0x100>; + status = "disabled"; + }; + + l15@4e00 { + reg = <0x4e00 0x100>; + status = "disabled"; + }; + + l16@4f00 { + reg = <0x4f00 0x100>; + status = "disabled"; + }; + + l17@5000 { + reg = <0x5000 0x100>; + status = "disabled"; + }; + + l18@5100 { + reg = <0x5100 0x100>; + status = "disabled"; + }; + }; }; }; diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index 
dab2cb0c1f1c..ccb4d7812469 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -30,7 +30,6 @@ CONFIG_PROFILING=y CONFIG_JUMP_LABEL=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y -# CONFIG_BLK_DEV_BSG is not set # CONFIG_IOSCHED_DEADLINE is not set CONFIG_ARCH_SUNXI=y CONFIG_ARCH_ALPINE=y @@ -60,7 +59,6 @@ CONFIG_ARCH_XGENE=y CONFIG_ARCH_ZX=y CONFIG_ARCH_ZYNQMP=y CONFIG_PCI=y -CONFIG_PCI_MSI=y CONFIG_PCI_IOV=y CONFIG_PCI_AARDVARK=y CONFIG_PCIE_RCAR=y @@ -77,13 +75,16 @@ CONFIG_KSM=y CONFIG_TRANSPARENT_HUGEPAGE=y CONFIG_CMA=y CONFIG_SECCOMP=y -CONFIG_XEN=y CONFIG_KEXEC=y +CONFIG_XEN=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_COMPAT=y -CONFIG_CPU_IDLE=y CONFIG_ARM_CPUIDLE=y CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y CONFIG_CPUFREQ_DT=y CONFIG_ARM_BIG_LITTLE_CPUFREQ=y CONFIG_ARM_SCPI_CPUFREQ=y @@ -122,6 +123,8 @@ CONFIG_BRIDGE_VLAN_FILTERING=y CONFIG_VLAN_8021Q=m CONFIG_VLAN_8021Q_GVRP=y CONFIG_VLAN_8021Q_MVRP=y +CONFIG_QRTR=y +CONFIG_QRTR_SMD=y CONFIG_BPF_JIT=y CONFIG_BT=m CONFIG_BT_HIDP=m @@ -131,7 +134,9 @@ CONFIG_BT_LEDS=y # CONFIG_BT_DEBUGFS is not set CONFIG_BT_HCIUART=m CONFIG_BT_HCIUART_LL=y +CONFIG_BT_QCOMSMD=m CONFIG_CFG80211=m +# CONFIG_CFG80211_DEFAULT_PS is not set CONFIG_MAC80211=m CONFIG_MAC80211_LEDS=y CONFIG_RFKILL=m @@ -141,6 +146,8 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y CONFIG_DMA_CMA=y +CONFIG_CMA_SIZE_MBYTES=128 +CONFIG_SIMPLE_PM_BUS=y CONFIG_MTD=y CONFIG_MTD_M25P80=y CONFIG_MTD_SPI_NOR=y @@ -152,6 +159,10 @@ CONFIG_SRAM=y CONFIG_BLK_DEV_SD=y CONFIG_SCSI_SAS_ATA=y CONFIG_SCSI_HISI_SAS=y +CONFIG_SCSI_UFSHCD=y +CONFIG_SCSI_UFSHCD_PCI=y +CONFIG_SCSI_UFSHCD_PLATFORM=y +CONFIG_SCSI_UFS_QCOM=y CONFIG_ATA=y CONFIG_SATA_AHCI=y CONFIG_SATA_AHCI_PLATFORM=y @@ -159,8 +170,8 @@ CONFIG_AHCI_CEVA=y CONFIG_AHCI_MVEBU=y CONFIG_AHCI_XGENE=y CONFIG_AHCI_QORIQ=y 
-CONFIG_SATA_RCAR=y CONFIG_SATA_SIL24=y +CONFIG_SATA_RCAR=y CONFIG_PATA_PLATFORM=y CONFIG_PATA_OF_PLATFORM=y CONFIG_NETDEVICES=y @@ -171,6 +182,7 @@ CONFIG_VETH=m CONFIG_VIRTIO_NET=y CONFIG_AMD_XGBE=y CONFIG_NET_XGENE=y +CONFIG_ATL1C=y CONFIG_MACB=y CONFIG_HNS_DSAF=y CONFIG_HNS_ENET=y @@ -182,18 +194,19 @@ CONFIG_RAVB=y CONFIG_SMC91X=y CONFIG_SMSC911X=y CONFIG_STMMAC_ETH=m -CONFIG_REALTEK_PHY=m CONFIG_MICREL_PHY=y +CONFIG_REALTEK_PHY=m CONFIG_USB_PEGASUS=m CONFIG_USB_RTL8150=m CONFIG_USB_RTL8152=m -CONFIG_USB_USBNET=m +CONFIG_USB_USBNET=y CONFIG_USB_NET_DM9601=m CONFIG_USB_NET_SR9800=m CONFIG_USB_NET_SMSC75XX=m CONFIG_USB_NET_SMSC95XX=m CONFIG_USB_NET_PLUSB=m CONFIG_USB_NET_MCS7830=m +CONFIG_WCN36XX=m CONFIG_WL18XX=m CONFIG_WLCORE_SDIO=m CONFIG_INPUT_EVDEV=y @@ -212,14 +225,14 @@ CONFIG_SERIAL_8250_UNIPHIER=y CONFIG_SERIAL_OF_PLATFORM=y CONFIG_SERIAL_AMBA_PL011=y CONFIG_SERIAL_AMBA_PL011_CONSOLE=y +CONFIG_SERIAL_MESON=y +CONFIG_SERIAL_MESON_CONSOLE=y CONFIG_SERIAL_SAMSUNG=y CONFIG_SERIAL_SAMSUNG_CONSOLE=y CONFIG_SERIAL_TEGRA=y CONFIG_SERIAL_SH_SCI=y CONFIG_SERIAL_SH_SCI_NR_UARTS=11 CONFIG_SERIAL_SH_SCI_CONSOLE=y -CONFIG_SERIAL_MESON=y -CONFIG_SERIAL_MESON_CONSOLE=y CONFIG_SERIAL_MSM=y CONFIG_SERIAL_MSM_CONSOLE=y CONFIG_SERIAL_XILINX_PS_UART=y @@ -233,6 +246,7 @@ CONFIG_I2C_DESIGNWARE_PLATFORM=y CONFIG_I2C_IMX=y CONFIG_I2C_MESON=y CONFIG_I2C_MV64XXX=y +CONFIG_I2C_QCOM_CCI=m CONFIG_I2C_QUP=y CONFIG_I2C_TEGRA=y CONFIG_I2C_UNIPHIER_F=y @@ -243,8 +257,8 @@ CONFIG_SPI_MESON_SPIFC=m CONFIG_SPI_ORION=y CONFIG_SPI_PL022=y CONFIG_SPI_QUP=y -CONFIG_SPI_SPIDEV=m CONFIG_SPI_S3C64XX=y +CONFIG_SPI_SPIDEV=m CONFIG_SPMI=y CONFIG_PINCTRL_SINGLE=y CONFIG_PINCTRL_MAX77620=y @@ -260,30 +274,29 @@ CONFIG_GPIO_XGENE=y CONFIG_GPIO_PCA953X=y CONFIG_GPIO_PCA953X_IRQ=y CONFIG_GPIO_MAX77620=y +CONFIG_POWER_AVS=y +CONFIG_QCOM_CPR=y CONFIG_POWER_RESET_MSM=y -CONFIG_BATTERY_BQ27XXX=y CONFIG_POWER_RESET_XGENE=y CONFIG_POWER_RESET_SYSCON=y -CONFIG_SENSORS_LM90=m -CONFIG_SENSORS_INA2XX=m 
-CONFIG_SENSORS_ARM_SCPI=y -CONFIG_THERMAL=y -CONFIG_THERMAL_EMULATION=y +CONFIG_BATTERY_BQ27XXX=y +# CONFIG_HWMON is not set CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y CONFIG_CPU_THERMAL=y +CONFIG_THERMAL_EMULATION=y CONFIG_EXYNOS_THERMAL=y +CONFIG_QCOM_TSENS=y CONFIG_WATCHDOG=y -CONFIG_RENESAS_WDT=y CONFIG_S3C2410_WATCHDOG=y CONFIG_MESON_GXBB_WATCHDOG=m CONFIG_MESON_WATCHDOG=m +CONFIG_RENESAS_WDT=y +CONFIG_MFD_CROS_EC=y +CONFIG_MFD_CROS_EC_I2C=y +CONFIG_MFD_HI655X_PMIC=y CONFIG_MFD_MAX77620=y CONFIG_MFD_SPMI_PMIC=y CONFIG_MFD_SEC_CORE=y -CONFIG_MFD_HI655X_PMIC=y -CONFIG_REGULATOR=y -CONFIG_MFD_CROS_EC=y -CONFIG_MFD_CROS_EC_I2C=y CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_REGULATOR_GPIO=y CONFIG_REGULATOR_HI655X=y @@ -292,43 +305,54 @@ CONFIG_REGULATOR_PWM=y CONFIG_REGULATOR_QCOM_SMD_RPM=y CONFIG_REGULATOR_QCOM_SPMI=y CONFIG_REGULATOR_S2MPS11=y -CONFIG_DRM=m -CONFIG_DRM_NOUVEAU=m -CONFIG_DRM_TEGRA=m -CONFIG_DRM_PANEL_SIMPLE=m -CONFIG_DRM_I2C_ADV7511=m -CONFIG_DRM_HISI_KIRIN=m -CONFIG_FB=y +CONFIG_MEDIA_SUPPORT=m +CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_MEDIA_CONTROLLER=y +CONFIG_VIDEO_V4L2_SUBDEV_API=y +CONFIG_V4L_PLATFORM_DRIVERS=y +CONFIG_VIDEO_QCOM_CAMSS=m +CONFIG_V4L_MEM2MEM_DRIVERS=y +CONFIG_VIDEO_QCOM_VENUS=m +# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set +CONFIG_VIDEO_OV5645=m +CONFIG_DRM=y +CONFIG_DRM_I2C_CH7006=m +CONFIG_DRM_I2C_SIL164=m +# CONFIG_DRM_MSM_HDMI_HDCP is not set +CONFIG_DRM_PANEL_SIMPLE=y +CONFIG_DRM_I2C_ADV7511=y +CONFIG_DRM_I2C_ADV7511_AUDIO=y CONFIG_FB_ARMCLCD=y CONFIG_BACKLIGHT_GENERIC=m CONFIG_BACKLIGHT_LP855X=m -CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_LOGO=y # CONFIG_LOGO_LINUX_MONO is not set # CONFIG_LOGO_LINUX_VGA16 is not set CONFIG_SOUND=y CONFIG_SND=y CONFIG_SND_SOC=y -CONFIG_SND_SOC_RCAR=y +CONFIG_SND_SOC_QCOM=y +CONFIG_SND_SOC_APQ8016_SBC=y CONFIG_SND_SOC_SAMSUNG=y +CONFIG_SND_SOC_RCAR=y CONFIG_SND_SOC_AK4613=y +CONFIG_SND_SOC_MSM8916_WCD_ANALOG=y +CONFIG_SND_SOC_MSM8916_WCD_DIGITAL=y CONFIG_USB=y CONFIG_USB_OTG=y CONFIG_USB_XHCI_HCD=y 
-CONFIG_USB_XHCI_PLATFORM=y -CONFIG_USB_XHCI_RCAR=y -CONFIG_USB_EHCI_EXYNOS=y CONFIG_USB_XHCI_TEGRA=y CONFIG_USB_EHCI_HCD=y CONFIG_USB_EHCI_MSM=y +CONFIG_USB_EHCI_EXYNOS=y CONFIG_USB_EHCI_HCD_PLATFORM=y -CONFIG_USB_OHCI_EXYNOS=y CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_EXYNOS=y CONFIG_USB_OHCI_HCD_PLATFORM=y CONFIG_USB_RENESAS_USBHS=m CONFIG_USB_STORAGE=y -CONFIG_USB_DWC2=y CONFIG_USB_DWC3=y +CONFIG_USB_DWC2=y CONFIG_USB_CHIPIDEA=y CONFIG_USB_CHIPIDEA_UDC=y CONFIG_USB_CHIPIDEA_HOST=y @@ -357,7 +381,6 @@ CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y CONFIG_LEDS_GPIO=y CONFIG_LEDS_SYSCON=y -CONFIG_LEDS_TRIGGERS=y CONFIG_LEDS_TRIGGER_HEARTBEAT=y CONFIG_LEDS_TRIGGER_CPU=y CONFIG_RTC_CLASS=y @@ -365,11 +388,11 @@ CONFIG_RTC_DRV_MAX77686=y CONFIG_RTC_DRV_S5M=y CONFIG_RTC_DRV_DS3232=y CONFIG_RTC_DRV_EFI=y +CONFIG_RTC_DRV_S3C=y CONFIG_RTC_DRV_PL031=y CONFIG_RTC_DRV_SUN6I=y CONFIG_RTC_DRV_TEGRA=y CONFIG_RTC_DRV_XGENE=y -CONFIG_RTC_DRV_S3C=y CONFIG_DMADEVICES=y CONFIG_PL330_DMA=y CONFIG_TEGRA20_APB_DMA=y @@ -389,31 +412,46 @@ CONFIG_COMMON_CLK_CS2000_CP=y CONFIG_COMMON_CLK_S2MPS11=y CONFIG_CLK_QORIQ=y CONFIG_COMMON_CLK_QCOM=y +CONFIG_QCOM_CLK_SMD_RPM=y CONFIG_MSM_GCC_8916=y CONFIG_MSM_MMCC_8996=y +CONFIG_QCOM_A53CC=y +CONFIG_QCOM_A53PLL=y +CONFIG_QCOM_HFPLL=y +CONFIG_KPSS_XCC=y CONFIG_HWSPINLOCK_QCOM=y CONFIG_MAILBOX=y CONFIG_ARM_MHU=y CONFIG_HI6220_MBOX=y +CONFIG_QCOM_IOMMU_V1=y CONFIG_ARM_SMMU=y +CONFIG_REMOTEPROC=y +CONFIG_QCOM_Q6V5_PIL=y +CONFIG_QCOM_WCNSS_PIL=m +CONFIG_RPMSG_CHAR=y +CONFIG_RPMSG_QCOM_SMD=y CONFIG_QCOM_SMEM=y -CONFIG_QCOM_SMD=y CONFIG_QCOM_SMD_RPM=y +CONFIG_QCOM_SMP2P=y +CONFIG_QCOM_SMSM=y +CONFIG_QCOM_WCNSS_CTRL=y +CONFIG_QTI_LNX_GPS_PROXY=y CONFIG_ARCH_TEGRA_132_SOC=y CONFIG_ARCH_TEGRA_210_SOC=y CONFIG_EXTCON_USB_GPIO=y +CONFIG_IIO=y +CONFIG_EXYNOS_ADC=y CONFIG_PWM=y +CONFIG_PWM_SAMSUNG=y CONFIG_PWM_TEGRA=m -CONFIG_COMMON_RESET_HI6220=y CONFIG_PHY_RCAR_GEN3_USB2=y CONFIG_PHY_HI6220_USB=y CONFIG_PHY_XGENE=y CONFIG_PHY_TEGRA_XUSB=y +CONFIG_NVMEM=y 
+CONFIG_QCOM_QFPROM=y CONFIG_ARM_SCPI_PROTOCOL=y CONFIG_ACPI=y -CONFIG_IIO=y -CONFIG_EXYNOS_ADC=y -CONFIG_PWM_SAMSUNG=y CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y CONFIG_EXT4_FS_POSIX_ACL=y @@ -427,11 +465,12 @@ CONFIG_FUSE_FS=m CONFIG_CUSE=m CONFIG_OVERLAY_FS=m CONFIG_VFAT_FS=y -CONFIG_TMPFS=y CONFIG_HUGETLBFS=y CONFIG_CONFIGFS_FS=y CONFIG_EFIVAR_FS=y CONFIG_SQUASHFS=y +CONFIG_UFS_FS=y +CONFIG_UFS_DEBUG=y CONFIG_NFS_FS=y CONFIG_NFS_V4=y CONFIG_NFS_V4_1=y @@ -440,7 +479,6 @@ CONFIG_ROOT_NFS=y CONFIG_9P_FS=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y -CONFIG_VIRTUALIZATION=y CONFIG_KVM=y CONFIG_PRINTK_TIME=y CONFIG_DEBUG_INFO=y @@ -452,6 +490,12 @@ CONFIG_LOCKUP_DETECTOR=y # CONFIG_DEBUG_PREEMPT is not set # CONFIG_FTRACE is not set CONFIG_MEMTEST=y +CONFIG_CORESIGHT=y +CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y +CONFIG_CORESIGHT_SINK_TPIU=y +CONFIG_CORESIGHT_SINK_ETBV10=y +CONFIG_CORESIGHT_SOURCE_ETM4X=y +CONFIG_CORESIGHT_QCOM_REPLICATOR=y CONFIG_SECURITY=y CONFIG_CRYPTO_ECHAINIV=y CONFIG_CRYPTO_ANSI_CPRNG=y @@ -461,5 +505,4 @@ CONFIG_CRYPTO_SHA2_ARM64_CE=y CONFIG_CRYPTO_GHASH_ARM64_CE=y CONFIG_CRYPTO_AES_ARM64_CE_CCM=y CONFIG_CRYPTO_AES_ARM64_CE_BLK=y -# CONFIG_CRYPTO_AES_ARM64_NEON_BLK is not set CONFIG_CRYPTO_CRC32_ARM64=y diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 01753cd7d3f0..af77aab5b349 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -154,9 +154,9 @@ void machine_restart(char *cmd) efi_reboot(reboot_mode, NULL); /* Now call the architecture specific reboot code. 
*/ - if (arm_pm_restart) - arm_pm_restart(reboot_mode, cmd); - else +// if (arm_pm_restart) +// arm_pm_restart(reboot_mode, cmd); +// else do_kernel_restart(cmd); /* diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index b5bf46ce873b..16996d159e46 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -808,140 +808,32 @@ static struct dma_map_ops iommu_dma_ops = { .mapping_error = iommu_dma_mapping_error, }; -/* - * TODO: Right now __iommu_setup_dma_ops() gets called too early to do - * everything it needs to - the device is only partially created and the - * IOMMU driver hasn't seen it yet, so it can't have a group. Thus we - * need this delayed attachment dance. Once IOMMU probe ordering is sorted - * to move the arch_setup_dma_ops() call later, all the notifier bits below - * become unnecessary, and will go away. - */ -struct iommu_dma_notifier_data { - struct list_head list; - struct device *dev; - const struct iommu_ops *ops; - u64 dma_base; - u64 size; -}; -static LIST_HEAD(iommu_dma_masters); -static DEFINE_MUTEX(iommu_dma_notifier_lock); - -static bool do_iommu_attach(struct device *dev, const struct iommu_ops *ops, - u64 dma_base, u64 size) -{ - struct iommu_domain *domain = iommu_get_domain_for_dev(dev); - - /* - * If the IOMMU driver has the DMA domain support that we require, - * then the IOMMU core will have already configured a group for this - * device, and allocated the default domain for that group. 
- */ - if (!domain || iommu_dma_init_domain(domain, dma_base, size, dev)) { - pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n", - dev_name(dev)); - return false; - } - - dev->archdata.dma_ops = &iommu_dma_ops; - return true; -} - -static void queue_iommu_attach(struct device *dev, const struct iommu_ops *ops, - u64 dma_base, u64 size) -{ - struct iommu_dma_notifier_data *iommudata; - - iommudata = kzalloc(sizeof(*iommudata), GFP_KERNEL); - if (!iommudata) - return; - - iommudata->dev = dev; - iommudata->ops = ops; - iommudata->dma_base = dma_base; - iommudata->size = size; - - mutex_lock(&iommu_dma_notifier_lock); - list_add(&iommudata->list, &iommu_dma_masters); - mutex_unlock(&iommu_dma_notifier_lock); -} - -static int __iommu_attach_notifier(struct notifier_block *nb, - unsigned long action, void *data) -{ - struct iommu_dma_notifier_data *master, *tmp; - - if (action != BUS_NOTIFY_BIND_DRIVER) - return 0; - - mutex_lock(&iommu_dma_notifier_lock); - list_for_each_entry_safe(master, tmp, &iommu_dma_masters, list) { - if (data == master->dev && do_iommu_attach(master->dev, - master->ops, master->dma_base, master->size)) { - list_del(&master->list); - kfree(master); - break; - } - } - mutex_unlock(&iommu_dma_notifier_lock); - return 0; -} - -static int __init register_iommu_dma_ops_notifier(struct bus_type *bus) -{ - struct notifier_block *nb = kzalloc(sizeof(*nb), GFP_KERNEL); - int ret; - - if (!nb) - return -ENOMEM; - - nb->notifier_call = __iommu_attach_notifier; - - ret = bus_register_notifier(bus, nb); - if (ret) { - pr_warn("Failed to register DMA domain notifier; IOMMU DMA ops unavailable on bus '%s'\n", - bus->name); - kfree(nb); - } - return ret; -} - static int __init __iommu_dma_init(void) { - int ret; - - ret = iommu_dma_init(); - if (!ret) - ret = register_iommu_dma_ops_notifier(&platform_bus_type); - if (!ret) - ret = register_iommu_dma_ops_notifier(&amba_bustype); -#ifdef CONFIG_PCI - if (!ret) - ret = 
register_iommu_dma_ops_notifier(&pci_bus_type); -#endif - return ret; + return iommu_dma_init(); } arch_initcall(__iommu_dma_init); static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, const struct iommu_ops *ops) { - struct iommu_group *group; + struct iommu_domain *domain; if (!ops) return; + /* - * TODO: As a concession to the future, we're ready to handle being - * called both early and late (i.e. after bus_add_device). Once all - * the platform bus code is reworked to call us late and the notifier - * junk above goes away, move the body of do_iommu_attach here. + * The IOMMU core code allocates the default DMA domain, which the + * underlying IOMMU driver needs to support via the dma-iommu layer. */ - group = iommu_group_get(dev); - if (group) { - do_iommu_attach(dev, ops, dma_base, size); - iommu_group_put(group); - } else { - queue_iommu_attach(dev, ops, dma_base, size); + domain = iommu_get_domain_for_dev(dev); + if (!domain || iommu_dma_init_domain(domain, dma_base, size, dev)) { + pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n", + dev_name(dev)); + return; } + + dev->archdata.dma_ops = &iommu_dma_ops; } void arch_teardown_dma_ops(struct device *dev) diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c index 73c9c7fa9001..c05f24107bfc 100644 --- a/drivers/acpi/glue.c +++ b/drivers/acpi/glue.c @@ -176,7 +176,6 @@ int acpi_bind_one(struct device *dev, struct acpi_device *acpi_dev) struct list_head *physnode_list; unsigned int node_id; int retval = -EINVAL; - enum dev_dma_attr attr; if (has_acpi_companion(dev)) { if (acpi_dev) { @@ -233,11 +232,6 @@ int acpi_bind_one(struct device *dev, struct acpi_device *acpi_dev) if (!has_acpi_companion(dev)) ACPI_COMPANION_SET(dev, acpi_dev); - attr = acpi_get_dma_attr(acpi_dev); - if (attr != DEV_DMA_NOT_SUPPORTED) - arch_setup_dma_ops(dev, 0, 0, NULL, - attr == DEV_DMA_COHERENT); - acpi_physnode_link_name(physical_node_name, node_id); retval = 
sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj, physical_node_name); diff --git a/drivers/base/dd.c b/drivers/base/dd.c index d76cd97a98b6..01cba11d8596 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -19,6 +19,7 @@ #include <linux/device.h> #include <linux/delay.h> +#include <linux/dma-mapping.h> #include <linux/module.h> #include <linux/kthread.h> #include <linux/wait.h> @@ -351,6 +352,10 @@ re_probe: if (ret) goto pinctrl_bind_failed; + ret = dma_configure(dev); + if (ret) + goto dma_failed; + if (driver_sysfs_add(dev)) { printk(KERN_ERR "%s: driver_sysfs_add(%s) failed\n", __func__, dev_name(dev)); @@ -412,6 +417,8 @@ re_probe: goto done; probe_failed: + dma_deconfigure(dev); +dma_failed: if (dev->bus) blocking_notifier_call_chain(&dev->bus->p->bus_notifier, BUS_NOTIFY_DRIVER_NOT_BOUND, dev); @@ -796,6 +803,8 @@ static void __device_release_driver(struct device *dev) dev->bus->remove(dev); else if (drv->remove) drv->remove(dev); + + dma_deconfigure(dev); devres_release_all(dev); dev->driver = NULL; dev_set_drvdata(dev, NULL); diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c index 8f8b68c80986..cc2cf0b25e3b 100644 --- a/drivers/base/dma-mapping.c +++ b/drivers/base/dma-mapping.c @@ -7,9 +7,11 @@ * This file is released under the GPLv2. 
*/ +#include <linux/acpi.h> #include <linux/dma-mapping.h> #include <linux/export.h> #include <linux/gfp.h> +#include <linux/of_device.h> #include <linux/slab.h> #include <linux/vmalloc.h> @@ -341,3 +343,35 @@ void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags) vunmap(cpu_addr); } #endif + +/* + * Common configuration to enable DMA API use for a device + */ +#include <linux/pci.h> + +int dma_configure(struct device *dev) +{ + struct device *bridge = NULL, *dma_dev = dev; + int ret = 0; + + if (dev_is_pci(dev)) { + bridge = pci_get_host_bridge_device(to_pci_dev(dev)); + dma_dev = bridge; + if (IS_ENABLED(CONFIG_OF) && dma_dev->parent && + dma_dev->parent->of_node) + dma_dev = dma_dev->parent; + } + + if (dma_dev->of_node) + ret = of_dma_configure(dev, dma_dev->of_node); + + if (bridge) + pci_put_host_bridge_device(bridge); + + return ret; +} + +void dma_deconfigure(struct device *dev) +{ + of_dma_deconfigure(dev); +} diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c index 6441dfda489f..587d56406bef 100644 --- a/drivers/base/power/opp/core.c +++ b/drivers/base/power/opp/core.c @@ -32,9 +32,10 @@ LIST_HEAD(opp_tables); /* Lock to allow exclusive modification to the device and opp lists */ DEFINE_MUTEX(opp_table_lock); -#define opp_rcu_lockdep_assert() \ +#define opp_rcu_lockdep_assert(s) \ do { \ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ + !(s && srcu_read_lock_held(s)) && \ !lockdep_is_held(&opp_table_lock), \ "Missing rcu_read_lock() or " \ "opp_table_lock protection"); \ @@ -72,7 +73,7 @@ struct opp_table *_find_opp_table(struct device *dev) { struct opp_table *opp_table; - opp_rcu_lockdep_assert(); + opp_rcu_lockdep_assert(NULL); if (IS_ERR_OR_NULL(dev)) { pr_err("%s: Invalid parameters\n", __func__); @@ -106,7 +107,7 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp) struct dev_pm_opp *tmp_opp; unsigned long v = 0; - opp_rcu_lockdep_assert(); + opp_rcu_lockdep_assert(NULL); tmp_opp = 
rcu_dereference(opp); if (IS_ERR_OR_NULL(tmp_opp)) @@ -138,7 +139,7 @@ unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp) struct dev_pm_opp *tmp_opp; unsigned long f = 0; - opp_rcu_lockdep_assert(); + opp_rcu_lockdep_assert(NULL); tmp_opp = rcu_dereference(opp); if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) @@ -150,6 +151,27 @@ unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp) } EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq); +struct regulator *dev_pm_opp_get_regulator(struct device *dev) +{ + struct opp_table *opp_table; + struct regulator *reg; + + rcu_read_lock(); + + opp_table = _find_opp_table(dev); + if (IS_ERR(opp_table)) { + rcu_read_unlock(); + return ERR_CAST(opp_table); + } + + reg = opp_table->regulator; + + rcu_read_unlock(); + + return reg; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_get_regulator); + /** * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not * @opp: opp for which turbo mode is being verified @@ -172,7 +194,7 @@ bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp) { struct dev_pm_opp *tmp_opp; - opp_rcu_lockdep_assert(); + opp_rcu_lockdep_assert(NULL); tmp_opp = rcu_dereference(opp); if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) { @@ -300,7 +322,7 @@ struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev) { struct opp_table *opp_table; - opp_rcu_lockdep_assert(); + opp_rcu_lockdep_assert(NULL); opp_table = _find_opp_table(dev); if (IS_ERR(opp_table) || !opp_table->suspend_opp || @@ -380,7 +402,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, struct opp_table *opp_table; struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); - opp_rcu_lockdep_assert(); + opp_rcu_lockdep_assert(NULL); opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) { @@ -444,7 +466,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, { struct opp_table *opp_table; - opp_rcu_lockdep_assert(); + opp_rcu_lockdep_assert(NULL); if (!dev || !freq) { dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, 
freq); @@ -486,7 +508,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, struct opp_table *opp_table; struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); - opp_rcu_lockdep_assert(); + opp_rcu_lockdep_assert(NULL); if (!dev || !freq) { dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); @@ -1339,12 +1361,13 @@ struct opp_table *dev_pm_opp_set_regulator(struct device *dev, const char *name) ret = -ENOMEM; goto unlock; } - +#if 0 /* This should be called before OPPs are initialized */ if (WARN_ON(!list_empty(&opp_table->opp_list))) { ret = -EBUSY; goto err; } +#endif /* Already have a regulator set */ if (WARN_ON(!IS_ERR(opp_table->regulator))) { @@ -1520,6 +1543,88 @@ unlock: } /** + * dev_pm_opp_adjust_voltage() - helper to change the voltage of an OPP + * @dev: device for which we do this operation + * @freq: OPP frequency to adjust voltage of + * @u_volt: new OPP voltage + * + * Change the voltage of an OPP with an RCU operation. + * + * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the + * copy operation, returns 0 if no modifcation was done OR modification was + * successful. + * + * Locking: The internal device_opp and opp structures are RCU protected. + * Hence this function internally uses RCU updater strategy with mutex locks to + * keep the integrity of the internal data structures. Callers should ensure + * that this function is *NOT* called under RCU protection or in contexts where + * mutex locking or synchronize_rcu() blocking calls cannot be used. 
+ */ +int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq, + unsigned long u_volt) +{ + struct opp_table *opp_table; + struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV); + int r = 0; + unsigned long tol; + + /* keep the node allocated */ + new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL); + if (!new_opp) + return -ENOMEM; + + mutex_lock(&opp_table_lock); + + /* Find the opp_table */ + opp_table = _find_opp_table(dev); + if (IS_ERR(opp_table)) { + r = PTR_ERR(opp_table); + dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r); + goto unlock; + } + + /* Do we have the frequency? */ + list_for_each_entry(tmp_opp, &opp_table->opp_list, node) { + if (tmp_opp->rate == freq) { + opp = tmp_opp; + break; + } + } + if (IS_ERR(opp)) { + r = PTR_ERR(opp); + goto unlock; + } + + /* Is update really needed? */ + if (opp->u_volt == u_volt) + goto unlock; + /* copy the old data over */ + *new_opp = *opp; + + /* plug in new node */ + new_opp->u_volt = u_volt; + tol = u_volt * opp_table->voltage_tolerance_v1 / 100; + new_opp->u_volt = u_volt; + new_opp->u_volt_min = u_volt - tol; + new_opp->u_volt_max = u_volt + tol; + + list_replace_rcu(&opp->node, &new_opp->node); + mutex_unlock(&opp_table_lock); + call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu); + + /* Notify the change of the OPP */ + srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADJUST_VOLTAGE, + new_opp); + + return 0; + +unlock: + mutex_unlock(&opp_table_lock); + kfree(new_opp); + return r; +} + +/** * dev_pm_opp_enable() - Enable a specific OPP * @dev: device for which we do this operation * @freq: OPP frequency to enable diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index 3cc9bff9d99d..da9ac3bd8dc0 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig @@ -344,8 +344,10 @@ config BT_WILINK config BT_QCOMSMD tristate "Qualcomm SMD based HCI support" - depends on QCOM_SMD && QCOM_WCNSS_CTRL + depends on RPMSG || 
(COMPILE_TEST && RPMSG=n) + depends on QCOM_WCNSS_CTRL || (COMPILE_TEST && QCOM_WCNSS_CTRL=n) select BT_QCA + depends on RPMSG || RPMSG=n help Qualcomm SMD based HCI driver. This driver is used to bridge HCI data onto the shared memory diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c index 08c2c93887c1..68a9d010a276 100644 --- a/drivers/bluetooth/btqcomsmd.c +++ b/drivers/bluetooth/btqcomsmd.c @@ -14,7 +14,9 @@ #include <linux/module.h> #include <linux/slab.h> -#include <linux/soc/qcom/smd.h> +#include <linux/rpmsg.h> +#include <linux/of.h> + #include <linux/soc/qcom/wcnss_ctrl.h> #include <linux/platform_device.h> @@ -26,8 +28,9 @@ struct btqcomsmd { struct hci_dev *hdev; - struct qcom_smd_channel *acl_channel; - struct qcom_smd_channel *cmd_channel; + bdaddr_t bdaddr; + struct rpmsg_endpoint *acl_channel; + struct rpmsg_endpoint *cmd_channel; }; static int btqcomsmd_recv(struct hci_dev *hdev, unsigned int type, @@ -48,19 +51,19 @@ static int btqcomsmd_recv(struct hci_dev *hdev, unsigned int type, return hci_recv_frame(hdev, skb); } -static int btqcomsmd_acl_callback(struct qcom_smd_channel *channel, - const void *data, size_t count) +static int btqcomsmd_acl_callback(struct rpmsg_device *rpdev, void *data, + int count, void *priv, u32 addr) { - struct btqcomsmd *btq = qcom_smd_get_drvdata(channel); + struct btqcomsmd *btq = priv; btq->hdev->stat.byte_rx += count; return btqcomsmd_recv(btq->hdev, HCI_ACLDATA_PKT, data, count); } -static int btqcomsmd_cmd_callback(struct qcom_smd_channel *channel, - const void *data, size_t count) +static int btqcomsmd_cmd_callback(struct rpmsg_device *rpdev, void *data, + int count, void *priv, u32 addr) { - struct btqcomsmd *btq = qcom_smd_get_drvdata(channel); + struct btqcomsmd *btq = priv; return btqcomsmd_recv(btq->hdev, HCI_EVENT_PKT, data, count); } @@ -72,12 +75,12 @@ static int btqcomsmd_send(struct hci_dev *hdev, struct sk_buff *skb) switch (hci_skb_pkt_type(skb)) { case HCI_ACLDATA_PKT: - ret = 
qcom_smd_send(btq->acl_channel, skb->data, skb->len); + ret = rpmsg_send(btq->acl_channel, skb->data, skb->len); hdev->stat.acl_tx++; hdev->stat.byte_tx += skb->len; break; case HCI_COMMAND_PKT: - ret = qcom_smd_send(btq->cmd_channel, skb->data, skb->len); + ret = rpmsg_send(btq->cmd_channel, skb->data, skb->len); hdev->stat.cmd_tx++; break; default: @@ -100,6 +103,38 @@ static int btqcomsmd_close(struct hci_dev *hdev) return 0; } +static int btqcomsmd_setup(struct hci_dev *hdev) +{ + struct btqcomsmd *btq = hci_get_drvdata(hdev); + struct sk_buff *skb; + int err; + + skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) + return PTR_ERR(skb); + kfree_skb(skb); + + /* Devices do not have persistent storage for BD address. If no + * BD address has been retrieved during probe, mark the device + * as having an invalid BD address. + */ + if (!bacmp(&btq->bdaddr, BDADDR_ANY)) { + set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks); + return 0; + } + + /* When setting a configured BD address fails, mark the device + * as having an invalid BD address. 
+ */ + err = qca_set_bdaddr_rome(hdev, &btq->bdaddr); + if (err) { + set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks); + return 0; + } + + return 0; +} + static int btqcomsmd_probe(struct platform_device *pdev) { struct btqcomsmd *btq; @@ -114,17 +149,23 @@ static int btqcomsmd_probe(struct platform_device *pdev) wcnss = dev_get_drvdata(pdev->dev.parent); btq->acl_channel = qcom_wcnss_open_channel(wcnss, "APPS_RIVA_BT_ACL", - btqcomsmd_acl_callback); + btqcomsmd_acl_callback, btq); if (IS_ERR(btq->acl_channel)) return PTR_ERR(btq->acl_channel); btq->cmd_channel = qcom_wcnss_open_channel(wcnss, "APPS_RIVA_BT_CMD", - btqcomsmd_cmd_callback); + btqcomsmd_cmd_callback, btq); if (IS_ERR(btq->cmd_channel)) return PTR_ERR(btq->cmd_channel); - qcom_smd_set_drvdata(btq->acl_channel, btq); - qcom_smd_set_drvdata(btq->cmd_channel, btq); + /* The local-bd-address property is usually injected by the + * bootloader which has access to the allocated BD address. + */ + if (!of_property_read_u8_array(pdev->dev.of_node, "local-bd-address", + (u8 *)&btq->bdaddr, sizeof(bdaddr_t))) { + dev_info(&pdev->dev, "BD address %pMR retrieved from device-tree", + &btq->bdaddr); + } hdev = hci_alloc_dev(); if (!hdev) @@ -138,6 +179,7 @@ static int btqcomsmd_probe(struct platform_device *pdev) hdev->open = btqcomsmd_open; hdev->close = btqcomsmd_close; hdev->send = btqcomsmd_send; + hdev->setup = btqcomsmd_setup; hdev->set_bdaddr = qca_set_bdaddr_rome; ret = hci_register_dev(hdev); @@ -158,6 +200,9 @@ static int btqcomsmd_remove(struct platform_device *pdev) hci_unregister_dev(btq->hdev); hci_free_dev(btq->hdev); + rpmsg_destroy_ept(btq->cmd_channel); + rpmsg_destroy_ept(btq->acl_channel); + return 0; } @@ -165,6 +210,7 @@ static const struct of_device_id btqcomsmd_of_match[] = { { .compatible = "qcom,wcnss-bt", }, { }, }; +MODULE_DEVICE_TABLE(of, btqcomsmd_of_match); static struct platform_driver btqcomsmd_driver = { .probe = btqcomsmd_probe, diff --git a/drivers/clk/clk-mux.c 
b/drivers/clk/clk-mux.c index 16a3d5717f4e..f23913144c96 100644 --- a/drivers/clk/clk-mux.c +++ b/drivers/clk/clk-mux.c @@ -26,35 +26,27 @@ * parent - parent is adjustable through clk_set_parent */ -static u8 clk_mux_get_parent(struct clk_hw *hw) +#define to_clk_mux(_hw) container_of(_hw, struct clk_mux, hw) + +unsigned int clk_mux_get_parent(struct clk_hw *hw, unsigned int val, + unsigned int *table, unsigned long flags) { struct clk_mux *mux = to_clk_mux(hw); int num_parents = clk_hw_get_num_parents(hw); - u32 val; - - /* - * FIXME need a mux-specific flag to determine if val is bitwise or numeric - * e.g. sys_clkin_ck's clksel field is 3 bits wide, but ranges from 0x1 - * to 0x7 (index starts at one) - * OTOH, pmd_trace_clk_mux_ck uses a separate bit for each clock, so - * val = 0x4 really means "bit 2, index starts at bit 0" - */ - val = clk_readl(mux->reg) >> mux->shift; - val &= mux->mask; - if (mux->table) { + if (table) { int i; for (i = 0; i < num_parents; i++) - if (mux->table[i] == val) + if (table[i] == val) return i; return -EINVAL; } - if (val && (mux->flags & CLK_MUX_INDEX_BIT)) + if (val && (flags & CLK_MUX_INDEX_BIT)) val = ffs(val) - 1; - if (val && (mux->flags & CLK_MUX_INDEX_ONE)) + if (val && (flags & CLK_MUX_INDEX_ONE)) val--; if (val >= num_parents) @@ -62,23 +54,53 @@ static u8 clk_mux_get_parent(struct clk_hw *hw) return val; } +EXPORT_SYMBOL_GPL(clk_mux_get_parent); -static int clk_mux_set_parent(struct clk_hw *hw, u8 index) +static u8 _clk_mux_get_parent(struct clk_hw *hw) { struct clk_mux *mux = to_clk_mux(hw); u32 val; - unsigned long flags = 0; - if (mux->table) { - index = mux->table[index]; + /* + * FIXME need a mux-specific flag to determine if val is bitwise or numeric + * e.g. 
sys_clkin_ck's clksel field is 3 bits wide, but ranges from 0x1 + * to 0x7 (index starts at one) + * OTOH, pmd_trace_clk_mux_ck uses a separate bit for each clock, so + * val = 0x4 really means "bit 2, index starts at bit 0" + */ + val = clk_readl(mux->reg) >> mux->shift; + val &= mux->mask; + + return clk_mux_get_parent(hw, val, mux->table, mux->flags); +} + +unsigned int clk_mux_reindex(u8 index, unsigned int *table, + unsigned long flags) +{ + unsigned int val = index; + + if (table) { + val = table[val]; } else { - if (mux->flags & CLK_MUX_INDEX_BIT) - index = 1 << index; + if (flags & CLK_MUX_INDEX_BIT) + val = 1 << index; - if (mux->flags & CLK_MUX_INDEX_ONE) - index++; + if (flags & CLK_MUX_INDEX_ONE) + val++; } + return val; +} +EXPORT_SYMBOL_GPL(clk_mux_reindex); + +static int clk_mux_set_parent(struct clk_hw *hw, u8 index) +{ + struct clk_mux *mux = to_clk_mux(hw); + u32 val; + unsigned long flags = 0; + + index = clk_mux_reindex(index, mux->table, mux->flags); + if (mux->lock) spin_lock_irqsave(mux->lock, flags); else @@ -102,14 +124,14 @@ static int clk_mux_set_parent(struct clk_hw *hw, u8 index) } const struct clk_ops clk_mux_ops = { - .get_parent = clk_mux_get_parent, + .get_parent = _clk_mux_get_parent, .set_parent = clk_mux_set_parent, .determine_rate = __clk_mux_determine_rate, }; EXPORT_SYMBOL_GPL(clk_mux_ops); const struct clk_ops clk_mux_ro_ops = { - .get_parent = clk_mux_get_parent, + .get_parent = _clk_mux_get_parent, }; EXPORT_SYMBOL_GPL(clk_mux_ro_ops); @@ -117,7 +139,7 @@ struct clk_hw *clk_hw_register_mux_table(struct device *dev, const char *name, const char * const *parent_names, u8 num_parents, unsigned long flags, void __iomem *reg, u8 shift, u32 mask, - u8 clk_mux_flags, u32 *table, spinlock_t *lock) + u8 clk_mux_flags, unsigned int *table, spinlock_t *lock) { struct clk_mux *mux; struct clk_hw *hw; diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 0fb39fe217d1..ece4fa171d46 100644 --- a/drivers/clk/clk.c +++ 
b/drivers/clk/clk.c @@ -51,9 +51,13 @@ struct clk_core { struct clk_core **parents; u8 num_parents; u8 new_parent_index; + u8 safe_parent_index; unsigned long rate; unsigned long req_rate; + unsigned long old_rate; unsigned long new_rate; + unsigned long safe_freq; + struct clk_core *safe_parent; struct clk_core *new_parent; struct clk_core *new_child; unsigned long flags; @@ -1310,7 +1314,9 @@ out: static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate, struct clk_core *new_parent, u8 p_index) { - struct clk_core *child; + struct clk_core *child, *parent; + struct clk_hw *parent_hw; + unsigned long safe_freq = 0; core->new_rate = new_rate; core->new_parent = new_parent; @@ -1320,6 +1326,23 @@ static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate, if (new_parent && new_parent != core->parent) new_parent->new_child = core; + if (core->ops->get_safe_parent) { + parent_hw = core->ops->get_safe_parent(core->hw, &safe_freq); + if (parent_hw) { + parent = parent_hw->core; + p_index = clk_fetch_parent_index(core, parent); + core->safe_parent_index = p_index; + core->safe_parent = parent; + if (safe_freq) + core->safe_freq = safe_freq; + else + core->safe_freq = 0; + } + } else { + core->safe_parent = NULL; + core->safe_freq = 0; + } + hlist_for_each_entry(child, &core->children, child_node) { child->new_rate = clk_recalc(child, new_rate); clk_calc_subtree(child, child->new_rate, NULL, 0); @@ -1432,14 +1455,51 @@ static struct clk_core *clk_propagate_rate_change(struct clk_core *core, unsigned long event) { struct clk_core *child, *tmp_clk, *fail_clk = NULL; + struct clk_core *old_parent; int ret = NOTIFY_DONE; - if (core->rate == core->new_rate) + if (core->rate == core->new_rate && event != POST_RATE_CHANGE) return NULL; + switch (event) { + case PRE_RATE_CHANGE: + if (core->safe_parent) { + if (core->safe_freq) + core->ops->set_rate_and_parent(core->hw, + core->safe_freq, + core->safe_parent->rate, + core->safe_parent_index); + 
else + core->ops->set_parent(core->hw, + core->safe_parent_index); + } + core->old_rate = core->rate; + break; + case POST_RATE_CHANGE: + if (core->safe_parent) { + old_parent = __clk_set_parent_before(core, + core->new_parent); + if (core->ops->set_rate_and_parent) { + core->ops->set_rate_and_parent(core->hw, + core->new_rate, + core->new_parent ? + core->new_parent->rate : 0, + core->new_parent_index); + } else if (core->ops->set_parent) { + core->ops->set_parent(core->hw, + core->new_parent_index); + } + __clk_set_parent_after(core, core->new_parent, + old_parent); + } + break; + } + if (core->notifier_count) { - ret = __clk_notify(core, event, core->rate, core->new_rate); - if (ret & NOTIFY_STOP_MASK) + if (event != POST_RATE_CHANGE || core->old_rate != core->rate) + ret = __clk_notify(core, event, core->old_rate, + core->new_rate); + if (ret & NOTIFY_STOP_MASK && event != POST_RATE_CHANGE) fail_clk = core; } @@ -1466,12 +1526,12 @@ static struct clk_core *clk_propagate_rate_change(struct clk_core *core, * walk down a subtree and set the new rates notifying the rate * change on the way */ -static void clk_change_rate(struct clk_core *core) +static void +clk_change_rate(struct clk_core *core, unsigned long best_parent_rate) { struct clk_core *child; struct hlist_node *tmp; unsigned long old_rate; - unsigned long best_parent_rate = 0; bool skip_set_rate = false; struct clk_core *old_parent; struct clk_core *parent = NULL; @@ -1495,7 +1555,8 @@ static void clk_change_rate(struct clk_core *core) clk_enable_unlock(flags); } - if (core->new_parent && core->new_parent != core->parent) { + if (core->new_parent && core->new_parent != core->parent && + !core->safe_parent) { old_parent = __clk_set_parent_before(core, core->new_parent); trace_clk_set_parent(core, core->new_parent); @@ -1523,6 +1584,7 @@ static void clk_change_rate(struct clk_core *core) trace_clk_set_rate_complete(core, core->new_rate); core->rate = clk_recalc(core, best_parent_rate); + core->rate = 
core->new_rate; if (core->flags & CLK_SET_RATE_UNGATE) { unsigned long flags; @@ -1550,12 +1612,13 @@ static void clk_change_rate(struct clk_core *core) /* Skip children who will be reparented to another clock */ if (child->new_parent && child->new_parent != core) continue; - clk_change_rate(child); + if (child->new_rate != child->rate) + clk_change_rate(child, core->new_rate); } - /* handle the new child who might not be in core->children yet */ - if (core->new_child) - clk_change_rate(core->new_child); + /* handle the new child who might not be in clk->children yet */ + if (core->new_child && core->new_child->new_rate != core->new_child->rate) + clk_change_rate(core->new_child, core->new_rate); } static int clk_core_set_rate_nolock(struct clk_core *core, @@ -1563,6 +1626,7 @@ static int clk_core_set_rate_nolock(struct clk_core *core, { struct clk_core *top, *fail_clk; unsigned long rate = req_rate; + unsigned long parent_rate; if (!core) return 0; @@ -1588,11 +1652,18 @@ static int clk_core_set_rate_nolock(struct clk_core *core, return -EBUSY; } + if (top->parent) + parent_rate = top->parent->rate; + else + parent_rate = 0; + /* change the rates */ - clk_change_rate(top); + clk_change_rate(top, parent_rate); core->req_rate = req_rate; + clk_propagate_rate_change(top, POST_RATE_CHANGE); + return 0; } diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig index 0146d3c2547f..b7f978307571 100644 --- a/drivers/clk/qcom/Kconfig +++ b/drivers/clk/qcom/Kconfig @@ -2,6 +2,9 @@ config QCOM_GDSC bool select PM_GENERIC_DOMAINS if PM +config QCOM_RPMCC + bool + config COMMON_CLK_QCOM tristate "Support for Qualcomm's clock controllers" depends on OF @@ -9,6 +12,32 @@ config COMMON_CLK_QCOM select REGMAP_MMIO select RESET_CONTROLLER +config QCOM_CLK_RPM + tristate "RPM based Clock Controller" + depends on COMMON_CLK_QCOM && MFD_QCOM_RPM + select QCOM_RPMCC + help + The RPM (Resource Power Manager) is a dedicated hardware engine for + managing the shared SoC resources 
in order to keep the lowest power + profile. It communicates with other hardware subsystems via shared + memory and accepts clock requests, aggregates the requests and turns + the clocks on/off or scales them on demand. + Say Y if you want to support the clocks exposed by the RPM on + platforms such as apq8064, msm8660, msm8960 etc. + +config QCOM_CLK_SMD_RPM + tristate "RPM over SMD based Clock Controller" + depends on COMMON_CLK_QCOM && QCOM_SMD_RPM + select QCOM_RPMCC + help + The RPM (Resource Power Manager) is a dedicated hardware engine for + managing the shared SoC resources in order to keep the lowest power + profile. It communicates with other hardware subsystems via shared + memory and accepts clock requests, aggregates the requests and turns + the clocks on/off or scales them on demand. + Say Y if you want to support the clocks exposed by the RPM on + platforms such as apq8016, apq8084, msm8974 etc. + config APQ_GCC_8084 tristate "APQ8084 Global Clock Controller" select QCOM_GDSC @@ -132,6 +161,14 @@ config MSM_MMCC_8974 Say Y if you want to support multimedia devices such as display, graphics, video encode/decode, camera, etc. +config MSM_GCC_8994 + tristate "MSM8994 Global Clock Controller" + depends on COMMON_CLK_QCOM + help + Support for the global clock controller on msm8994 devices. + Say Y if you want to use peripheral devices such as UART, SPI, + i2c, USB, UFS, SD/eMMC, PCIe, etc. + config MSM_GCC_8996 tristate "MSM8996 Global Clock Controller" select QCOM_GDSC @@ -150,3 +187,48 @@ config MSM_MMCC_8996 Support for the multimedia clock controller on msm8996 devices. Say Y if you want to support multimedia devices such as display, graphics, video encode/decode, camera, etc. + +config QCOM_A53CC + bool "A53 Clock Controller" + depends on COMMON_CLK_QCOM && QCOM_A53PLL + help + Support for the A53 clock controller on some Qualcomm devices. + Say Y if you want to support CPU frequency scaling on devices + such as MSM8916. 
+ +config QCOM_A53PLL + bool "A53 PLL" + depends on COMMON_CLK_QCOM + help + Support for the A53 PLL on some Qualcomm devices. It provides + support for CPU frequencies above 1GHz. + Say Y if you want to support CPU frequency scaling on devices + such as MSM8916. + +config QCOM_HFPLL + tristate "High-Frequency PLL (HFPLL) Clock Controller" + depends on COMMON_CLK_QCOM + help + Support for the high-frequency PLLs present on Qualcomm devices. + Say Y if you want to support CPU frequency scaling on devices + such as MSM8974, APQ8084, etc. + +config KPSS_XCC + tristate "KPSS Clock Controller" + depends on COMMON_CLK_QCOM + help + Support for the Krait ACC and GCC clock controllers. Say Y + if you want to support CPU frequency scaling on devices such + as MSM8960, APQ8064, etc. + +config KRAITCC + tristate "Krait Clock Controller" + depends on COMMON_CLK_QCOM && ARM + select KRAIT_CLOCKS + help + Support for the Krait CPU clocks on Qualcomm devices. + Say Y if you want to support CPU frequency scaling. 
+ +config KRAIT_CLOCKS + bool + select KRAIT_L2_ACCESSORS diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile index 1fb1f5476cb0..50e7a86f3214 100644 --- a/drivers/clk/qcom/Makefile +++ b/drivers/clk/qcom/Makefile @@ -9,6 +9,9 @@ clk-qcom-y += clk-rcg2.o clk-qcom-y += clk-branch.o clk-qcom-y += clk-regmap-divider.o clk-qcom-y += clk-regmap-mux.o +clk-qcom-y += clk-regmap-mux-div.o +clk-qcom-$(CONFIG_KRAIT_CLOCKS) += clk-krait.o +clk-qcom-y += clk-hfpll.o clk-qcom-y += reset.o clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o @@ -24,8 +27,16 @@ obj-$(CONFIG_MSM_GCC_8660) += gcc-msm8660.o obj-$(CONFIG_MSM_GCC_8916) += gcc-msm8916.o obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o obj-$(CONFIG_MSM_GCC_8974) += gcc-msm8974.o +obj-$(CONFIG_MSM_GCC_8994) += gcc-msm8994.o obj-$(CONFIG_MSM_GCC_8996) += gcc-msm8996.o obj-$(CONFIG_MSM_LCC_8960) += lcc-msm8960.o obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o obj-$(CONFIG_MSM_MMCC_8996) += mmcc-msm8996.o +obj-$(CONFIG_QCOM_A53CC) += a53cc.o +obj-$(CONFIG_QCOM_A53PLL) += a53-pll.o +obj-$(CONFIG_QCOM_CLK_RPM) += clk-rpm.o +obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o +obj-$(CONFIG_KPSS_XCC) += kpss-xcc.o +obj-$(CONFIG_QCOM_HFPLL) += hfpll.o +obj-$(CONFIG_KRAITCC) += krait-cc.o diff --git a/drivers/clk/qcom/a53-pll.c b/drivers/clk/qcom/a53-pll.c new file mode 100644 index 000000000000..40610d4076dd --- /dev/null +++ b/drivers/clk/qcom/a53-pll.c @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2016, Linaro Limited + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ + +#include <linux/clk-provider.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +#include "clk-pll.h" +#include "clk-regmap.h" + +static const struct pll_freq_tbl a53pll_freq[] = { + { 998400000, 52, 0x0, 0x1, 0 }, + { 1094400000, 57, 0x0, 0x1, 0 }, + { 1152000000, 62, 0x0, 0x1, 0 }, + { 1209600000, 65, 0x0, 0x1, 0 }, + { 1401600000, 73, 0x0, 0x1, 0 }, +}; + +static const struct regmap_config a53pll_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x40, + .fast_io = true, + .val_format_endian = REGMAP_ENDIAN_LITTLE, +}; + +static const struct of_device_id qcom_a53pll_match_table[] = { + { .compatible = "qcom,a53pll-msm8916" }, + { } +}; + +static int qcom_a53pll_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct clk_pll *pll; + struct resource *res; + void __iomem *base; + struct regmap *regmap; + struct clk_init_data init = { }; + + pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL); + if (!pll) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + regmap = devm_regmap_init_mmio(dev, base, &a53pll_regmap_config); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + pll->l_reg = 0x04; + pll->m_reg = 0x08; + pll->n_reg = 0x0c; + pll->config_reg = 0x14; + pll->mode_reg = 0x00; + pll->status_reg = 0x1c; + pll->status_bit = 16; + pll->freq_tbl = a53pll_freq; + + init.name = "a53pll"; + init.parent_names = (const char *[]){ "xo" }; + init.num_parents = 1; + init.ops = &clk_pll_sr2_ops; + init.flags = CLK_IS_CRITICAL; + pll->clkr.hw.init = &init; + + return devm_clk_register_regmap(dev, &pll->clkr); +} + +static struct platform_driver qcom_a53pll_driver = { + .probe = qcom_a53pll_probe, + .driver = { + .name = "qcom-a53pll", + .of_match_table = qcom_a53pll_match_table, + }, +}; + 
+builtin_platform_driver(qcom_a53pll_driver); diff --git a/drivers/clk/qcom/a53cc.c b/drivers/clk/qcom/a53cc.c new file mode 100644 index 000000000000..f9b19939e8ae --- /dev/null +++ b/drivers/clk/qcom/a53cc.c @@ -0,0 +1,152 @@ +/* + * Copyright (c) 2016, Linaro Limited + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +#include "clk-regmap.h" +#include "clk-regmap-mux-div.h" + +enum { + P_GPLL0, + P_A53PLL, +}; + +static const struct parent_map gpll0_a53cc_map[] = { + { P_GPLL0, 4 }, + { P_A53PLL, 5 }, +}; + +static const char * const gpll0_a53cc[] = { + "gpll0_vote", + "a53pll", +}; + +static const struct regmap_config a53cc_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x1000, + .fast_io = true, + .val_format_endian = REGMAP_ENDIAN_LITTLE, +}; + +/* + * We use the notifier function for switching to a temporary safe configuration + * (mux and divider), while the a53 pll is reconfigured. 
+ */ +static int a53cc_notifier_cb(struct notifier_block *nb, unsigned long event, + void *data) +{ + int ret = 0; + struct clk_regmap_mux_div *md = container_of(nb, + struct clk_regmap_mux_div, + clk_nb); + if (event == PRE_RATE_CHANGE) + /* set the mux and divider to safe frequency (400mhz) */ + ret = __mux_div_set_src_div(md, 4, 3); + + return notifier_from_errno(ret); +} + +static const struct of_device_id qcom_a53cc_match_table[] = { + { .compatible = "qcom,a53cc-msm8916" }, + { } +}; + +static int qcom_a53cc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct clk_regmap_mux_div *a53cc; + struct resource *res; + void __iomem *base; + struct clk *pclk; + struct regmap *regmap; + struct clk_init_data init = { }; + int ret; + + a53cc = devm_kzalloc(dev, sizeof(*a53cc), GFP_KERNEL); + if (!a53cc) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + a53cc->reg_offset = 0x50; + a53cc->hid_width = 5; + a53cc->hid_shift = 0; + a53cc->src_width = 3; + a53cc->src_shift = 8; + a53cc->parent_map = gpll0_a53cc_map; + + init.name = "a53mux"; + init.parent_names = gpll0_a53cc; + init.num_parents = 2; + init.ops = &clk_regmap_mux_div_ops; + init.flags = CLK_SET_RATE_PARENT; + a53cc->clkr.hw.init = &init; + + pclk = __clk_lookup(gpll0_a53cc[1]); + if (!pclk) + return -EPROBE_DEFER; + + a53cc->clk_nb.notifier_call = a53cc_notifier_cb; + ret = clk_notifier_register(pclk, &a53cc->clk_nb); + if (ret) { + dev_err(dev, "failed to register clock notifier: %d\n", ret); + return ret; + } + + regmap = devm_regmap_init_mmio(dev, base, &a53cc_regmap_config); + if (IS_ERR(regmap)) { + ret = PTR_ERR(regmap); + dev_err(dev, "failed to init regmap mmio: %d\n", ret); + goto err; + } + + a53cc->clkr.regmap = regmap; + + ret = devm_clk_register_regmap(dev, &a53cc->clkr); + if (ret) { + dev_err(dev, "failed to register regmap clock: %d\n", ret); + goto 
err; + } + + ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_simple_get, + &a53cc->clkr.hw); + if (ret) { + dev_err(dev, "failed to add clock provider: %d\n", ret); + goto err; + } + + return 0; +err: + clk_notifier_unregister(pclk, &a53cc->clk_nb); + return ret; +} + +static struct platform_driver qcom_a53cc_driver = { + .probe = qcom_a53cc_probe, + .driver = { + .name = "qcom-a53cc", + .of_match_table = qcom_a53cc_match_table, + }, +}; + +builtin_platform_driver(qcom_a53cc_driver); diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c index e6a03eaf7a93..47a1da3739ce 100644 --- a/drivers/clk/qcom/clk-alpha-pll.c +++ b/drivers/clk/qcom/clk-alpha-pll.c @@ -18,17 +18,21 @@ #include <linux/delay.h> #include "clk-alpha-pll.h" +#include "common.h" #define PLL_MODE 0x00 # define PLL_OUTCTRL BIT(0) # define PLL_BYPASSNL BIT(1) # define PLL_RESET_N BIT(2) +# define PLL_OFFLINE_REQ BIT(7) # define PLL_LOCK_COUNT_SHIFT 8 # define PLL_LOCK_COUNT_MASK 0x3f # define PLL_BIAS_COUNT_SHIFT 14 # define PLL_BIAS_COUNT_MASK 0x3f # define PLL_VOTE_FSM_ENA BIT(20) +# define PLL_FSM_ENA BIT(20) # define PLL_VOTE_FSM_RESET BIT(21) +# define PLL_OFFLINE_ACK BIT(28) # define PLL_ACTIVE_FLAG BIT(30) # define PLL_LOCK_DET BIT(31) @@ -46,6 +50,7 @@ #define PLL_USER_CTL_U 0x14 #define PLL_CONFIG_CTL 0x18 +#define PLL_CONFIG_CTL_U 0x20 #define PLL_TEST_CTL 0x1c #define PLL_TEST_CTL_U 0x20 #define PLL_STATUS 0x24 @@ -55,6 +60,7 @@ */ #define ALPHA_REG_BITWIDTH 40 #define ALPHA_BITWIDTH 32 +#define ALPHA_16BIT_MASK 0xffff #define to_clk_alpha_pll(_hw) container_of(to_clk_regmap(_hw), \ struct clk_alpha_pll, clkr) @@ -62,9 +68,10 @@ #define to_clk_alpha_pll_postdiv(_hw) container_of(to_clk_regmap(_hw), \ struct clk_alpha_pll_postdiv, clkr) -static int wait_for_pll(struct clk_alpha_pll *pll) +static int wait_for_pll(struct clk_alpha_pll *pll, u32 mask, bool inverse, + const char *action) { - u32 val, mask, off; + u32 val, off; int count; int ret; const char *name = 
clk_hw_get_name(&pll->clkr.hw); @@ -74,26 +81,148 @@ static int wait_for_pll(struct clk_alpha_pll *pll) if (ret) return ret; - if (val & PLL_VOTE_FSM_ENA) - mask = PLL_ACTIVE_FLAG; - else - mask = PLL_LOCK_DET; - - /* Wait for pll to enable. */ for (count = 100; count > 0; count--) { ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val); if (ret) return ret; - if ((val & mask) == mask) + if (inverse && !(val & mask)) + return 0; + else if ((val & mask) == mask) return 0; udelay(1); } - WARN(1, "%s didn't enable after voting for it!\n", name); + WARN(1, "%s failed to %s!\n", name, action); return -ETIMEDOUT; } +#define wait_for_pll_enable_active(pll) \ + wait_for_pll(pll, PLL_ACTIVE_FLAG, 0, "enable") + +#define wait_for_pll_enable_lock(pll) \ + wait_for_pll(pll, PLL_LOCK_DET, 0, "enable") + +#define wait_for_pll_disable(pll) \ + wait_for_pll(pll, PLL_ACTIVE_FLAG, 1, "disable") + +#define wait_for_pll_offline(pll) \ + wait_for_pll(pll, PLL_OFFLINE_ACK, 0, "offline") + +void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, + const struct alpha_pll_config *config) +{ + u32 val, mask; + u32 off = pll->offset; + + regmap_write(regmap, off + PLL_L_VAL, config->l); + regmap_write(regmap, off + PLL_ALPHA_VAL, config->alpha); + regmap_write(regmap, off + PLL_CONFIG_CTL, config->config_ctl_val); + regmap_write(regmap, off + PLL_CONFIG_CTL_U, config->config_ctl_hi_val); + + val = config->main_output_mask; + val |= config->aux_output_mask; + val |= config->aux2_output_mask; + val |= config->early_output_mask; + val |= config->pre_div_val; + val |= config->post_div_val; + val |= config->vco_val; + + mask = config->main_output_mask; + mask |= config->aux_output_mask; + mask |= config->aux2_output_mask; + mask |= config->early_output_mask; + mask |= config->pre_div_mask; + mask |= config->post_div_mask; + mask |= config->vco_mask; + + regmap_update_bits(regmap, off + PLL_USER_CTL, mask, val); + + if (pll->flags & SUPPORTS_FSM_MODE) + 
qcom_pll_set_fsm_mode(regmap, off + PLL_MODE, 6, 0); +} + +static int clk_alpha_pll_hwfsm_enable(struct clk_hw *hw) +{ + int ret; + u32 val, off; + struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); + + off = pll->offset; + ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val); + if (ret) + return ret; + + val |= PLL_FSM_ENA; + + if (pll->flags & SUPPORTS_OFFLINE_REQ) + val &= ~PLL_OFFLINE_REQ; + + ret = regmap_write(pll->clkr.regmap, off + PLL_MODE, val); + if (ret) + return ret; + + /* Make sure enable request goes through before waiting for update */ + mb(); + + return wait_for_pll_enable_active(pll); +} + +static void clk_alpha_pll_hwfsm_disable(struct clk_hw *hw) +{ + int ret; + u32 val, off; + struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); + + off = pll->offset; + ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val); + if (ret) + return; + + if (pll->flags & SUPPORTS_OFFLINE_REQ) { + ret = regmap_update_bits(pll->clkr.regmap, off + PLL_MODE, + PLL_OFFLINE_REQ, PLL_OFFLINE_REQ); + if (ret) + return; + + ret = wait_for_pll_offline(pll); + if (ret) + return; + } + + /* Disable hwfsm */ + ret = regmap_update_bits(pll->clkr.regmap, off + PLL_MODE, + PLL_FSM_ENA, 0); + if (ret) + return; + + wait_for_pll_disable(pll); +} + +static int pll_is_enabled(struct clk_hw *hw, u32 mask) +{ + int ret; + u32 val, off; + struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); + + off = pll->offset; + ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val); + if (ret) + return ret; + + return !!(val & mask); +} + +static int clk_alpha_pll_hwfsm_is_enabled(struct clk_hw *hw) +{ + return pll_is_enabled(hw, PLL_ACTIVE_FLAG); +} + +static int clk_alpha_pll_is_enabled(struct clk_hw *hw) +{ + return pll_is_enabled(hw, PLL_LOCK_DET); +} + static int clk_alpha_pll_enable(struct clk_hw *hw) { int ret; @@ -112,7 +241,7 @@ static int clk_alpha_pll_enable(struct clk_hw *hw) ret = clk_enable_regmap(hw); if (ret) return ret; - return wait_for_pll(pll); + return 
wait_for_pll_enable_active(pll); } /* Skip if already enabled */ @@ -136,7 +265,7 @@ static int clk_alpha_pll_enable(struct clk_hw *hw) if (ret) return ret; - ret = wait_for_pll(pll); + ret = wait_for_pll_enable_lock(pll); if (ret) return ret; @@ -234,9 +363,14 @@ clk_alpha_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) regmap_read(pll->clkr.regmap, off + PLL_USER_CTL, &ctl); if (ctl & PLL_ALPHA_EN) { regmap_read(pll->clkr.regmap, off + PLL_ALPHA_VAL, &low); - regmap_read(pll->clkr.regmap, off + PLL_ALPHA_VAL_U, &high); - a = (u64)high << 32 | low; - a >>= ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH; + if (pll->flags & SUPPORTS_16BIT_ALPHA) { + a = low & ALPHA_16BIT_MASK; + } else { + regmap_read(pll->clkr.regmap, off + PLL_ALPHA_VAL_U, + &high); + a = (u64)high << 32 | low; + a >>= ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH; + } } return alpha_pll_calc_rate(prate, l, a); @@ -257,11 +391,15 @@ static int clk_alpha_pll_set_rate(struct clk_hw *hw, unsigned long rate, return -EINVAL; } - a <<= (ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH); - regmap_write(pll->clkr.regmap, off + PLL_L_VAL, l); - regmap_write(pll->clkr.regmap, off + PLL_ALPHA_VAL, a); - regmap_write(pll->clkr.regmap, off + PLL_ALPHA_VAL_U, a >> 32); + + if (pll->flags & SUPPORTS_16BIT_ALPHA) { + regmap_write(pll->clkr.regmap, off + PLL_ALPHA_VAL, + a & ALPHA_16BIT_MASK); + } else { + a <<= (ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH); + regmap_write(pll->clkr.regmap, off + PLL_ALPHA_VAL_U, a >> 32); + } regmap_update_bits(pll->clkr.regmap, off + PLL_USER_CTL, PLL_VCO_MASK << PLL_VCO_SHIFT, @@ -294,12 +432,23 @@ static long clk_alpha_pll_round_rate(struct clk_hw *hw, unsigned long rate, const struct clk_ops clk_alpha_pll_ops = { .enable = clk_alpha_pll_enable, .disable = clk_alpha_pll_disable, + .is_enabled = clk_alpha_pll_is_enabled, .recalc_rate = clk_alpha_pll_recalc_rate, .round_rate = clk_alpha_pll_round_rate, .set_rate = clk_alpha_pll_set_rate, }; EXPORT_SYMBOL_GPL(clk_alpha_pll_ops); +const struct clk_ops 
clk_alpha_pll_hwfsm_ops = { + .enable = clk_alpha_pll_hwfsm_enable, + .disable = clk_alpha_pll_hwfsm_disable, + .is_enabled = clk_alpha_pll_hwfsm_is_enabled, + .recalc_rate = clk_alpha_pll_recalc_rate, + .round_rate = clk_alpha_pll_round_rate, + .set_rate = clk_alpha_pll_set_rate, +}; +EXPORT_SYMBOL_GPL(clk_alpha_pll_hwfsm_ops); + static unsigned long clk_alpha_pll_postdiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h index 90ce2016e1a0..d6e1ee2c7348 100644 --- a/drivers/clk/qcom/clk-alpha-pll.h +++ b/drivers/clk/qcom/clk-alpha-pll.h @@ -34,6 +34,10 @@ struct clk_alpha_pll { const struct pll_vco *vco_table; size_t num_vco; +#define SUPPORTS_OFFLINE_REQ BIT(0) +#define SUPPORTS_16BIT_ALPHA BIT(1) +#define SUPPORTS_FSM_MODE BIT(2) + u8 flags; struct clk_regmap clkr; }; @@ -51,7 +55,28 @@ struct clk_alpha_pll_postdiv { struct clk_regmap clkr; }; +struct alpha_pll_config { + u32 l; + u32 alpha; + u32 config_ctl_val; + u32 config_ctl_hi_val; + u32 main_output_mask; + u32 aux_output_mask; + u32 aux2_output_mask; + u32 early_output_mask; + u32 pre_div_val; + u32 pre_div_mask; + u32 post_div_val; + u32 post_div_mask; + u32 vco_val; + u32 vco_mask; +}; + extern const struct clk_ops clk_alpha_pll_ops; +extern const struct clk_ops clk_alpha_pll_hwfsm_ops; extern const struct clk_ops clk_alpha_pll_postdiv_ops; +void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, + const struct alpha_pll_config *config); + #endif diff --git a/drivers/clk/qcom/clk-hfpll.c b/drivers/clk/qcom/clk-hfpll.c new file mode 100644 index 000000000000..eacf853c132e --- /dev/null +++ b/drivers/clk/qcom/clk-hfpll.c @@ -0,0 +1,253 @@ +/* + * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include <linux/kernel.h> +#include <linux/export.h> +#include <linux/regmap.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/clk-provider.h> +#include <linux/spinlock.h> + +#include "clk-regmap.h" +#include "clk-hfpll.h" + +#define PLL_OUTCTRL BIT(0) +#define PLL_BYPASSNL BIT(1) +#define PLL_RESET_N BIT(2) + +/* Initialize a HFPLL at a given rate and enable it. */ +static void __clk_hfpll_init_once(struct clk_hw *hw) +{ + struct clk_hfpll *h = to_clk_hfpll(hw); + struct hfpll_data const *hd = h->d; + struct regmap *regmap = h->clkr.regmap; + + if (likely(h->init_done)) + return; + + /* Configure PLL parameters for integer mode. */ + if (hd->config_val) + regmap_write(regmap, hd->config_reg, hd->config_val); + regmap_write(regmap, hd->m_reg, 0); + regmap_write(regmap, hd->n_reg, 1); + + if (hd->user_reg) { + u32 regval = hd->user_val; + unsigned long rate; + + rate = clk_hw_get_rate(hw); + + /* Pick the right VCO. */ + if (hd->user_vco_mask && rate > hd->low_vco_max_rate) + regval |= hd->user_vco_mask; + regmap_write(regmap, hd->user_reg, regval); + } + + if (hd->droop_reg) + regmap_write(regmap, hd->droop_reg, hd->droop_val); + + h->init_done = true; +} + +static void __clk_hfpll_enable(struct clk_hw *hw) +{ + struct clk_hfpll *h = to_clk_hfpll(hw); + struct hfpll_data const *hd = h->d; + struct regmap *regmap = h->clkr.regmap; + u32 val; + + __clk_hfpll_init_once(hw); + + /* Disable PLL bypass mode. 
*/ + regmap_update_bits(regmap, hd->mode_reg, PLL_BYPASSNL, PLL_BYPASSNL); + + /* + * H/W requires a 5us delay between disabling the bypass and + * de-asserting the reset. Delay 10us just to be safe. + */ + udelay(10); + + /* De-assert active-low PLL reset. */ + regmap_update_bits(regmap, hd->mode_reg, PLL_RESET_N, PLL_RESET_N); + + /* Wait for PLL to lock. */ + if (hd->status_reg) { + do { + regmap_read(regmap, hd->status_reg, &val); + } while (!(val & BIT(hd->lock_bit))); + } else { + udelay(60); + } + + /* Enable PLL output. */ + regmap_update_bits(regmap, hd->mode_reg, PLL_OUTCTRL, PLL_OUTCTRL); +} + +/* Enable an already-configured HFPLL. */ +static int clk_hfpll_enable(struct clk_hw *hw) +{ + unsigned long flags; + struct clk_hfpll *h = to_clk_hfpll(hw); + struct hfpll_data const *hd = h->d; + struct regmap *regmap = h->clkr.regmap; + u32 mode; + + spin_lock_irqsave(&h->lock, flags); + regmap_read(regmap, hd->mode_reg, &mode); + if (!(mode & (PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL))) + __clk_hfpll_enable(hw); + spin_unlock_irqrestore(&h->lock, flags); + + return 0; +} + +static void __clk_hfpll_disable(struct clk_hfpll *h) +{ + struct hfpll_data const *hd = h->d; + struct regmap *regmap = h->clkr.regmap; + + /* + * Disable the PLL output, disable test mode, enable the bypass mode, + * and assert the reset. 
+ */ + regmap_update_bits(regmap, hd->mode_reg, + PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL, 0); +} + +static void clk_hfpll_disable(struct clk_hw *hw) +{ + struct clk_hfpll *h = to_clk_hfpll(hw); + unsigned long flags; + + spin_lock_irqsave(&h->lock, flags); + __clk_hfpll_disable(h); + spin_unlock_irqrestore(&h->lock, flags); +} + +static long clk_hfpll_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + struct clk_hfpll *h = to_clk_hfpll(hw); + struct hfpll_data const *hd = h->d; + unsigned long rrate; + + rate = clamp(rate, hd->min_rate, hd->max_rate); + + rrate = DIV_ROUND_UP(rate, *parent_rate) * *parent_rate; + if (rrate > hd->max_rate) + rrate -= *parent_rate; + + return rrate; +} + +/* + * For optimization reasons, assumes no downstream clocks are actively using + * it. + */ +static int clk_hfpll_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct clk_hfpll *h = to_clk_hfpll(hw); + struct hfpll_data const *hd = h->d; + struct regmap *regmap = h->clkr.regmap; + unsigned long flags; + u32 l_val, val; + bool enabled; + + l_val = rate / parent_rate; + + spin_lock_irqsave(&h->lock, flags); + + enabled = __clk_is_enabled(hw->clk); + if (enabled) + __clk_hfpll_disable(h); + + /* Pick the right VCO. 
*/ + if (hd->user_reg && hd->user_vco_mask) { + regmap_read(regmap, hd->user_reg, &val); + if (rate <= hd->low_vco_max_rate) + val &= ~hd->user_vco_mask; + else + val |= hd->user_vco_mask; + regmap_write(regmap, hd->user_reg, val); + } + + regmap_write(regmap, hd->l_reg, l_val); + + if (enabled) + __clk_hfpll_enable(hw); + + spin_unlock_irqrestore(&h->lock, flags); + + return 0; +} + +static unsigned long clk_hfpll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_hfpll *h = to_clk_hfpll(hw); + struct hfpll_data const *hd = h->d; + struct regmap *regmap = h->clkr.regmap; + u32 l_val; + + regmap_read(regmap, hd->l_reg, &l_val); + + return l_val * parent_rate; +} + +static void clk_hfpll_init(struct clk_hw *hw) +{ + struct clk_hfpll *h = to_clk_hfpll(hw); + struct hfpll_data const *hd = h->d; + struct regmap *regmap = h->clkr.regmap; + u32 mode, status; + + regmap_read(regmap, hd->mode_reg, &mode); + if (mode != (PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL)) { + __clk_hfpll_init_once(hw); + return; + } + + if (hd->status_reg) { + regmap_read(regmap, hd->status_reg, &status); + if (!(status & BIT(hd->lock_bit))) { + WARN(1, "HFPLL %s is ON, but not locked!\n", + __clk_get_name(hw->clk)); + clk_hfpll_disable(hw); + __clk_hfpll_init_once(hw); + } + } +} + +static int hfpll_is_enabled(struct clk_hw *hw) +{ + struct clk_hfpll *h = to_clk_hfpll(hw); + struct hfpll_data const *hd = h->d; + struct regmap *regmap = h->clkr.regmap; + u32 mode; + + regmap_read(regmap, hd->mode_reg, &mode); + mode &= 0x7; + return mode == (PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL); +} + +const struct clk_ops clk_ops_hfpll = { + .enable = clk_hfpll_enable, + .disable = clk_hfpll_disable, + .is_enabled = hfpll_is_enabled, + .round_rate = clk_hfpll_round_rate, + .set_rate = clk_hfpll_set_rate, + .recalc_rate = clk_hfpll_recalc_rate, + .init = clk_hfpll_init, +}; +EXPORT_SYMBOL_GPL(clk_ops_hfpll); diff --git a/drivers/clk/qcom/clk-hfpll.h b/drivers/clk/qcom/clk-hfpll.h new 
file mode 100644 index 000000000000..48c18d664f4e --- /dev/null +++ b/drivers/clk/qcom/clk-hfpll.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef __QCOM_CLK_HFPLL_H__ +#define __QCOM_CLK_HFPLL_H__ + +#include <linux/clk-provider.h> +#include <linux/spinlock.h> +#include "clk-regmap.h" + +struct hfpll_data { + u32 mode_reg; + u32 l_reg; + u32 m_reg; + u32 n_reg; + u32 user_reg; + u32 droop_reg; + u32 config_reg; + u32 status_reg; + u8 lock_bit; + + u32 droop_val; + u32 config_val; + u32 user_val; + u32 user_vco_mask; + unsigned long low_vco_max_rate; + + unsigned long min_rate; + unsigned long max_rate; +}; + +struct clk_hfpll { + struct hfpll_data const *d; + int init_done; + + struct clk_regmap clkr; + spinlock_t lock; +}; + +#define to_clk_hfpll(_hw) \ + container_of(to_clk_regmap(_hw), struct clk_hfpll, clkr) + +extern const struct clk_ops clk_ops_hfpll; + +#endif diff --git a/drivers/clk/qcom/clk-krait.c b/drivers/clk/qcom/clk-krait.c new file mode 100644 index 000000000000..84277741a9c8 --- /dev/null +++ b/drivers/clk/qcom/clk-krait.c @@ -0,0 +1,167 @@ +/* + * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/clk-provider.h> +#include <linux/spinlock.h> + +#include <asm/krait-l2-accessors.h> + +#include "clk-krait.h" + +/* Secondary and primary muxes share the same cp15 register */ +static DEFINE_SPINLOCK(krait_clock_reg_lock); + +#define LPL_SHIFT 8 +static void __krait_mux_set_sel(struct krait_mux_clk *mux, int sel) +{ + unsigned long flags; + u32 regval; + + spin_lock_irqsave(&krait_clock_reg_lock, flags); + regval = krait_get_l2_indirect_reg(mux->offset); + regval &= ~(mux->mask << mux->shift); + regval |= (sel & mux->mask) << mux->shift; + if (mux->lpl) { + regval &= ~(mux->mask << (mux->shift + LPL_SHIFT)); + regval |= (sel & mux->mask) << (mux->shift + LPL_SHIFT); + } + krait_set_l2_indirect_reg(mux->offset, regval); + spin_unlock_irqrestore(&krait_clock_reg_lock, flags); + + /* Wait for switch to complete. 
*/ + mb(); + udelay(1); +} + +static int krait_mux_set_parent(struct clk_hw *hw, u8 index) +{ + struct krait_mux_clk *mux = to_krait_mux_clk(hw); + u32 sel; + + sel = clk_mux_reindex(index, mux->parent_map, 0); + mux->en_mask = sel; + /* Don't touch mux if CPU is off as it won't work */ + if (__clk_is_enabled(hw->clk)) + __krait_mux_set_sel(mux, sel); + return 0; +} + +static u8 krait_mux_get_parent(struct clk_hw *hw) +{ + struct krait_mux_clk *mux = to_krait_mux_clk(hw); + u32 sel; + + sel = krait_get_l2_indirect_reg(mux->offset); + sel >>= mux->shift; + sel &= mux->mask; + mux->en_mask = sel; + + return clk_mux_get_parent(hw, sel, mux->parent_map, 0); +} + +static struct clk_hw *krait_mux_get_safe_parent(struct clk_hw *hw, + unsigned long *safe_freq) +{ + int i; + struct krait_mux_clk *mux = to_krait_mux_clk(hw); + int num_parents = clk_hw_get_num_parents(hw); + + i = mux->safe_sel; + for (i = 0; i < num_parents; i++) + if (mux->safe_sel == mux->parent_map[i]) + break; + + return clk_hw_get_parent_by_index(hw, i); +} + +static int krait_mux_enable(struct clk_hw *hw) +{ + struct krait_mux_clk *mux = to_krait_mux_clk(hw); + + __krait_mux_set_sel(mux, mux->en_mask); + + return 0; +} + +static void krait_mux_disable(struct clk_hw *hw) +{ + struct krait_mux_clk *mux = to_krait_mux_clk(hw); + + __krait_mux_set_sel(mux, mux->safe_sel); +} + +const struct clk_ops krait_mux_clk_ops = { + .enable = krait_mux_enable, + .disable = krait_mux_disable, + .set_parent = krait_mux_set_parent, + .get_parent = krait_mux_get_parent, + .determine_rate = __clk_mux_determine_rate_closest, + .get_safe_parent = krait_mux_get_safe_parent, +}; +EXPORT_SYMBOL_GPL(krait_mux_clk_ops); + +/* The divider can divide by 2, 4, 6 and 8. But we only really need div-2. 
*/ +static long krait_div2_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + *parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), rate * 2); + return DIV_ROUND_UP(*parent_rate, 2); +} + +static int krait_div2_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct krait_div2_clk *d = to_krait_div2_clk(hw); + unsigned long flags; + u32 val; + u32 mask = BIT(d->width) - 1; + + if (d->lpl) + mask = mask << (d->shift + LPL_SHIFT) | mask << d->shift; + + spin_lock_irqsave(&krait_clock_reg_lock, flags); + val = krait_get_l2_indirect_reg(d->offset); + val &= ~mask; + krait_set_l2_indirect_reg(d->offset, val); + spin_unlock_irqrestore(&krait_clock_reg_lock, flags); + + return 0; +} + +static unsigned long +krait_div2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) +{ + struct krait_div2_clk *d = to_krait_div2_clk(hw); + u32 mask = BIT(d->width) - 1; + u32 div; + + div = krait_get_l2_indirect_reg(d->offset); + div >>= d->shift; + div &= mask; + div = (div + 1) * 2; + + return DIV_ROUND_UP(parent_rate, div); +} + +const struct clk_ops krait_div2_clk_ops = { + .round_rate = krait_div2_round_rate, + .set_rate = krait_div2_set_rate, + .recalc_rate = krait_div2_recalc_rate, +}; +EXPORT_SYMBOL_GPL(krait_div2_clk_ops); diff --git a/drivers/clk/qcom/clk-krait.h b/drivers/clk/qcom/clk-krait.h new file mode 100644 index 000000000000..5d0063538e5d --- /dev/null +++ b/drivers/clk/qcom/clk-krait.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2013, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ + +#ifndef __QCOM_CLK_KRAIT_H +#define __QCOM_CLK_KRAIT_H + +#include <linux/clk-provider.h> + +struct krait_mux_clk { + unsigned int *parent_map; + bool has_safe_parent; + u8 safe_sel; + u32 offset; + u32 mask; + u32 shift; + u32 en_mask; + bool lpl; + + struct clk_hw hw; +}; + +#define to_krait_mux_clk(_hw) container_of(_hw, struct krait_mux_clk, hw) + +extern const struct clk_ops krait_mux_clk_ops; + +struct krait_div2_clk { + u32 offset; + u8 width; + u32 shift; + bool lpl; + + struct clk_hw hw; +}; + +#define to_krait_div2_clk(_hw) container_of(_hw, struct krait_div2_clk, hw) + +extern const struct clk_ops krait_div2_clk_ops; + +#endif diff --git a/drivers/clk/qcom/clk-pll.c b/drivers/clk/qcom/clk-pll.c index 5b940d629045..cb6cb8710daf 100644 --- a/drivers/clk/qcom/clk-pll.c +++ b/drivers/clk/qcom/clk-pll.c @@ -23,16 +23,11 @@ #include <asm/div64.h> #include "clk-pll.h" +#include "common.h" #define PLL_OUTCTRL BIT(0) #define PLL_BYPASSNL BIT(1) #define PLL_RESET_N BIT(2) -#define PLL_LOCK_COUNT_SHIFT 8 -#define PLL_LOCK_COUNT_MASK 0x3f -#define PLL_BIAS_COUNT_SHIFT 14 -#define PLL_BIAS_COUNT_MASK 0x3f -#define PLL_VOTE_FSM_ENA BIT(20) -#define PLL_VOTE_FSM_RESET BIT(21) static int clk_pll_enable(struct clk_hw *hw) { @@ -228,26 +223,6 @@ const struct clk_ops clk_pll_vote_ops = { }; EXPORT_SYMBOL_GPL(clk_pll_vote_ops); -static void -clk_pll_set_fsm_mode(struct clk_pll *pll, struct regmap *regmap, u8 lock_count) -{ - u32 val; - u32 mask; - - /* De-assert reset to FSM */ - regmap_update_bits(regmap, pll->mode_reg, PLL_VOTE_FSM_RESET, 0); - - /* Program bias count and lock count */ - val = 1 << PLL_BIAS_COUNT_SHIFT | lock_count << PLL_LOCK_COUNT_SHIFT; - mask = PLL_BIAS_COUNT_MASK << PLL_BIAS_COUNT_SHIFT; - mask |= PLL_LOCK_COUNT_MASK << PLL_LOCK_COUNT_SHIFT; - regmap_update_bits(regmap, pll->mode_reg, mask, val); - - /* Enable PLL FSM voting */ - regmap_update_bits(regmap, pll->mode_reg, 
PLL_VOTE_FSM_ENA, - PLL_VOTE_FSM_ENA); -} - static void clk_pll_configure(struct clk_pll *pll, struct regmap *regmap, const struct pll_config *config) { @@ -280,7 +255,7 @@ void clk_pll_configure_sr(struct clk_pll *pll, struct regmap *regmap, { clk_pll_configure(pll, regmap, config); if (fsm_mode) - clk_pll_set_fsm_mode(pll, regmap, 8); + qcom_pll_set_fsm_mode(regmap, pll->mode_reg, 1, 8); } EXPORT_SYMBOL_GPL(clk_pll_configure_sr); @@ -289,7 +264,7 @@ void clk_pll_configure_sr_hpm_lp(struct clk_pll *pll, struct regmap *regmap, { clk_pll_configure(pll, regmap, config); if (fsm_mode) - clk_pll_set_fsm_mode(pll, regmap, 0); + qcom_pll_set_fsm_mode(regmap, pll->mode_reg, 1, 0); } EXPORT_SYMBOL_GPL(clk_pll_configure_sr_hpm_lp); diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h index b904c335cda4..1b3e8d265bdb 100644 --- a/drivers/clk/qcom/clk-rcg.h +++ b/drivers/clk/qcom/clk-rcg.h @@ -173,6 +173,7 @@ struct clk_rcg2 { #define to_clk_rcg2(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg2, clkr) extern const struct clk_ops clk_rcg2_ops; +extern const struct clk_ops clk_rcg2_floor_ops; extern const struct clk_ops clk_rcg2_shared_ops; extern const struct clk_ops clk_edp_pixel_ops; extern const struct clk_ops clk_byte_ops; diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c index a071bba8018c..1a0985ae20d2 100644 --- a/drivers/clk/qcom/clk-rcg2.c +++ b/drivers/clk/qcom/clk-rcg2.c @@ -47,6 +47,11 @@ #define N_REG 0xc #define D_REG 0x10 +enum freq_policy { + FLOOR, + CEIL, +}; + static int clk_rcg2_is_enabled(struct clk_hw *hw) { struct clk_rcg2 *rcg = to_clk_rcg2(hw); @@ -176,15 +181,26 @@ clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) return calc_rate(parent_rate, m, n, mode, hid_div); } -static int _freq_tbl_determine_rate(struct clk_hw *hw, - const struct freq_tbl *f, struct clk_rate_request *req) +static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f, + struct clk_rate_request *req, + 
enum freq_policy policy) { unsigned long clk_flags, rate = req->rate; struct clk_hw *p; struct clk_rcg2 *rcg = to_clk_rcg2(hw); int index; - f = qcom_find_freq(f, rate); + switch (policy) { + case FLOOR: + f = qcom_find_freq_floor(f, rate); + break; + case CEIL: + f = qcom_find_freq(f, rate); + break; + default: + return -EINVAL; + }; + if (!f) return -EINVAL; @@ -221,7 +237,15 @@ static int clk_rcg2_determine_rate(struct clk_hw *hw, { struct clk_rcg2 *rcg = to_clk_rcg2(hw); - return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req); + return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, CEIL); +} + +static int clk_rcg2_determine_floor_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct clk_rcg2 *rcg = to_clk_rcg2(hw); + + return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, FLOOR); } static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f) @@ -265,12 +289,23 @@ static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f) return update_config(rcg); } -static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate) +static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate, + enum freq_policy policy) { struct clk_rcg2 *rcg = to_clk_rcg2(hw); const struct freq_tbl *f; - f = qcom_find_freq(rcg->freq_tbl, rate); + switch (policy) { + case FLOOR: + f = qcom_find_freq_floor(rcg->freq_tbl, rate); + break; + case CEIL: + f = qcom_find_freq(rcg->freq_tbl, rate); + break; + default: + return -EINVAL; + }; + if (!f) return -EINVAL; @@ -280,13 +315,25 @@ static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate) static int clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { - return __clk_rcg2_set_rate(hw, rate); + return __clk_rcg2_set_rate(hw, rate, CEIL); +} + +static int clk_rcg2_set_floor_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + return __clk_rcg2_set_rate(hw, rate, FLOOR); } static int 
clk_rcg2_set_rate_and_parent(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate, u8 index) { - return __clk_rcg2_set_rate(hw, rate); + return __clk_rcg2_set_rate(hw, rate, CEIL); +} + +static int clk_rcg2_set_floor_rate_and_parent(struct clk_hw *hw, + unsigned long rate, unsigned long parent_rate, u8 index) +{ + return __clk_rcg2_set_rate(hw, rate, FLOOR); } const struct clk_ops clk_rcg2_ops = { @@ -300,6 +347,17 @@ const struct clk_ops clk_rcg2_ops = { }; EXPORT_SYMBOL_GPL(clk_rcg2_ops); +const struct clk_ops clk_rcg2_floor_ops = { + .is_enabled = clk_rcg2_is_enabled, + .get_parent = clk_rcg2_get_parent, + .set_parent = clk_rcg2_set_parent, + .recalc_rate = clk_rcg2_recalc_rate, + .determine_rate = clk_rcg2_determine_floor_rate, + .set_rate = clk_rcg2_set_floor_rate, + .set_rate_and_parent = clk_rcg2_set_floor_rate_and_parent, +}; +EXPORT_SYMBOL_GPL(clk_rcg2_floor_ops); + static int clk_rcg2_shared_force_enable(struct clk_hw *hw, unsigned long rate) { struct clk_rcg2 *rcg = to_clk_rcg2(hw); @@ -323,7 +381,7 @@ static int clk_rcg2_shared_force_enable(struct clk_hw *hw, unsigned long rate) pr_err("%s: RCG did not turn on\n", name); /* set clock rate */ - ret = __clk_rcg2_set_rate(hw, rate); + ret = __clk_rcg2_set_rate(hw, rate, CEIL); if (ret) return ret; diff --git a/drivers/clk/qcom/clk-regmap-mux-div.c b/drivers/clk/qcom/clk-regmap-mux-div.c new file mode 100644 index 000000000000..682dbf906c89 --- /dev/null +++ b/drivers/clk/qcom/clk-regmap-mux-div.c @@ -0,0 +1,237 @@ +/* + * Copyright (c) 2015, Linaro Limited + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/bitops.h> +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/regmap.h> + +#include "clk-regmap-mux-div.h" + +#define CMD_RCGR 0x0 +#define CMD_RCGR_UPDATE BIT(0) +#define CMD_RCGR_DIRTY_CFG BIT(4) +#define CMD_RCGR_ROOT_OFF BIT(31) +#define CFG_RCGR 0x4 + +#define to_clk_regmap_mux_div(_hw) \ + container_of(to_clk_regmap(_hw), struct clk_regmap_mux_div, clkr) + +int __mux_div_set_src_div(struct clk_regmap_mux_div *md, u32 src, u32 div) +{ + int ret, count; + u32 val, mask; + const char *name = clk_hw_get_name(&md->clkr.hw); + + val = (div << md->hid_shift) | (src << md->src_shift); + mask = ((BIT(md->hid_width) - 1) << md->hid_shift) | + ((BIT(md->src_width) - 1) << md->src_shift); + + ret = regmap_update_bits(md->clkr.regmap, CFG_RCGR + md->reg_offset, + mask, val); + if (ret) + return ret; + + ret = regmap_update_bits(md->clkr.regmap, CMD_RCGR + md->reg_offset, + CMD_RCGR_UPDATE, CMD_RCGR_UPDATE); + if (ret) + return ret; + + /* Wait for update to take effect */ + for (count = 500; count > 0; count--) { + ret = regmap_read(md->clkr.regmap, CMD_RCGR + md->reg_offset, + &val); + if (ret) + return ret; + if (!(val & CMD_RCGR_UPDATE)) + return 0; + udelay(1); + } + + pr_err("%s: RCG did not update its configuration", name); + return -EBUSY; +} + +static void __mux_div_get_src_div(struct clk_regmap_mux_div *md, u32 *src, + u32 *div) +{ + u32 val, d, s; + const char *name = clk_hw_get_name(&md->clkr.hw); + + regmap_read(md->clkr.regmap, CMD_RCGR + md->reg_offset, &val); + + if (val & CMD_RCGR_DIRTY_CFG) { + pr_err("%s: RCG configuration is pending\n", name); + return; + } + + regmap_read(md->clkr.regmap, CFG_RCGR + md->reg_offset, &val); + s = (val >> md->src_shift); + s &= 
BIT(md->src_width) - 1; + *src = s; + + d = (val >> md->hid_shift); + d &= BIT(md->hid_width) - 1; + *div = d; +} + +static inline bool is_better_rate(unsigned long req, unsigned long best, + unsigned long new) +{ + return (req <= new && new < best) || (best < req && best < new); +} + +static int mux_div_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw); + unsigned int i, div, max_div; + unsigned long actual_rate, best_rate = 0; + unsigned long req_rate = req->rate; + + for (i = 0; i < clk_hw_get_num_parents(hw); i++) { + struct clk_hw *parent = clk_hw_get_parent_by_index(hw, i); + unsigned long parent_rate = clk_hw_get_rate(parent); + + max_div = BIT(md->hid_width) - 1; + for (div = 1; div < max_div; div++) { + parent_rate = mult_frac(req_rate, div, 2); + parent_rate = clk_hw_round_rate(parent, parent_rate); + actual_rate = mult_frac(parent_rate, 2, div); + + if (is_better_rate(req_rate, best_rate, actual_rate)) { + best_rate = actual_rate; + req->rate = best_rate; + req->best_parent_rate = parent_rate; + req->best_parent_hw = parent; + } + + if (actual_rate < req_rate || best_rate <= req_rate) + break; + } + } + + if (!best_rate) + return -EINVAL; + + return 0; +} + +static int __mux_div_set_rate_and_parent(struct clk_hw *hw, unsigned long rate, + unsigned long prate, u32 src) +{ + struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw); + int ret; + u32 div, max_div, best_src = 0, best_div = 0; + unsigned int i; + unsigned long actual_rate, best_rate = 0; + + for (i = 0; i < clk_hw_get_num_parents(hw); i++) { + struct clk_hw *parent = clk_hw_get_parent_by_index(hw, i); + unsigned long parent_rate = clk_hw_get_rate(parent); + + max_div = BIT(md->hid_width) - 1; + for (div = 1; div < max_div; div++) { + parent_rate = mult_frac(rate, div, 2); + parent_rate = clk_hw_round_rate(parent, parent_rate); + actual_rate = mult_frac(parent_rate, 2, div); + + if (is_better_rate(rate, best_rate, 
actual_rate)) { + best_rate = actual_rate; + best_src = md->parent_map[i].cfg; + best_div = div - 1; + } + + if (actual_rate < rate || best_rate <= rate) + break; + } + } + + ret = __mux_div_set_src_div(md, best_src, best_div); + if (!ret) { + md->div = best_div; + md->src = best_src; + } + + return ret; +} + +static u8 mux_div_get_parent(struct clk_hw *hw) +{ + struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw); + const char *name = clk_hw_get_name(hw); + u32 i, div, src = 0; + + __mux_div_get_src_div(md, &src, &div); + + for (i = 0; i < clk_hw_get_num_parents(hw); i++) + if (src == md->parent_map[i].cfg) + return i; + + pr_err("%s: Can't find parent with src %d\n", name, src); + return 0; +} + +static int mux_div_set_parent(struct clk_hw *hw, u8 index) +{ + struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw); + + return __mux_div_set_src_div(md, md->parent_map[index].cfg, md->div); +} + +static int mux_div_set_rate(struct clk_hw *hw, + unsigned long rate, unsigned long prate) +{ + struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw); + + return __mux_div_set_rate_and_parent(hw, rate, prate, md->src); +} + +static int mux_div_set_rate_and_parent(struct clk_hw *hw, unsigned long rate, + unsigned long prate, u8 index) +{ + struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw); + + return __mux_div_set_rate_and_parent(hw, rate, prate, + md->parent_map[index].cfg); +} + +static unsigned long mux_div_recalc_rate(struct clk_hw *hw, unsigned long prate) +{ + struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw); + u32 div, src; + int i, num_parents = clk_hw_get_num_parents(hw); + const char *name = clk_hw_get_name(hw); + + __mux_div_get_src_div(md, &src, &div); + for (i = 0; i < num_parents; i++) + if (src == md->parent_map[i].cfg) { + struct clk_hw *p = clk_hw_get_parent_by_index(hw, i); + unsigned long parent_rate = clk_hw_get_rate(p); + + return mult_frac(parent_rate, 2, div + 1); + } + + pr_err("%s: Can't find parent %d\n", name, src); + 
return 0; +} + +const struct clk_ops clk_regmap_mux_div_ops = { + .get_parent = mux_div_get_parent, + .set_parent = mux_div_set_parent, + .set_rate = mux_div_set_rate, + .set_rate_and_parent = mux_div_set_rate_and_parent, + .determine_rate = mux_div_determine_rate, + .recalc_rate = mux_div_recalc_rate, +}; diff --git a/drivers/clk/qcom/clk-regmap-mux-div.h b/drivers/clk/qcom/clk-regmap-mux-div.h new file mode 100644 index 000000000000..3380e8f6e8a1 --- /dev/null +++ b/drivers/clk/qcom/clk-regmap-mux-div.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2015, Linaro Limited + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __QCOM_CLK_REGMAP_MUX_DIV_H__ +#define __QCOM_CLK_REGMAP_MUX_DIV_H__ + +#include <linux/clk-provider.h> +#include "clk-rcg.h" +#include "clk-regmap.h" + +/** + * struct mux_div_clk - combined mux/divider clock + * @reg_offset: offset of the mux/divider register + * @hid_width: number of bits in half integer divider + * @hid_shift: lowest bit of hid value field + * @src_width: number of bits in source select + * @src_shift: lowest bit of source select field + * @div: the divider raw configuration value + * @src: the mux index which will be used if the clock is enabled + * @parent_map: pointer to parent_map struct + * @clkr: handle between common and hardware-specific interfaces + * @clk_nb: clock notifier registered for clock rate changes of the A53 PLL + */ + +struct clk_regmap_mux_div { + u32 reg_offset; + u32 hid_width; + u32 hid_shift; + u32 src_width; + u32 src_shift; + u32 div; + u32 src; + const struct parent_map *parent_map; + struct clk_regmap clkr; + struct notifier_block clk_nb; +}; + +extern const struct clk_ops clk_regmap_mux_div_ops; +int __mux_div_set_src_div(struct clk_regmap_mux_div *md, u32 src, u32 div); + +#endif diff --git a/drivers/clk/qcom/clk-rpm.c b/drivers/clk/qcom/clk-rpm.c new file mode 100644 index 000000000000..df3e5fe8442a --- /dev/null +++ b/drivers/clk/qcom/clk-rpm.c @@ -0,0 +1,497 @@ +/* + * Copyright (c) 2016, Linaro Limited + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/clk-provider.h> +#include <linux/err.h> +#include <linux/export.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/mfd/qcom_rpm.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> + +#include <dt-bindings/mfd/qcom-rpm.h> +#include <dt-bindings/clock/qcom,rpmcc.h> + +#define QCOM_RPM_MISC_CLK_TYPE 0x306b6c63 +#define QCOM_RPM_SCALING_ENABLE_ID 0x2 + +#define DEFINE_CLK_RPM(_platform, _name, _active, r_id) \ + static struct clk_rpm _platform##_##_active; \ + static struct clk_rpm _platform##_##_name = { \ + .rpm_clk_id = (r_id), \ + .peer = &_platform##_##_active, \ + .rate = INT_MAX, \ + .hw.init = &(struct clk_init_data){ \ + .ops = &clk_rpm_ops, \ + .name = #_name, \ + .parent_names = (const char *[]){ "pxo_board" }, \ + .num_parents = 1, \ + }, \ + }; \ + static struct clk_rpm _platform##_##_active = { \ + .rpm_clk_id = (r_id), \ + .peer = &_platform##_##_name, \ + .active_only = true, \ + .rate = INT_MAX, \ + .hw.init = &(struct clk_init_data){ \ + .ops = &clk_rpm_ops, \ + .name = #_active, \ + .parent_names = (const char *[]){ "pxo_board" }, \ + .num_parents = 1, \ + }, \ + } + +#define DEFINE_CLK_RPM_PXO_BRANCH(_platform, _name, _active, r_id, r) \ + static struct clk_rpm _platform##_##_active; \ + static struct clk_rpm _platform##_##_name = { \ + .rpm_clk_id = (r_id), \ + .active_only = true, \ + .peer = &_platform##_##_active, \ + .rate = (r), \ + .branch = true, \ + .hw.init = &(struct clk_init_data){ \ + .ops = &clk_rpm_branch_ops, \ + .name = #_name, \ + .parent_names = (const char *[]){ "pxo_board" }, \ + .num_parents = 1, \ + }, \ + }; \ + static struct clk_rpm _platform##_##_active = { \ + .rpm_clk_id = (r_id), \ + .peer = &_platform##_##_name, \ + .rate = (r), \ + .branch = true, \ + .hw.init = &(struct clk_init_data){ \ + .ops = &clk_rpm_branch_ops, \ + .name = #_active, \ + .parent_names = (const char *[]){ 
"pxo_board" }, \ + .num_parents = 1, \ + }, \ + } + +#define DEFINE_CLK_RPM_CXO_BRANCH(_platform, _name, _active, r_id, r) \ + static struct clk_rpm _platform##_##_active; \ + static struct clk_rpm _platform##_##_name = { \ + .rpm_clk_id = (r_id), \ + .peer = &_platform##_##_active, \ + .rate = (r), \ + .branch = true, \ + .hw.init = &(struct clk_init_data){ \ + .ops = &clk_rpm_branch_ops, \ + .name = #_name, \ + .parent_names = (const char *[]){ "cxo_board" }, \ + .num_parents = 1, \ + }, \ + }; \ + static struct clk_rpm _platform##_##_active = { \ + .rpm_clk_id = (r_id), \ + .active_only = true, \ + .peer = &_platform##_##_name, \ + .rate = (r), \ + .branch = true, \ + .hw.init = &(struct clk_init_data){ \ + .ops = &clk_rpm_branch_ops, \ + .name = #_active, \ + .parent_names = (const char *[]){ "cxo_board" }, \ + .num_parents = 1, \ + }, \ + } + +#define to_clk_rpm(_hw) container_of(_hw, struct clk_rpm, hw) + +struct clk_rpm { + const int rpm_clk_id; + const bool active_only; + unsigned long rate; + bool enabled; + bool branch; + struct clk_rpm *peer; + struct clk_hw hw; + struct qcom_rpm *rpm; +}; + +struct rpm_cc { + struct qcom_rpm *rpm; + struct clk_rpm **clks; + size_t num_clks; +}; + +struct rpm_clk_desc { + struct clk_rpm **clks; + size_t num_clks; +}; + +static DEFINE_MUTEX(rpm_clk_lock); + +static int clk_rpm_handoff(struct clk_rpm *r) +{ + int ret; + u32 value = INT_MAX; + + ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE, + r->rpm_clk_id, &value, 1); + if (ret) + return ret; + ret = qcom_rpm_write(r->rpm, QCOM_RPM_SLEEP_STATE, + r->rpm_clk_id, &value, 1); + if (ret) + return ret; + + return 0; +} + +static int clk_rpm_set_rate_active(struct clk_rpm *r, unsigned long rate) +{ + u32 value = DIV_ROUND_UP(rate, 1000); /* to kHz */ + + return qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE, + r->rpm_clk_id, &value, 1); +} + +static int clk_rpm_set_rate_sleep(struct clk_rpm *r, unsigned long rate) +{ + u32 value = DIV_ROUND_UP(rate, 1000); /* to kHz */ + + 
return qcom_rpm_write(r->rpm, QCOM_RPM_SLEEP_STATE, + r->rpm_clk_id, &value, 1); +} + +static void to_active_sleep(struct clk_rpm *r, unsigned long rate, + unsigned long *active, unsigned long *sleep) +{ + *active = rate; + + /* + * Active-only clocks don't care what the rate is during sleep. So, + * they vote for zero. + */ + if (r->active_only) + *sleep = 0; + else + *sleep = *active; +} + +static int clk_rpm_prepare(struct clk_hw *hw) +{ + struct clk_rpm *r = to_clk_rpm(hw); + struct clk_rpm *peer = r->peer; + unsigned long this_rate = 0, this_sleep_rate = 0; + unsigned long peer_rate = 0, peer_sleep_rate = 0; + unsigned long active_rate, sleep_rate; + int ret = 0; + + mutex_lock(&rpm_clk_lock); + + /* Don't send requests to the RPM if the rate has not been set. */ + if (!r->rate) + goto out; + + to_active_sleep(r, r->rate, &this_rate, &this_sleep_rate); + + /* Take peer clock's rate into account only if it's enabled. */ + if (peer->enabled) + to_active_sleep(peer, peer->rate, + &peer_rate, &peer_sleep_rate); + + active_rate = max(this_rate, peer_rate); + + if (r->branch) + active_rate = !!active_rate; + + ret = clk_rpm_set_rate_active(r, active_rate); + if (ret) + goto out; + + sleep_rate = max(this_sleep_rate, peer_sleep_rate); + if (r->branch) + sleep_rate = !!sleep_rate; + + ret = clk_rpm_set_rate_sleep(r, sleep_rate); + if (ret) + /* Undo the active set vote and restore it */ + ret = clk_rpm_set_rate_active(r, peer_rate); + +out: + if (!ret) + r->enabled = true; + + mutex_unlock(&rpm_clk_lock); + + return ret; +} + +static void clk_rpm_unprepare(struct clk_hw *hw) +{ + struct clk_rpm *r = to_clk_rpm(hw); + struct clk_rpm *peer = r->peer; + unsigned long peer_rate = 0, peer_sleep_rate = 0; + unsigned long active_rate, sleep_rate; + int ret; + + mutex_lock(&rpm_clk_lock); + + if (!r->rate) + goto out; + + /* Take peer clock's rate into account only if it's enabled. 
*/ + if (peer->enabled) + to_active_sleep(peer, peer->rate, &peer_rate, + &peer_sleep_rate); + + active_rate = r->branch ? !!peer_rate : peer_rate; + ret = clk_rpm_set_rate_active(r, active_rate); + if (ret) + goto out; + + sleep_rate = r->branch ? !!peer_sleep_rate : peer_sleep_rate; + ret = clk_rpm_set_rate_sleep(r, sleep_rate); + if (ret) + goto out; + + r->enabled = false; + +out: + mutex_unlock(&rpm_clk_lock); +} + +static int clk_rpm_set_rate(struct clk_hw *hw, + unsigned long rate, unsigned long parent_rate) +{ + struct clk_rpm *r = to_clk_rpm(hw); + struct clk_rpm *peer = r->peer; + unsigned long active_rate, sleep_rate; + unsigned long this_rate = 0, this_sleep_rate = 0; + unsigned long peer_rate = 0, peer_sleep_rate = 0; + int ret = 0; + + mutex_lock(&rpm_clk_lock); + + if (!r->enabled) + goto out; + + to_active_sleep(r, rate, &this_rate, &this_sleep_rate); + + /* Take peer clock's rate into account only if it's enabled. */ + if (peer->enabled) + to_active_sleep(peer, peer->rate, + &peer_rate, &peer_sleep_rate); + + active_rate = max(this_rate, peer_rate); + ret = clk_rpm_set_rate_active(r, active_rate); + if (ret) + goto out; + + sleep_rate = max(this_sleep_rate, peer_sleep_rate); + ret = clk_rpm_set_rate_sleep(r, sleep_rate); + if (ret) + goto out; + + r->rate = rate; + +out: + mutex_unlock(&rpm_clk_lock); + + return ret; +} + +static long clk_rpm_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + /* + * RPM handles rate rounding and we don't have a way to + * know what the rate will be, so just return whatever + * rate is requested. + */ + return rate; +} + +static unsigned long clk_rpm_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_rpm *r = to_clk_rpm(hw); + + /* + * RPM handles rate rounding and we don't have a way to + * know what the rate will be, so just return whatever + * rate was set. 
+ */ + return r->rate; +} + +static const struct clk_ops clk_rpm_ops = { + .prepare = clk_rpm_prepare, + .unprepare = clk_rpm_unprepare, + .set_rate = clk_rpm_set_rate, + .round_rate = clk_rpm_round_rate, + .recalc_rate = clk_rpm_recalc_rate, +}; + +static const struct clk_ops clk_rpm_branch_ops = { + .prepare = clk_rpm_prepare, + .unprepare = clk_rpm_unprepare, + .round_rate = clk_rpm_round_rate, + .recalc_rate = clk_rpm_recalc_rate, +}; + +/* apq8064 */ +DEFINE_CLK_RPM(apq8064, afab_clk, afab_a_clk, QCOM_RPM_APPS_FABRIC_CLK); +DEFINE_CLK_RPM(apq8064, cfpb_clk, cfpb_a_clk, QCOM_RPM_CFPB_CLK); +DEFINE_CLK_RPM(apq8064, daytona_clk, daytona_a_clk, QCOM_RPM_DAYTONA_FABRIC_CLK); +DEFINE_CLK_RPM(apq8064, ebi1_clk, ebi1_a_clk, QCOM_RPM_EBI1_CLK); +DEFINE_CLK_RPM(apq8064, mmfab_clk, mmfab_a_clk, QCOM_RPM_MM_FABRIC_CLK); +DEFINE_CLK_RPM(apq8064, mmfpb_clk, mmfpb_a_clk, QCOM_RPM_MMFPB_CLK); +DEFINE_CLK_RPM(apq8064, sfab_clk, sfab_a_clk, QCOM_RPM_SYS_FABRIC_CLK); +DEFINE_CLK_RPM(apq8064, sfpb_clk, sfpb_a_clk, QCOM_RPM_SFPB_CLK); +DEFINE_CLK_RPM(apq8064, qdss_clk, qdss_a_clk, QCOM_RPM_QDSS_CLK); + +static struct clk_rpm *apq8064_clks[] = { + [RPM_APPS_FABRIC_CLK] = &apq8064_afab_clk, + [RPM_APPS_FABRIC_A_CLK] = &apq8064_afab_a_clk, + [RPM_CFPB_CLK] = &apq8064_cfpb_clk, + [RPM_CFPB_A_CLK] = &apq8064_cfpb_a_clk, + [RPM_DAYTONA_FABRIC_CLK] = &apq8064_daytona_clk, + [RPM_DAYTONA_FABRIC_A_CLK] = &apq8064_daytona_a_clk, + [RPM_EBI1_CLK] = &apq8064_ebi1_clk, + [RPM_EBI1_A_CLK] = &apq8064_ebi1_a_clk, + [RPM_MM_FABRIC_CLK] = &apq8064_mmfab_clk, + [RPM_MM_FABRIC_A_CLK] = &apq8064_mmfab_a_clk, + [RPM_MMFPB_CLK] = &apq8064_mmfpb_clk, + [RPM_MMFPB_A_CLK] = &apq8064_mmfpb_a_clk, + [RPM_SYS_FABRIC_CLK] = &apq8064_sfab_clk, + [RPM_SYS_FABRIC_A_CLK] = &apq8064_sfab_a_clk, + [RPM_SFPB_CLK] = &apq8064_sfpb_clk, + [RPM_SFPB_A_CLK] = &apq8064_sfpb_a_clk, + [RPM_QDSS_CLK] = &apq8064_qdss_clk, + [RPM_QDSS_A_CLK] = &apq8064_qdss_a_clk, +}; + +static const struct rpm_clk_desc rpm_clk_apq8064 = { + 
.clks = apq8064_clks, + .num_clks = ARRAY_SIZE(apq8064_clks), +}; + +static const struct of_device_id rpm_clk_match_table[] = { + { .compatible = "qcom,rpmcc-apq8064", .data = &rpm_clk_apq8064 }, + { } +}; +MODULE_DEVICE_TABLE(of, rpm_clk_match_table); + +static struct clk_hw *qcom_rpm_clk_hw_get(struct of_phandle_args *clkspec, + void *data) +{ + struct rpm_cc *rcc = data; + unsigned int idx = clkspec->args[0]; + + if (idx >= rcc->num_clks) { + pr_err("%s: invalid index %u\n", __func__, idx); + return ERR_PTR(-EINVAL); + } + + return rcc->clks[idx] ? &rcc->clks[idx]->hw : ERR_PTR(-ENOENT); +} + +static int rpm_clk_probe(struct platform_device *pdev) +{ + struct rpm_cc *rcc; + int ret; + size_t num_clks, i; + struct qcom_rpm *rpm; + struct clk_rpm **rpm_clks; + const struct rpm_clk_desc *desc; + + rpm = dev_get_drvdata(pdev->dev.parent); + if (!rpm) { + dev_err(&pdev->dev, "Unable to retrieve handle to RPM\n"); + return -ENODEV; + } + + desc = of_device_get_match_data(&pdev->dev); + if (!desc) + return -EINVAL; + + rpm_clks = desc->clks; + num_clks = desc->num_clks; + + rcc = devm_kzalloc(&pdev->dev, sizeof(*rcc), GFP_KERNEL); + if (!rcc) + return -ENOMEM; + + rcc->clks = rpm_clks; + rcc->num_clks = num_clks; + + for (i = 0; i < num_clks; i++) { + if (!rpm_clks[i]) + continue; + + rpm_clks[i]->rpm = rpm; + + ret = clk_rpm_handoff(rpm_clks[i]); + if (ret) + goto err; + } + + for (i = 0; i < num_clks; i++) { + if (!rpm_clks[i]) + continue; + + ret = devm_clk_hw_register(&pdev->dev, &rpm_clks[i]->hw); + if (ret) + goto err; + } + + ret = of_clk_add_hw_provider(pdev->dev.of_node, qcom_rpm_clk_hw_get, + rcc); + if (ret) + goto err; + + return 0; +err: + dev_err(&pdev->dev, "Error registering RPM Clock driver (%d)\n", ret); + return ret; +} + +static int rpm_clk_remove(struct platform_device *pdev) +{ + of_clk_del_provider(pdev->dev.of_node); + return 0; +} + +static struct platform_driver rpm_clk_driver = { + .driver = { + .name = "qcom-clk-rpm", + .of_match_table = 
rpm_clk_match_table, + }, + .probe = rpm_clk_probe, + .remove = rpm_clk_remove, +}; + +static int __init rpm_clk_init(void) +{ + return platform_driver_register(&rpm_clk_driver); +} +core_initcall(rpm_clk_init); + +static void __exit rpm_clk_exit(void) +{ + platform_driver_unregister(&rpm_clk_driver); +} +module_exit(rpm_clk_exit); + +MODULE_DESCRIPTION("Qualcomm RPM Clock Controller Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:qcom-clk-rpm"); diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c new file mode 100644 index 000000000000..78a7eed7b2ae --- /dev/null +++ b/drivers/clk/qcom/clk-smd-rpm.c @@ -0,0 +1,585 @@ +/* + * Copyright (c) 2016, Linaro Limited + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/err.h> +#include <linux/export.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/soc/qcom/smd-rpm.h> + +#include <dt-bindings/clock/qcom,rpmcc.h> +#include <dt-bindings/mfd/qcom-rpm.h> + +#define QCOM_RPM_KEY_SOFTWARE_ENABLE 0x6e657773 +#define QCOM_RPM_KEY_PIN_CTRL_CLK_BUFFER_ENABLE_KEY 0x62636370 +#define QCOM_RPM_SMD_KEY_RATE 0x007a484b +#define QCOM_RPM_SMD_KEY_ENABLE 0x62616e45 +#define QCOM_RPM_SMD_KEY_STATE 0x54415453 +#define QCOM_RPM_SCALING_ENABLE_ID 0x2 + +#define __DEFINE_CLK_SMD_RPM(_platform, _name, _active, type, r_id, stat_id, \ + key) \ + static struct clk_smd_rpm _platform##_##_active; \ + static struct clk_smd_rpm _platform##_##_name = { \ + .rpm_res_type = (type), \ + .rpm_clk_id = (r_id), \ + .rpm_status_id = (stat_id), \ + .rpm_key = (key), \ + .peer = &_platform##_##_active, \ + .rate = INT_MAX, \ + .hw.init = &(struct clk_init_data){ \ + .ops = &clk_smd_rpm_ops, \ + .name = #_name, \ + .parent_names = (const char *[]){ "xo_board" }, \ + .num_parents = 1, \ + }, \ + }; \ + static struct clk_smd_rpm _platform##_##_active = { \ + .rpm_res_type = (type), \ + .rpm_clk_id = (r_id), \ + .rpm_status_id = (stat_id), \ + .active_only = true, \ + .rpm_key = (key), \ + .peer = &_platform##_##_name, \ + .rate = INT_MAX, \ + .hw.init = &(struct clk_init_data){ \ + .ops = &clk_smd_rpm_ops, \ + .name = #_active, \ + .parent_names = (const char *[]){ "xo_board" }, \ + .num_parents = 1, \ + }, \ + } + +#define __DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, type, r_id, \ + stat_id, r, key) \ + static struct clk_smd_rpm _platform##_##_active; \ + static struct clk_smd_rpm _platform##_##_name = { \ + .rpm_res_type = (type), \ + .rpm_clk_id = (r_id), \ + .rpm_status_id = (stat_id), \ + .rpm_key = (key), \ + .branch 
= true, \ + .peer = &_platform##_##_active, \ + .rate = (r), \ + .hw.init = &(struct clk_init_data){ \ + .ops = &clk_smd_rpm_branch_ops, \ + .name = #_name, \ + .parent_names = (const char *[]){ "xo_board" }, \ + .num_parents = 1, \ + }, \ + }; \ + static struct clk_smd_rpm _platform##_##_active = { \ + .rpm_res_type = (type), \ + .rpm_clk_id = (r_id), \ + .rpm_status_id = (stat_id), \ + .active_only = true, \ + .rpm_key = (key), \ + .branch = true, \ + .peer = &_platform##_##_name, \ + .rate = (r), \ + .hw.init = &(struct clk_init_data){ \ + .ops = &clk_smd_rpm_branch_ops, \ + .name = #_active, \ + .parent_names = (const char *[]){ "xo_board" }, \ + .num_parents = 1, \ + }, \ + } + +#define DEFINE_CLK_SMD_RPM(_platform, _name, _active, type, r_id) \ + __DEFINE_CLK_SMD_RPM(_platform, _name, _active, type, r_id, \ + 0, QCOM_RPM_SMD_KEY_RATE) + +#define DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, type, r_id, r) \ + __DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, type, \ + r_id, 0, r, QCOM_RPM_SMD_KEY_ENABLE) + +#define DEFINE_CLK_SMD_RPM_QDSS(_platform, _name, _active, type, r_id) \ + __DEFINE_CLK_SMD_RPM(_platform, _name, _active, type, r_id, \ + 0, QCOM_RPM_SMD_KEY_STATE) + +#define DEFINE_CLK_SMD_RPM_XO_BUFFER(_platform, _name, _active, r_id) \ + __DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, \ + QCOM_SMD_RPM_CLK_BUF_A, r_id, 0, 1000, \ + QCOM_RPM_KEY_SOFTWARE_ENABLE) + +#define DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(_platform, _name, _active, r_id) \ + __DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, \ + QCOM_SMD_RPM_CLK_BUF_A, r_id, 0, 1000, \ + QCOM_RPM_KEY_PIN_CTRL_CLK_BUFFER_ENABLE_KEY) + +#define to_clk_smd_rpm(_hw) container_of(_hw, struct clk_smd_rpm, hw) + +struct clk_smd_rpm { + const int rpm_res_type; + const int rpm_key; + const int rpm_clk_id; + const int rpm_status_id; + const bool active_only; + bool enabled; + bool branch; + struct clk_smd_rpm *peer; + struct clk_hw hw; + unsigned long rate; + struct qcom_smd_rpm *rpm; 
+}; + +struct clk_smd_rpm_req { + __le32 key; + __le32 nbytes; + __le32 value; +}; + +struct rpm_cc { + struct qcom_rpm *rpm; + struct clk_smd_rpm **clks; + size_t num_clks; +}; + +struct rpm_smd_clk_desc { + struct clk_smd_rpm **clks; + size_t num_clks; +}; + +static DEFINE_MUTEX(rpm_smd_clk_lock); + +static int clk_smd_rpm_handoff(struct clk_smd_rpm *r) +{ + int ret; + struct clk_smd_rpm_req req = { + .key = cpu_to_le32(r->rpm_key), + .nbytes = cpu_to_le32(sizeof(u32)), + .value = cpu_to_le32(INT_MAX), + }; + + ret = qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_ACTIVE_STATE, + r->rpm_res_type, r->rpm_clk_id, &req, + sizeof(req)); + if (ret) + return ret; + ret = qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_SLEEP_STATE, + r->rpm_res_type, r->rpm_clk_id, &req, + sizeof(req)); + if (ret) + return ret; + + return 0; +} + +static int clk_smd_rpm_set_rate_active(struct clk_smd_rpm *r, + unsigned long rate) +{ + struct clk_smd_rpm_req req = { + .key = cpu_to_le32(r->rpm_key), + .nbytes = cpu_to_le32(sizeof(u32)), + .value = cpu_to_le32(DIV_ROUND_UP(rate, 1000)), /* to kHz */ + }; + + return qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_ACTIVE_STATE, + r->rpm_res_type, r->rpm_clk_id, &req, + sizeof(req)); +} + +static int clk_smd_rpm_set_rate_sleep(struct clk_smd_rpm *r, + unsigned long rate) +{ + struct clk_smd_rpm_req req = { + .key = cpu_to_le32(r->rpm_key), + .nbytes = cpu_to_le32(sizeof(u32)), + .value = cpu_to_le32(DIV_ROUND_UP(rate, 1000)), /* to kHz */ + }; + + return qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_SLEEP_STATE, + r->rpm_res_type, r->rpm_clk_id, &req, + sizeof(req)); +} + +static void to_active_sleep(struct clk_smd_rpm *r, unsigned long rate, + unsigned long *active, unsigned long *sleep) +{ + *active = rate; + + /* + * Active-only clocks don't care what the rate is during sleep. So, + * they vote for zero. 
+ */ + if (r->active_only) + *sleep = 0; + else + *sleep = *active; +} + +static int clk_smd_rpm_prepare(struct clk_hw *hw) +{ + struct clk_smd_rpm *r = to_clk_smd_rpm(hw); + struct clk_smd_rpm *peer = r->peer; + unsigned long this_rate = 0, this_sleep_rate = 0; + unsigned long peer_rate = 0, peer_sleep_rate = 0; + unsigned long active_rate, sleep_rate; + int ret = 0; + + mutex_lock(&rpm_smd_clk_lock); + + /* Don't send requests to the RPM if the rate has not been set. */ + if (!r->rate) + goto out; + + to_active_sleep(r, r->rate, &this_rate, &this_sleep_rate); + + /* Take peer clock's rate into account only if it's enabled. */ + if (peer->enabled) + to_active_sleep(peer, peer->rate, + &peer_rate, &peer_sleep_rate); + + active_rate = max(this_rate, peer_rate); + + if (r->branch) + active_rate = !!active_rate; + + ret = clk_smd_rpm_set_rate_active(r, active_rate); + if (ret) + goto out; + + sleep_rate = max(this_sleep_rate, peer_sleep_rate); + if (r->branch) + sleep_rate = !!sleep_rate; + + ret = clk_smd_rpm_set_rate_sleep(r, sleep_rate); + if (ret) + /* Undo the active set vote and restore it */ + ret = clk_smd_rpm_set_rate_active(r, peer_rate); + +out: + if (!ret) + r->enabled = true; + + mutex_unlock(&rpm_smd_clk_lock); + + return ret; +} + +static void clk_smd_rpm_unprepare(struct clk_hw *hw) +{ + struct clk_smd_rpm *r = to_clk_smd_rpm(hw); + struct clk_smd_rpm *peer = r->peer; + unsigned long peer_rate = 0, peer_sleep_rate = 0; + unsigned long active_rate, sleep_rate; + int ret; + + mutex_lock(&rpm_smd_clk_lock); + + if (!r->rate) + goto out; + + /* Take peer clock's rate into account only if it's enabled. */ + if (peer->enabled) + to_active_sleep(peer, peer->rate, &peer_rate, + &peer_sleep_rate); + + active_rate = r->branch ? !!peer_rate : peer_rate; + ret = clk_smd_rpm_set_rate_active(r, active_rate); + if (ret) + goto out; + + sleep_rate = r->branch ? 
!!peer_sleep_rate : peer_sleep_rate; + ret = clk_smd_rpm_set_rate_sleep(r, sleep_rate); + if (ret) + goto out; + + r->enabled = false; + +out: + mutex_unlock(&rpm_smd_clk_lock); +} + +static int clk_smd_rpm_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct clk_smd_rpm *r = to_clk_smd_rpm(hw); + struct clk_smd_rpm *peer = r->peer; + unsigned long active_rate, sleep_rate; + unsigned long this_rate = 0, this_sleep_rate = 0; + unsigned long peer_rate = 0, peer_sleep_rate = 0; + int ret = 0; + + mutex_lock(&rpm_smd_clk_lock); + + if (!r->enabled) + goto out; + + to_active_sleep(r, rate, &this_rate, &this_sleep_rate); + + /* Take peer clock's rate into account only if it's enabled. */ + if (peer->enabled) + to_active_sleep(peer, peer->rate, + &peer_rate, &peer_sleep_rate); + + active_rate = max(this_rate, peer_rate); + ret = clk_smd_rpm_set_rate_active(r, active_rate); + if (ret) + goto out; + + sleep_rate = max(this_sleep_rate, peer_sleep_rate); + ret = clk_smd_rpm_set_rate_sleep(r, sleep_rate); + if (ret) + goto out; + + r->rate = rate; + +out: + mutex_unlock(&rpm_smd_clk_lock); + + return ret; +} + +static long clk_smd_rpm_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + /* + * RPM handles rate rounding and we don't have a way to + * know what the rate will be, so just return whatever + * rate is requested. + */ + return rate; +} + +static unsigned long clk_smd_rpm_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_smd_rpm *r = to_clk_smd_rpm(hw); + + /* + * RPM handles rate rounding and we don't have a way to + * know what the rate will be, so just return whatever + * rate was set. 
+ */ + return r->rate; +} + +static int clk_smd_rpm_enable_scaling(struct qcom_smd_rpm *rpm) +{ + int ret; + struct clk_smd_rpm_req req = { + .key = cpu_to_le32(QCOM_RPM_SMD_KEY_ENABLE), + .nbytes = cpu_to_le32(sizeof(u32)), + .value = cpu_to_le32(1), + }; + + ret = qcom_rpm_smd_write(rpm, QCOM_SMD_RPM_SLEEP_STATE, + QCOM_SMD_RPM_MISC_CLK, + QCOM_RPM_SCALING_ENABLE_ID, &req, sizeof(req)); + if (ret) { + pr_err("RPM clock scaling (sleep set) not enabled!\n"); + return ret; + } + + ret = qcom_rpm_smd_write(rpm, QCOM_SMD_RPM_ACTIVE_STATE, + QCOM_SMD_RPM_MISC_CLK, + QCOM_RPM_SCALING_ENABLE_ID, &req, sizeof(req)); + if (ret) { + pr_err("RPM clock scaling (active set) not enabled!\n"); + return ret; + } + + pr_debug("%s: RPM clock scaling is enabled\n", __func__); + return 0; +} + +static const struct clk_ops clk_smd_rpm_ops = { + .prepare = clk_smd_rpm_prepare, + .unprepare = clk_smd_rpm_unprepare, + .set_rate = clk_smd_rpm_set_rate, + .round_rate = clk_smd_rpm_round_rate, + .recalc_rate = clk_smd_rpm_recalc_rate, +}; + +static const struct clk_ops clk_smd_rpm_branch_ops = { + .prepare = clk_smd_rpm_prepare, + .unprepare = clk_smd_rpm_unprepare, + .round_rate = clk_smd_rpm_round_rate, + .recalc_rate = clk_smd_rpm_recalc_rate, +}; + +/* msm8916 */ +DEFINE_CLK_SMD_RPM(msm8916, pcnoc_clk, pcnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0); +DEFINE_CLK_SMD_RPM(msm8916, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1); +DEFINE_CLK_SMD_RPM(msm8916, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0); +DEFINE_CLK_SMD_RPM_QDSS(msm8916, qdss_clk, qdss_a_clk, QCOM_SMD_RPM_MISC_CLK, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, bb_clk1, bb_clk1_a, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, bb_clk2, bb_clk2_a, 2); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, rf_clk1, rf_clk1_a, 4); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, rf_clk2, rf_clk2_a, 5); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, bb_clk1_pin, bb_clk1_a_pin, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, bb_clk2_pin, bb_clk2_a_pin, 2); 
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, rf_clk1_pin, rf_clk1_a_pin, 4); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, rf_clk2_pin, rf_clk2_a_pin, 5); + +static struct clk_smd_rpm *msm8916_clks[] = { + [RPM_SMD_PCNOC_CLK] = &msm8916_pcnoc_clk, + [RPM_SMD_PCNOC_A_CLK] = &msm8916_pcnoc_a_clk, + [RPM_SMD_SNOC_CLK] = &msm8916_snoc_clk, + [RPM_SMD_SNOC_A_CLK] = &msm8916_snoc_a_clk, + [RPM_SMD_BIMC_CLK] = &msm8916_bimc_clk, + [RPM_SMD_BIMC_A_CLK] = &msm8916_bimc_a_clk, + [RPM_SMD_QDSS_CLK] = &msm8916_qdss_clk, + [RPM_SMD_QDSS_A_CLK] = &msm8916_qdss_a_clk, + [RPM_SMD_BB_CLK1] = &msm8916_bb_clk1, + [RPM_SMD_BB_CLK1_A] = &msm8916_bb_clk1_a, + [RPM_SMD_BB_CLK2] = &msm8916_bb_clk2, + [RPM_SMD_BB_CLK2_A] = &msm8916_bb_clk2_a, + [RPM_SMD_RF_CLK1] = &msm8916_rf_clk1, + [RPM_SMD_RF_CLK1_A] = &msm8916_rf_clk1_a, + [RPM_SMD_RF_CLK2] = &msm8916_rf_clk2, + [RPM_SMD_RF_CLK2_A] = &msm8916_rf_clk2_a, + [RPM_SMD_BB_CLK1_PIN] = &msm8916_bb_clk1_pin, + [RPM_SMD_BB_CLK1_A_PIN] = &msm8916_bb_clk1_a_pin, + [RPM_SMD_BB_CLK2_PIN] = &msm8916_bb_clk2_pin, + [RPM_SMD_BB_CLK2_A_PIN] = &msm8916_bb_clk2_a_pin, + [RPM_SMD_RF_CLK1_PIN] = &msm8916_rf_clk1_pin, + [RPM_SMD_RF_CLK1_A_PIN] = &msm8916_rf_clk1_a_pin, + [RPM_SMD_RF_CLK2_PIN] = &msm8916_rf_clk2_pin, + [RPM_SMD_RF_CLK2_A_PIN] = &msm8916_rf_clk2_a_pin, +}; + +static const struct rpm_smd_clk_desc rpm_clk_msm8916 = { + .clks = msm8916_clks, + .num_clks = ARRAY_SIZE(msm8916_clks), +}; + +static const struct of_device_id rpm_smd_clk_match_table[] = { + { .compatible = "qcom,rpmcc-msm8916", .data = &rpm_clk_msm8916 }, + { } +}; +MODULE_DEVICE_TABLE(of, rpm_smd_clk_match_table); + +static struct clk_hw *qcom_smdrpm_clk_hw_get(struct of_phandle_args *clkspec, + void *data) +{ + struct rpm_cc *rcc = data; + unsigned int idx = clkspec->args[0]; + + if (idx >= rcc->num_clks) { + pr_err("%s: invalid index %u\n", __func__, idx); + return ERR_PTR(-EINVAL); + } + + return rcc->clks[idx] ? 
&rcc->clks[idx]->hw : ERR_PTR(-ENOENT); +} + +static int rpm_smd_clk_probe(struct platform_device *pdev) +{ + struct rpm_cc *rcc; + int ret; + size_t num_clks, i; + struct qcom_smd_rpm *rpm; + struct clk_smd_rpm **rpm_smd_clks; + const struct rpm_smd_clk_desc *desc; + struct clk *clk; + + rpm = dev_get_drvdata(pdev->dev.parent); + if (!rpm) { + dev_err(&pdev->dev, "Unable to retrieve handle to RPM\n"); + return -ENODEV; + } + + desc = of_device_get_match_data(&pdev->dev); + if (!desc) + return -EINVAL; + + rpm_smd_clks = desc->clks; + num_clks = desc->num_clks; + + rcc = devm_kzalloc(&pdev->dev, sizeof(*rcc), GFP_KERNEL); + if (!rcc) + return -ENOMEM; + + rcc->clks = rpm_smd_clks; + rcc->num_clks = num_clks; + + for (i = 0; i < num_clks; i++) { + if (!rpm_smd_clks[i]) + continue; + + rpm_smd_clks[i]->rpm = rpm; + + ret = clk_smd_rpm_handoff(rpm_smd_clks[i]); + if (ret) + goto err; + } + + ret = clk_smd_rpm_enable_scaling(rpm); + if (ret) + goto err; + + for (i = 0; i < num_clks; i++) { + if (!rpm_smd_clks[i]) + continue; + + clk = devm_clk_register(&pdev->dev, &rpm_smd_clks[i]->hw); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); + goto err; + } + + clk_set_rate(clk, INT_MAX); + clk_prepare_enable(clk); + } + + ret = of_clk_add_hw_provider(pdev->dev.of_node, qcom_smdrpm_clk_hw_get, + rcc); + if (ret) + goto err; + + return 0; +err: + dev_err(&pdev->dev, "Error registering SMD clock driver (%d)\n", ret); + return ret; +} + +static int rpm_smd_clk_remove(struct platform_device *pdev) +{ + of_clk_del_provider(pdev->dev.of_node); + return 0; +} + +static struct platform_driver rpm_smd_clk_driver = { + .driver = { + .name = "qcom-clk-smd-rpm", + .of_match_table = rpm_smd_clk_match_table, + }, + .probe = rpm_smd_clk_probe, + .remove = rpm_smd_clk_remove, +}; + +static int __init rpm_smd_clk_init(void) +{ + return platform_driver_register(&rpm_smd_clk_driver); +} +core_initcall(rpm_smd_clk_init); + +static void __exit rpm_smd_clk_exit(void) +{ + 
platform_driver_unregister(&rpm_smd_clk_driver); +} +module_exit(rpm_smd_clk_exit); + +MODULE_DESCRIPTION("Qualcomm RPM over SMD Clock Controller Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:qcom-clk-smd-rpm"); diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c index fffcbaf0fba7..cfab7b400381 100644 --- a/drivers/clk/qcom/common.c +++ b/drivers/clk/qcom/common.c @@ -46,6 +46,22 @@ struct freq_tbl *qcom_find_freq(const struct freq_tbl *f, unsigned long rate) } EXPORT_SYMBOL_GPL(qcom_find_freq); +const struct freq_tbl *qcom_find_freq_floor(const struct freq_tbl *f, + unsigned long rate) +{ + const struct freq_tbl *best = NULL; + + for ( ; f->freq; f++) { + if (rate >= f->freq) + best = f; + else + break; + } + + return best; +} +EXPORT_SYMBOL_GPL(qcom_find_freq_floor); + int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map, u8 src) { int i, num_parents = clk_hw_get_num_parents(hw); @@ -74,6 +90,27 @@ qcom_cc_map(struct platform_device *pdev, const struct qcom_cc_desc *desc) } EXPORT_SYMBOL_GPL(qcom_cc_map); +void +qcom_pll_set_fsm_mode(struct regmap *map, u32 reg, u8 bias_count, u8 lock_count) +{ + u32 val; + u32 mask; + + /* De-assert reset to FSM */ + regmap_update_bits(map, reg, PLL_VOTE_FSM_RESET, 0); + + /* Program bias count and lock count */ + val = bias_count << PLL_BIAS_COUNT_SHIFT | + lock_count << PLL_LOCK_COUNT_SHIFT; + mask = PLL_BIAS_COUNT_MASK << PLL_BIAS_COUNT_SHIFT; + mask |= PLL_LOCK_COUNT_MASK << PLL_LOCK_COUNT_SHIFT; + regmap_update_bits(map, reg, mask, val); + + /* Enable PLL FSM voting */ + regmap_update_bits(map, reg, PLL_VOTE_FSM_ENA, PLL_VOTE_FSM_ENA); +} +EXPORT_SYMBOL_GPL(qcom_pll_set_fsm_mode); + static void qcom_cc_del_clk_provider(void *data) { of_clk_del_provider(data); @@ -153,15 +190,12 @@ int qcom_cc_register_board_clk(struct device *dev, const char *path, const char *name, unsigned long rate) { bool add_factor = true; - struct device_node *node; - - /* The RPM clock driver will add 
the factor clock if present */ - if (IS_ENABLED(CONFIG_QCOM_RPMCC)) { - node = of_find_compatible_node(NULL, NULL, "qcom,rpmcc"); - if (of_device_is_available(node)) - add_factor = false; - of_node_put(node); - } + + /* + * TODO: The RPM clock driver currently does not support the xo clock. + * When xo is added to the RPM clock driver, we should change this + * function to skip registration of xo factor clocks. + */ return _qcom_cc_register_board_clk(dev, path, name, rate, add_factor); } diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h index ae9bdeb21f29..23c1927669ba 100644 --- a/drivers/clk/qcom/common.h +++ b/drivers/clk/qcom/common.h @@ -22,6 +22,13 @@ struct freq_tbl; struct clk_hw; struct parent_map; +#define PLL_LOCK_COUNT_SHIFT 8 +#define PLL_LOCK_COUNT_MASK 0x3f +#define PLL_BIAS_COUNT_SHIFT 14 +#define PLL_BIAS_COUNT_MASK 0x3f +#define PLL_VOTE_FSM_ENA BIT(20) +#define PLL_VOTE_FSM_RESET BIT(21) + struct qcom_cc_desc { const struct regmap_config *config; struct clk_regmap **clks; @@ -34,6 +41,10 @@ struct qcom_cc_desc { extern const struct freq_tbl *qcom_find_freq(const struct freq_tbl *f, unsigned long rate); +extern const struct freq_tbl *qcom_find_freq_floor(const struct freq_tbl *f, + unsigned long rate); +extern void +qcom_pll_set_fsm_mode(struct regmap *m, u32 reg, u8 bias_count, u8 lock_count); extern int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map, u8 src); diff --git a/drivers/clk/qcom/gcc-apq8084.c b/drivers/clk/qcom/gcc-apq8084.c index 070037a29ea5..486d9610355c 100644 --- a/drivers/clk/qcom/gcc-apq8084.c +++ b/drivers/clk/qcom/gcc-apq8084.c @@ -1142,7 +1142,7 @@ static struct clk_rcg2 sdcc1_apps_clk_src = { .name = "sdcc1_apps_clk_src", .parent_names = gcc_xo_gpll0_gpll4, .num_parents = 3, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_floor_ops, }, }; @@ -1156,7 +1156,7 @@ static struct clk_rcg2 sdcc2_apps_clk_src = { .name = "sdcc2_apps_clk_src", .parent_names = gcc_xo_gpll0, .num_parents = 2, - .ops = 
&clk_rcg2_ops, + .ops = &clk_rcg2_floor_ops, }, }; @@ -1170,7 +1170,7 @@ static struct clk_rcg2 sdcc3_apps_clk_src = { .name = "sdcc3_apps_clk_src", .parent_names = gcc_xo_gpll0, .num_parents = 2, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_floor_ops, }, }; @@ -1184,7 +1184,7 @@ static struct clk_rcg2 sdcc4_apps_clk_src = { .name = "sdcc4_apps_clk_src", .parent_names = gcc_xo_gpll0, .num_parents = 2, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_floor_ops, }, }; diff --git a/drivers/clk/qcom/gcc-ipq4019.c b/drivers/clk/qcom/gcc-ipq4019.c index b593065de8db..33d09138f5e5 100644 --- a/drivers/clk/qcom/gcc-ipq4019.c +++ b/drivers/clk/qcom/gcc-ipq4019.c @@ -185,8 +185,7 @@ static struct clk_branch gcc_audio_pwm_clk = { }; static const struct freq_tbl ftbl_gcc_blsp1_qup1_2_i2c_apps_clk[] = { - F(19200000, P_XO, 1, 2, 5), - F(24000000, P_XO, 1, 1, 2), + F(19050000, P_FEPLL200, 10.5, 1, 1), { } }; diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c index 28eb200d0f1e..91d8eb60bb03 100644 --- a/drivers/clk/qcom/gcc-ipq806x.c +++ b/drivers/clk/qcom/gcc-ipq806x.c @@ -30,6 +30,7 @@ #include "clk-pll.h" #include "clk-rcg.h" #include "clk-branch.h" +#include "clk-hfpll.h" #include "reset.h" static struct clk_pll pll0 = { @@ -113,6 +114,85 @@ static struct clk_regmap pll8_vote = { }, }; +static struct hfpll_data hfpll0_data = { + .mode_reg = 0x3200, + .l_reg = 0x3208, + .m_reg = 0x320c, + .n_reg = 0x3210, + .config_reg = 0x3204, + .status_reg = 0x321c, + .config_val = 0x7845c665, + .droop_reg = 0x3214, + .droop_val = 0x0108c000, + .min_rate = 600000000UL, + .max_rate = 1800000000UL, +}; + +static struct clk_hfpll hfpll0 = { + .d = &hfpll0_data, + .clkr.hw.init = &(struct clk_init_data){ + .parent_names = (const char *[]){ "pxo" }, + .num_parents = 1, + .name = "hfpll0", + .ops = &clk_ops_hfpll, + .flags = CLK_IGNORE_UNUSED, + }, + .lock = __SPIN_LOCK_UNLOCKED(hfpll0.lock), +}; + +static struct hfpll_data hfpll1_data = { + .mode_reg = 0x3240, + .l_reg = 
0x3248, + .m_reg = 0x324c, + .n_reg = 0x3250, + .config_reg = 0x3244, + .status_reg = 0x325c, + .config_val = 0x7845c665, + .droop_reg = 0x3314, + .droop_val = 0x0108c000, + .min_rate = 600000000UL, + .max_rate = 1800000000UL, +}; + +static struct clk_hfpll hfpll1 = { + .d = &hfpll1_data, + .clkr.hw.init = &(struct clk_init_data){ + .parent_names = (const char *[]){ "pxo" }, + .num_parents = 1, + .name = "hfpll1", + .ops = &clk_ops_hfpll, + .flags = CLK_IGNORE_UNUSED, + }, + .lock = __SPIN_LOCK_UNLOCKED(hfpll1.lock), +}; + +static struct hfpll_data hfpll_l2_data = { + .mode_reg = 0x3300, + .l_reg = 0x3308, + .m_reg = 0x330c, + .n_reg = 0x3310, + .config_reg = 0x3304, + .status_reg = 0x331c, + .config_val = 0x7845c665, + .droop_reg = 0x3314, + .droop_val = 0x0108c000, + .min_rate = 600000000UL, + .max_rate = 1800000000UL, +}; + +static struct clk_hfpll hfpll_l2 = { + .d = &hfpll_l2_data, + .clkr.hw.init = &(struct clk_init_data){ + .parent_names = (const char *[]){ "pxo" }, + .num_parents = 1, + .name = "hfpll_l2", + .ops = &clk_ops_hfpll, + .flags = CLK_IGNORE_UNUSED, + }, + .lock = __SPIN_LOCK_UNLOCKED(hfpll_l2.lock), +}; + + static struct clk_pll pll14 = { .l_reg = 0x31c4, .m_reg = 0x31c8, @@ -2800,6 +2880,9 @@ static struct clk_regmap *gcc_ipq806x_clks[] = { [UBI32_CORE2_CLK_SRC] = &ubi32_core2_src_clk.clkr, [NSSTCM_CLK_SRC] = &nss_tcm_src.clkr, [NSSTCM_CLK] = &nss_tcm_clk.clkr, + [PLL9] = &hfpll0.clkr, + [PLL10] = &hfpll1.clkr, + [PLL12] = &hfpll_l2.clkr, }; static const struct qcom_reset_map gcc_ipq806x_resets[] = { diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c index 5c4e193164d4..2cfe7000fc60 100644 --- a/drivers/clk/qcom/gcc-msm8916.c +++ b/drivers/clk/qcom/gcc-msm8916.c @@ -1107,7 +1107,7 @@ static struct clk_rcg2 sdcc1_apps_clk_src = { .name = "sdcc1_apps_clk_src", .parent_names = gcc_xo_gpll0, .num_parents = 2, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_floor_ops, }, }; @@ -1132,7 +1132,7 @@ static struct clk_rcg2 
sdcc2_apps_clk_src = { .name = "sdcc2_apps_clk_src", .parent_names = gcc_xo_gpll0, .num_parents = 2, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_floor_ops, }, }; @@ -1430,6 +1430,7 @@ static struct clk_branch gcc_ultaudio_stc_xo_clk = { }; static const struct freq_tbl ftbl_codec_clk[] = { + F(9600000, P_XO, 2, 0, 0), F(19200000, P_XO, 1, 0, 0), F(11289600, P_EXT_MCLK, 1, 0, 0), { } diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c index eb551c75fba6..c34484e44569 100644 --- a/drivers/clk/qcom/gcc-msm8960.c +++ b/drivers/clk/qcom/gcc-msm8960.c @@ -30,6 +30,7 @@ #include "clk-pll.h" #include "clk-rcg.h" #include "clk-branch.h" +#include "clk-hfpll.h" #include "reset.h" static struct clk_pll pll3 = { @@ -86,6 +87,164 @@ static struct clk_regmap pll8_vote = { }, }; +static struct hfpll_data hfpll0_data = { + .mode_reg = 0x3200, + .l_reg = 0x3208, + .m_reg = 0x320c, + .n_reg = 0x3210, + .config_reg = 0x3204, + .status_reg = 0x321c, + .config_val = 0x7845c665, + .droop_reg = 0x3214, + .droop_val = 0x0108c000, + .min_rate = 600000000UL, + .max_rate = 1800000000UL, +}; + +static struct clk_hfpll hfpll0 = { + .d = &hfpll0_data, + .clkr.hw.init = &(struct clk_init_data){ + .parent_names = (const char *[]){ "pxo" }, + .num_parents = 1, + .name = "hfpll0", + .ops = &clk_ops_hfpll, + .flags = CLK_IGNORE_UNUSED, + }, + .lock = __SPIN_LOCK_UNLOCKED(hfpll0.lock), +}; + +static struct hfpll_data hfpll1_8064_data = { + .mode_reg = 0x3240, + .l_reg = 0x3248, + .m_reg = 0x324c, + .n_reg = 0x3250, + .config_reg = 0x3244, + .status_reg = 0x325c, + .config_val = 0x7845c665, + .droop_reg = 0x3254, + .droop_val = 0x0108c000, + .min_rate = 600000000UL, + .max_rate = 1800000000UL, +}; + +static struct hfpll_data hfpll1_data = { + .mode_reg = 0x3300, + .l_reg = 0x3308, + .m_reg = 0x330c, + .n_reg = 0x3310, + .config_reg = 0x3304, + .status_reg = 0x331c, + .config_val = 0x7845c665, + .droop_reg = 0x3314, + .droop_val = 0x0108c000, + .min_rate = 600000000UL, + 
.max_rate = 1800000000UL, +}; + +static struct clk_hfpll hfpll1 = { + .d = &hfpll1_data, + .clkr.hw.init = &(struct clk_init_data){ + .parent_names = (const char *[]){ "pxo" }, + .num_parents = 1, + .name = "hfpll1", + .ops = &clk_ops_hfpll, + .flags = CLK_IGNORE_UNUSED, + }, + .lock = __SPIN_LOCK_UNLOCKED(hfpll1.lock), +}; + +static struct hfpll_data hfpll2_data = { + .mode_reg = 0x3280, + .l_reg = 0x3288, + .m_reg = 0x328c, + .n_reg = 0x3290, + .config_reg = 0x3284, + .status_reg = 0x329c, + .config_val = 0x7845c665, + .droop_reg = 0x3294, + .droop_val = 0x0108c000, + .min_rate = 600000000UL, + .max_rate = 1800000000UL, +}; + +static struct clk_hfpll hfpll2 = { + .d = &hfpll2_data, + .clkr.hw.init = &(struct clk_init_data){ + .parent_names = (const char *[]){ "pxo" }, + .num_parents = 1, + .name = "hfpll2", + .ops = &clk_ops_hfpll, + .flags = CLK_IGNORE_UNUSED, + }, + .lock = __SPIN_LOCK_UNLOCKED(hfpll2.lock), +}; + +static struct hfpll_data hfpll3_data = { + .mode_reg = 0x32c0, + .l_reg = 0x32c8, + .m_reg = 0x32cc, + .n_reg = 0x32d0, + .config_reg = 0x32c4, + .status_reg = 0x32dc, + .config_val = 0x7845c665, + .droop_reg = 0x32d4, + .droop_val = 0x0108c000, + .min_rate = 600000000UL, + .max_rate = 1800000000UL, +}; + +static struct clk_hfpll hfpll3 = { + .d = &hfpll3_data, + .clkr.hw.init = &(struct clk_init_data){ + .parent_names = (const char *[]){ "pxo" }, + .num_parents = 1, + .name = "hfpll3", + .ops = &clk_ops_hfpll, + .flags = CLK_IGNORE_UNUSED, + }, + .lock = __SPIN_LOCK_UNLOCKED(hfpll3.lock), +}; + +static struct hfpll_data hfpll_l2_8064_data = { + .mode_reg = 0x3300, + .l_reg = 0x3308, + .m_reg = 0x330c, + .n_reg = 0x3310, + .config_reg = 0x3304, + .status_reg = 0x331c, + .config_val = 0x7845c665, + .droop_reg = 0x3314, + .droop_val = 0x0108c000, + .min_rate = 600000000UL, + .max_rate = 1800000000UL, +}; + +static struct hfpll_data hfpll_l2_data = { + .mode_reg = 0x3400, + .l_reg = 0x3408, + .m_reg = 0x340c, + .n_reg = 0x3410, + .config_reg = 0x3404, + 
.status_reg = 0x341c, + .config_val = 0x7845c665, + .droop_reg = 0x3414, + .droop_val = 0x0108c000, + .min_rate = 600000000UL, + .max_rate = 1800000000UL, +}; + +static struct clk_hfpll hfpll_l2 = { + .d = &hfpll_l2_data, + .clkr.hw.init = &(struct clk_init_data){ + .parent_names = (const char *[]){ "pxo" }, + .num_parents = 1, + .name = "hfpll_l2", + .ops = &clk_ops_hfpll, + .flags = CLK_IGNORE_UNUSED, + }, + .lock = __SPIN_LOCK_UNLOCKED(hfpll_l2.lock), +}; + static struct clk_pll pll14 = { .l_reg = 0x31c4, .m_reg = 0x31c8, @@ -3112,6 +3271,9 @@ static struct clk_regmap *gcc_msm8960_clks[] = { [PMIC_ARB1_H_CLK] = &pmic_arb1_h_clk.clkr, [PMIC_SSBI2_CLK] = &pmic_ssbi2_clk.clkr, [RPM_MSG_RAM_H_CLK] = &rpm_msg_ram_h_clk.clkr, + [PLL9] = &hfpll0.clkr, + [PLL10] = &hfpll1.clkr, + [PLL12] = &hfpll_l2.clkr, }; static const struct qcom_reset_map gcc_msm8960_resets[] = { @@ -3323,6 +3485,11 @@ static struct clk_regmap *gcc_apq8064_clks[] = { [PMIC_ARB1_H_CLK] = &pmic_arb1_h_clk.clkr, [PMIC_SSBI2_CLK] = &pmic_ssbi2_clk.clkr, [RPM_MSG_RAM_H_CLK] = &rpm_msg_ram_h_clk.clkr, + [PLL9] = &hfpll0.clkr, + [PLL10] = &hfpll1.clkr, + [PLL12] = &hfpll_l2.clkr, + [PLL16] = &hfpll2.clkr, + [PLL17] = &hfpll3.clkr, }; static const struct qcom_reset_map gcc_apq8064_resets[] = { @@ -3470,6 +3637,11 @@ static int gcc_msm8960_probe(struct platform_device *pdev) if (!match) return -EINVAL; + if (match->data == &gcc_apq8064_desc) { + hfpll1.d = &hfpll1_8064_data; + hfpll_l2.d = &hfpll_l2_8064_data; + } + ret = qcom_cc_register_board_clk(dev, "cxo_board", "cxo", 19200000); if (ret) return ret; diff --git a/drivers/clk/qcom/gcc-msm8974.c b/drivers/clk/qcom/gcc-msm8974.c index 00915209e7c5..348e30da4f18 100644 --- a/drivers/clk/qcom/gcc-msm8974.c +++ b/drivers/clk/qcom/gcc-msm8974.c @@ -872,7 +872,7 @@ static struct clk_init_data sdcc1_apps_clk_src_init = { .name = "sdcc1_apps_clk_src", .parent_names = gcc_xo_gpll0, .num_parents = 2, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_floor_ops, }; static 
struct clk_rcg2 sdcc1_apps_clk_src = { @@ -894,7 +894,7 @@ static struct clk_rcg2 sdcc2_apps_clk_src = { .name = "sdcc2_apps_clk_src", .parent_names = gcc_xo_gpll0, .num_parents = 2, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_floor_ops, }, }; @@ -908,7 +908,7 @@ static struct clk_rcg2 sdcc3_apps_clk_src = { .name = "sdcc3_apps_clk_src", .parent_names = gcc_xo_gpll0, .num_parents = 2, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_floor_ops, }, }; @@ -922,7 +922,7 @@ static struct clk_rcg2 sdcc4_apps_clk_src = { .name = "sdcc4_apps_clk_src", .parent_names = gcc_xo_gpll0, .num_parents = 2, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_floor_ops, }, }; diff --git a/drivers/clk/qcom/gcc-msm8994.c b/drivers/clk/qcom/gcc-msm8994.c new file mode 100644 index 000000000000..8afd8304a070 --- /dev/null +++ b/drivers/clk/qcom/gcc-msm8994.c @@ -0,0 +1,2300 @@ +/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/err.h> +#include <linux/ctype.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/module.h> +#include <linux/regmap.h> + +#include <dt-bindings/clock/qcom,gcc-msm8994.h> + +#include "common.h" +#include "clk-regmap.h" +#include "clk-alpha-pll.h" +#include "clk-rcg.h" +#include "clk-branch.h" +#include "reset.h" + +enum { + P_XO, + P_GPLL0, + P_GPLL4, +}; + +static const struct parent_map gcc_xo_gpll0_map[] = { + { P_XO, 0 }, + { P_GPLL0, 1 }, +}; + +static const char * const gcc_xo_gpll0[] = { + "xo", + "gpll0", +}; + +static const struct parent_map gcc_xo_gpll0_gpll4_map[] = { + { P_XO, 0 }, + { P_GPLL0, 1 }, + { P_GPLL4, 5 }, +}; + +static const char * const gcc_xo_gpll0_gpll4[] = { + "xo", + "gpll0", + "gpll4", +}; + +#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) } + +static struct clk_fixed_factor xo = { + .mult = 1, + .div = 1, + .hw.init = &(struct clk_init_data) + { + .name = "xo", + .parent_names = (const char *[]) { "xo_board" }, + .num_parents = 1, + .ops = &clk_fixed_factor_ops, + }, +}; + +static struct clk_alpha_pll gpll0_early = { + .offset = 0x00000, + .clkr = { + .enable_reg = 0x1480, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gpll0_early", + .parent_names = (const char *[]) { "xo" }, + .num_parents = 1, + .ops = &clk_alpha_pll_ops, + }, + }, +}; + +static struct clk_alpha_pll_postdiv gpll0 = { + .offset = 0x00000, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "gpll0", + .parent_names = (const char *[]) { "gpll0_early" }, + .num_parents = 1, + .ops = &clk_alpha_pll_postdiv_ops, + }, +}; + +static struct clk_alpha_pll gpll4_early = { + .offset = 0x1dc0, + .clkr = { + .enable_reg = 0x1480, + .enable_mask = BIT(4), + .hw.init = &(struct clk_init_data) + { + .name = "gpll4_early", + .parent_names = (const char *[]) { "xo" }, + .num_parents = 1, + .ops = 
&clk_alpha_pll_ops, + }, + }, +}; + +static struct clk_alpha_pll_postdiv gpll4 = { + .offset = 0x1dc0, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "gpll4", + .parent_names = (const char *[]) { "gpll4_early" }, + .num_parents = 1, + .ops = &clk_alpha_pll_postdiv_ops, + }, +}; + +static struct freq_tbl ftbl_ufs_axi_clk_src[] = { + F(50000000, P_GPLL0, 12, 0, 0), + F(100000000, P_GPLL0, 6, 0, 0), + F(150000000, P_GPLL0, 4, 0, 0), + F(171430000, P_GPLL0, 3.5, 0, 0), + F(200000000, P_GPLL0, 3, 0, 0), + F(240000000, P_GPLL0, 2.5, 0, 0), + { } +}; + +static struct clk_rcg2 ufs_axi_clk_src = { + .cmd_rcgr = 0x1d68, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_ufs_axi_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "ufs_axi_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct freq_tbl ftbl_usb30_master_clk_src[] = { + F(19200000, P_XO, 1, 0, 0), + F(125000000, P_GPLL0, 1, 5, 24), + { } +}; + +static struct clk_rcg2 usb30_master_clk_src = { + .cmd_rcgr = 0x03d4, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_usb30_master_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "usb30_master_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct freq_tbl ftbl_blsp_i2c_apps_clk_src[] = { + F(19200000, P_XO, 1, 0, 0), + F(50000000, P_GPLL0, 12, 0, 0), + { } +}; + +static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = { + .cmd_rcgr = 0x0660, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_blsp_i2c_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "blsp1_qup1_i2c_apps_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct freq_tbl ftbl_blspqup_spi_apps_clk_src[] = { + F(960000, P_XO, 10, 1, 2), + F(4800000, P_XO, 4, 0, 0), + F(9600000, P_XO, 2, 0, 
0), + F(15000000, P_GPLL0, 10, 1, 4), + F(19200000, P_XO, 1, 0, 0), + F(24000000, P_GPLL0, 12.5, 1, 2), + F(25000000, P_GPLL0, 12, 1, 2), + F(48000000, P_GPLL0, 12.5, 0, 0), + F(50000000, P_GPLL0, 12, 0, 0), + { } +}; + +static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = { + .cmd_rcgr = 0x064c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_blspqup_spi_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "blsp1_qup1_spi_apps_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = { + .cmd_rcgr = 0x06e0, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_blsp_i2c_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "blsp1_qup2_i2c_apps_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = { + .cmd_rcgr = 0x06cc, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_blspqup_spi_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "blsp1_qup2_spi_apps_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = { + .cmd_rcgr = 0x0760, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_blsp_i2c_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "blsp1_qup3_i2c_apps_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = { + .cmd_rcgr = 0x074c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_blspqup_spi_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "blsp1_qup3_spi_apps_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = 
&clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = { + .cmd_rcgr = 0x07e0, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_blsp_i2c_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "blsp1_qup4_i2c_apps_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = { + .cmd_rcgr = 0x07cc, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_blspqup_spi_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "blsp1_qup4_spi_apps_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp1_qup5_i2c_apps_clk_src = { + .cmd_rcgr = 0x0860, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_blsp_i2c_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "blsp1_qup5_i2c_apps_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp1_qup5_spi_apps_clk_src = { + .cmd_rcgr = 0x084c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_blspqup_spi_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "blsp1_qup5_spi_apps_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp1_qup6_i2c_apps_clk_src = { + .cmd_rcgr = 0x08e0, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_blsp_i2c_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "blsp1_qup6_i2c_apps_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp1_qup6_spi_apps_clk_src = { + .cmd_rcgr = 0x08cc, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_blspqup_spi_apps_clk_src, + 
.clkr.hw.init = &(struct clk_init_data) + { + .name = "blsp1_qup6_spi_apps_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct freq_tbl ftbl_blsp_uart_apps_clk_src[] = { + F(3686400, P_GPLL0, 1, 96, 15625), + F(7372800, P_GPLL0, 1, 192, 15625), + F(14745600, P_GPLL0, 1, 384, 15625), + F(16000000, P_GPLL0, 5, 2, 15), + F(19200000, P_XO, 1, 0, 0), + F(24000000, P_GPLL0, 5, 1, 5), + F(32000000, P_GPLL0, 1, 4, 75), + F(40000000, P_GPLL0, 15, 0, 0), + F(46400000, P_GPLL0, 1, 29, 375), + F(48000000, P_GPLL0, 12.5, 0, 0), + F(51200000, P_GPLL0, 1, 32, 375), + F(56000000, P_GPLL0, 1, 7, 75), + F(58982400, P_GPLL0, 1, 1536, 15625), + F(60000000, P_GPLL0, 10, 0, 0), + F(63160000, P_GPLL0, 9.5, 0, 0), + { } +}; + +static struct clk_rcg2 blsp1_uart1_apps_clk_src = { + .cmd_rcgr = 0x068c, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_blsp_uart_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "blsp1_uart1_apps_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp1_uart2_apps_clk_src = { + .cmd_rcgr = 0x070c, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_blsp_uart_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "blsp1_uart2_apps_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp1_uart3_apps_clk_src = { + .cmd_rcgr = 0x078c, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_blsp_uart_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "blsp1_uart3_apps_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp1_uart4_apps_clk_src = { + .cmd_rcgr = 0x080c, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + 
.freq_tbl = ftbl_blsp_uart_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "blsp1_uart4_apps_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp1_uart5_apps_clk_src = { + .cmd_rcgr = 0x088c, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_blsp_uart_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "blsp1_uart5_apps_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp1_uart6_apps_clk_src = { + .cmd_rcgr = 0x090c, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_blsp_uart_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "blsp1_uart6_apps_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp2_qup1_i2c_apps_clk_src = { + .cmd_rcgr = 0x09a0, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_blsp_i2c_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "blsp2_qup1_i2c_apps_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp2_qup1_spi_apps_clk_src = { + .cmd_rcgr = 0x098c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_blspqup_spi_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "blsp2_qup1_spi_apps_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp2_qup2_i2c_apps_clk_src = { + .cmd_rcgr = 0x0a20, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_blsp_i2c_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "blsp2_qup2_i2c_apps_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static 
struct clk_rcg2 blsp2_qup2_spi_apps_clk_src = { + .cmd_rcgr = 0x0a0c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_blspqup_spi_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "blsp2_qup2_spi_apps_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp2_qup3_i2c_apps_clk_src = { + .cmd_rcgr = 0x0aa0, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_blsp_i2c_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "blsp2_qup3_i2c_apps_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp2_qup3_spi_apps_clk_src = { + .cmd_rcgr = 0x0a8c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_blspqup_spi_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "blsp2_qup3_spi_apps_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp2_qup4_i2c_apps_clk_src = { + .cmd_rcgr = 0x0b20, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_blsp_i2c_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "blsp2_qup4_i2c_apps_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp2_qup4_spi_apps_clk_src = { + .cmd_rcgr = 0x0b0c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_blspqup_spi_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "blsp2_qup4_spi_apps_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp2_qup5_i2c_apps_clk_src = { + .cmd_rcgr = 0x0ba0, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_blsp_i2c_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data) 
+ { + .name = "blsp2_qup5_i2c_apps_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp2_qup5_spi_apps_clk_src = { + .cmd_rcgr = 0x0b8c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_blspqup_spi_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "blsp2_qup5_spi_apps_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp2_qup6_i2c_apps_clk_src = { + .cmd_rcgr = 0x0c20, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_blsp_i2c_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "blsp2_qup6_i2c_apps_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp2_qup6_spi_apps_clk_src = { + .cmd_rcgr = 0x0c0c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_blspqup_spi_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "blsp2_qup6_spi_apps_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp2_uart1_apps_clk_src = { + .cmd_rcgr = 0x09cc, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_blsp_uart_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "blsp2_uart1_apps_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp2_uart2_apps_clk_src = { + .cmd_rcgr = 0x0a4c, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_blsp_uart_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "blsp2_uart2_apps_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp2_uart3_apps_clk_src = { + .cmd_rcgr = 
0x0acc, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_blsp_uart_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "blsp2_uart3_apps_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp2_uart4_apps_clk_src = { + .cmd_rcgr = 0x0b4c, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_blsp_uart_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "blsp2_uart4_apps_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp2_uart5_apps_clk_src = { + .cmd_rcgr = 0x0bcc, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_blsp_uart_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "blsp2_uart5_apps_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 blsp2_uart6_apps_clk_src = { + .cmd_rcgr = 0x0c4c, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_blsp_uart_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "blsp2_uart6_apps_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct freq_tbl ftbl_gp1_clk_src[] = { + F(19200000, P_XO, 1, 0, 0), + F(100000000, P_GPLL0, 6, 0, 0), + F(200000000, P_GPLL0, 3, 0, 0), + { } +}; + +static struct clk_rcg2 gp1_clk_src = { + .cmd_rcgr = 0x1904, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_gp1_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "gp1_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct freq_tbl ftbl_gp2_clk_src[] = { + F(19200000, P_XO, 1, 0, 0), + F(100000000, P_GPLL0, 6, 0, 0), + F(200000000, P_GPLL0, 3, 0, 0), + 
{ } +}; + +static struct clk_rcg2 gp2_clk_src = { + .cmd_rcgr = 0x1944, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_gp2_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "gp2_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct freq_tbl ftbl_gp3_clk_src[] = { + F(19200000, P_XO, 1, 0, 0), + F(100000000, P_GPLL0, 6, 0, 0), + F(200000000, P_GPLL0, 3, 0, 0), + { } +}; + +static struct clk_rcg2 gp3_clk_src = { + .cmd_rcgr = 0x1984, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_gp3_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "gp3_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct freq_tbl ftbl_pcie_0_aux_clk_src[] = { + F(1011000, P_XO, 1, 1, 19), + { } +}; + +static struct clk_rcg2 pcie_0_aux_clk_src = { + .cmd_rcgr = 0x1b00, + .mnd_width = 8, + .hid_width = 5, + .freq_tbl = ftbl_pcie_0_aux_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "pcie_0_aux_clk_src", + .parent_names = (const char *[]) { "xo" }, + .num_parents = 1, + .ops = &clk_rcg2_ops, + }, +}; + +static struct freq_tbl ftbl_pcie_pipe_clk_src[] = { + F(125000000, P_XO, 1, 0, 0), + { } +}; + +static struct clk_rcg2 pcie_0_pipe_clk_src = { + .cmd_rcgr = 0x1adc, + .hid_width = 5, + .freq_tbl = ftbl_pcie_pipe_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "pcie_0_pipe_clk_src", + .parent_names = (const char *[]) { "xo" }, + .num_parents = 1, + .ops = &clk_rcg2_ops, + }, +}; + +static struct freq_tbl ftbl_pcie_1_aux_clk_src[] = { + F(1011000, P_XO, 1, 1, 19), + { } +}; + +static struct clk_rcg2 pcie_1_aux_clk_src = { + .cmd_rcgr = 0x1b80, + .mnd_width = 8, + .hid_width = 5, + .freq_tbl = ftbl_pcie_1_aux_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "pcie_1_aux_clk_src", + .parent_names = (const char *[]) { "xo" }, + 
.num_parents = 1, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 pcie_1_pipe_clk_src = { + .cmd_rcgr = 0x1b5c, + .hid_width = 5, + .freq_tbl = ftbl_pcie_pipe_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "pcie_1_pipe_clk_src", + .parent_names = (const char *[]) { "xo" }, + .num_parents = 1, + .ops = &clk_rcg2_ops, + }, +}; + +static struct freq_tbl ftbl_pdm2_clk_src[] = { + F(60000000, P_GPLL0, 10, 0, 0), + { } +}; + +static struct clk_rcg2 pdm2_clk_src = { + .cmd_rcgr = 0x0cd0, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_pdm2_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "pdm2_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct freq_tbl ftbl_sdcc1_apps_clk_src[] = { + F(144000, P_XO, 16, 3, 25), + F(400000, P_XO, 12, 1, 4), + F(20000000, P_GPLL0, 15, 1, 2), + F(25000000, P_GPLL0, 12, 1, 2), + F(50000000, P_GPLL0, 12, 0, 0), + F(100000000, P_GPLL0, 6, 0, 0), + F(192000000, P_GPLL4, 2, 0, 0), + F(384000000, P_GPLL4, 1, 0, 0), + { } +}; + +static struct clk_rcg2 sdcc1_apps_clk_src = { + .cmd_rcgr = 0x04d0, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_gpll4_map, + .freq_tbl = ftbl_sdcc1_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "sdcc1_apps_clk_src", + .parent_names = gcc_xo_gpll0_gpll4, + .num_parents = 3, + .ops = &clk_rcg2_floor_ops, + }, +}; + +static struct freq_tbl ftbl_sdcc2_4_apps_clk_src[] = { + F(144000, P_XO, 16, 3, 25), + F(400000, P_XO, 12, 1, 4), + F(20000000, P_GPLL0, 15, 1, 2), + F(25000000, P_GPLL0, 12, 1, 2), + F(50000000, P_GPLL0, 12, 0, 0), + F(100000000, P_GPLL0, 6, 0, 0), + F(200000000, P_GPLL0, 3, 0, 0), + { } +}; + +static struct clk_rcg2 sdcc2_apps_clk_src = { + .cmd_rcgr = 0x0510, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_sdcc2_4_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = 
"sdcc2_apps_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_floor_ops, + }, +}; + +static struct clk_rcg2 sdcc3_apps_clk_src = { + .cmd_rcgr = 0x0550, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_sdcc2_4_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "sdcc3_apps_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_floor_ops, + }, +}; + +static struct clk_rcg2 sdcc4_apps_clk_src = { + .cmd_rcgr = 0x0590, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_sdcc2_4_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "sdcc4_apps_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_floor_ops, + }, +}; + +static struct freq_tbl ftbl_tsif_ref_clk_src[] = { + F(105500, P_XO, 1, 1, 182), + { } +}; + +static struct clk_rcg2 tsif_ref_clk_src = { + .cmd_rcgr = 0x0d90, + .mnd_width = 8, + .hid_width = 5, + .freq_tbl = ftbl_tsif_ref_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "tsif_ref_clk_src", + .parent_names = (const char *[]) { "xo" }, + .num_parents = 1, + .ops = &clk_rcg2_ops, + }, +}; + +static struct freq_tbl ftbl_usb30_mock_utmi_clk_src[] = { + F(19200000, P_XO, 1, 0, 0), + F(60000000, P_GPLL0, 10, 0, 0), + { } +}; + +static struct clk_rcg2 usb30_mock_utmi_clk_src = { + .cmd_rcgr = 0x03e8, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_usb30_mock_utmi_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "usb30_mock_utmi_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct freq_tbl ftbl_usb3_phy_aux_clk_src[] = { + F(1200000, P_XO, 16, 0, 0), + { } +}; + +static struct clk_rcg2 usb3_phy_aux_clk_src = { + .cmd_rcgr = 0x1414, + .hid_width = 5, + .freq_tbl = ftbl_usb3_phy_aux_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = 
"usb3_phy_aux_clk_src", + .parent_names = (const char *[]) { "xo" }, + .num_parents = 1, + .ops = &clk_rcg2_ops, + }, +}; + +static struct freq_tbl ftbl_usb_hs_system_clk_src[] = { + F(75000000, P_GPLL0, 8, 0, 0), + { } +}; + +static struct clk_rcg2 usb_hs_system_clk_src = { + .cmd_rcgr = 0x0490, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_usb_hs_system_clk_src, + .clkr.hw.init = &(struct clk_init_data) + { + .name = "usb_hs_system_clk_src", + .parent_names = gcc_xo_gpll0, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_branch gcc_blsp1_ahb_clk = { + .halt_reg = 0x05c4, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x1484, + .enable_mask = BIT(17), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_blsp1_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = { + .halt_reg = 0x0648, + .clkr = { + .enable_reg = 0x0648, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_blsp1_qup1_i2c_apps_clk", + .parent_names = (const char *[]) { + "blsp1_qup1_i2c_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = { + .halt_reg = 0x0644, + .clkr = { + .enable_reg = 0x0644, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_blsp1_qup1_spi_apps_clk", + .parent_names = (const char *[]) { + "blsp1_qup1_spi_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = { + .halt_reg = 0x06c8, + .clkr = { + .enable_reg = 0x06c8, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_blsp1_qup2_i2c_apps_clk", + .parent_names = (const char *[]) { + "blsp1_qup2_i2c_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = 
&clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = { + .halt_reg = 0x06c4, + .clkr = { + .enable_reg = 0x06c4, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_blsp1_qup2_spi_apps_clk", + .parent_names = (const char *[]) { + "blsp1_qup2_spi_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = { + .halt_reg = 0x0748, + .clkr = { + .enable_reg = 0x0748, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_blsp1_qup3_i2c_apps_clk", + .parent_names = (const char *[]) { + "blsp1_qup3_i2c_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = { + .halt_reg = 0x0744, + .clkr = { + .enable_reg = 0x0744, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_blsp1_qup3_spi_apps_clk", + .parent_names = (const char *[]) { + "blsp1_qup3_spi_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = { + .halt_reg = 0x07c8, + .clkr = { + .enable_reg = 0x07c8, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_blsp1_qup4_i2c_apps_clk", + .parent_names = (const char *[]) { + "blsp1_qup4_i2c_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = { + .halt_reg = 0x07c4, + .clkr = { + .enable_reg = 0x07c4, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_blsp1_qup4_spi_apps_clk", + .parent_names = (const char *[]) { + "blsp1_qup4_spi_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + 
+static struct clk_branch gcc_blsp1_qup5_i2c_apps_clk = { + .halt_reg = 0x0848, + .clkr = { + .enable_reg = 0x0848, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_blsp1_qup5_i2c_apps_clk", + .parent_names = (const char *[]) { + "blsp1_qup5_i2c_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup5_spi_apps_clk = { + .halt_reg = 0x0844, + .clkr = { + .enable_reg = 0x0844, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_blsp1_qup5_spi_apps_clk", + .parent_names = (const char *[]) { + "blsp1_qup5_spi_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup6_i2c_apps_clk = { + .halt_reg = 0x08c8, + .clkr = { + .enable_reg = 0x08c8, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_blsp1_qup6_i2c_apps_clk", + .parent_names = (const char *[]) { + "blsp1_qup6_i2c_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_qup6_spi_apps_clk = { + .halt_reg = 0x08c4, + .clkr = { + .enable_reg = 0x08c4, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_blsp1_qup6_spi_apps_clk", + .parent_names = (const char *[]) { + "blsp1_qup6_spi_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_uart1_apps_clk = { + .halt_reg = 0x0684, + .clkr = { + .enable_reg = 0x0684, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_blsp1_uart1_apps_clk", + .parent_names = (const char *[]) { + "blsp1_uart1_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch 
gcc_blsp1_uart2_apps_clk = { + .halt_reg = 0x0704, + .clkr = { + .enable_reg = 0x0704, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_blsp1_uart2_apps_clk", + .parent_names = (const char *[]) { + "blsp1_uart2_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_uart3_apps_clk = { + .halt_reg = 0x0784, + .clkr = { + .enable_reg = 0x0784, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_blsp1_uart3_apps_clk", + .parent_names = (const char *[]) { + "blsp1_uart3_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_uart4_apps_clk = { + .halt_reg = 0x0804, + .clkr = { + .enable_reg = 0x0804, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_blsp1_uart4_apps_clk", + .parent_names = (const char *[]) { + "blsp1_uart4_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_uart5_apps_clk = { + .halt_reg = 0x0884, + .clkr = { + .enable_reg = 0x0884, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_blsp1_uart5_apps_clk", + .parent_names = (const char *[]) { + "blsp1_uart5_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp1_uart6_apps_clk = { + .halt_reg = 0x0904, + .clkr = { + .enable_reg = 0x0904, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_blsp1_uart6_apps_clk", + .parent_names = (const char *[]) { + "blsp1_uart6_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp2_ahb_clk = { + .halt_reg = 0x0944, + .halt_check = BRANCH_HALT_VOTED, 
+ .clkr = { + .enable_reg = 0x1484, + .enable_mask = BIT(15), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_blsp2_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp2_qup1_i2c_apps_clk = { + .halt_reg = 0x0988, + .clkr = { + .enable_reg = 0x0988, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_blsp2_qup1_i2c_apps_clk", + .parent_names = (const char *[]) { + "blsp2_qup1_i2c_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp2_qup1_spi_apps_clk = { + .halt_reg = 0x0984, + .clkr = { + .enable_reg = 0x0984, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_blsp2_qup1_spi_apps_clk", + .parent_names = (const char *[]) { + "blsp2_qup1_spi_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp2_qup2_i2c_apps_clk = { + .halt_reg = 0x0a08, + .clkr = { + .enable_reg = 0x0a08, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_blsp2_qup2_i2c_apps_clk", + .parent_names = (const char *[]) { + "blsp2_qup2_i2c_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp2_qup2_spi_apps_clk = { + .halt_reg = 0x0a04, + .clkr = { + .enable_reg = 0x0a04, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_blsp2_qup2_spi_apps_clk", + .parent_names = (const char *[]) { + "blsp2_qup2_spi_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp2_qup3_i2c_apps_clk = { + .halt_reg = 0x0a88, + .clkr = { + .enable_reg = 0x0a88, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_blsp2_qup3_i2c_apps_clk", + .parent_names = (const 
char *[]) { + "blsp2_qup3_i2c_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp2_qup3_spi_apps_clk = { + .halt_reg = 0x0a84, + .clkr = { + .enable_reg = 0x0a84, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_blsp2_qup3_spi_apps_clk", + .parent_names = (const char *[]) { + "blsp2_qup3_spi_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp2_qup4_i2c_apps_clk = { + .halt_reg = 0x0b08, + .clkr = { + .enable_reg = 0x0b08, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_blsp2_qup4_i2c_apps_clk", + .parent_names = (const char *[]) { + "blsp2_qup4_i2c_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp2_qup4_spi_apps_clk = { + .halt_reg = 0x0b04, + .clkr = { + .enable_reg = 0x0b04, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_blsp2_qup4_spi_apps_clk", + .parent_names = (const char *[]) { + "blsp2_qup4_spi_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp2_qup5_i2c_apps_clk = { + .halt_reg = 0x0b88, + .clkr = { + .enable_reg = 0x0b88, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_blsp2_qup5_i2c_apps_clk", + .parent_names = (const char *[]) { + "blsp2_qup5_i2c_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp2_qup5_spi_apps_clk = { + .halt_reg = 0x0b84, + .clkr = { + .enable_reg = 0x0b84, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_blsp2_qup5_spi_apps_clk", + .parent_names = (const char *[]) { + 
"blsp2_qup5_spi_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp2_qup6_i2c_apps_clk = { + .halt_reg = 0x0c08, + .clkr = { + .enable_reg = 0x0c08, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_blsp2_qup6_i2c_apps_clk", + .parent_names = (const char *[]) { + "blsp2_qup6_i2c_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp2_qup6_spi_apps_clk = { + .halt_reg = 0x0c04, + .clkr = { + .enable_reg = 0x0c04, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_blsp2_qup6_spi_apps_clk", + .parent_names = (const char *[]) { + "blsp2_qup6_spi_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp2_uart1_apps_clk = { + .halt_reg = 0x09c4, + .clkr = { + .enable_reg = 0x09c4, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_blsp2_uart1_apps_clk", + .parent_names = (const char *[]) { + "blsp2_uart1_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp2_uart2_apps_clk = { + .halt_reg = 0x0a44, + .clkr = { + .enable_reg = 0x0a44, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_blsp2_uart2_apps_clk", + .parent_names = (const char *[]) { + "blsp2_uart2_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp2_uart3_apps_clk = { + .halt_reg = 0x0ac4, + .clkr = { + .enable_reg = 0x0ac4, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_blsp2_uart3_apps_clk", + .parent_names = (const char *[]) { + "blsp2_uart3_apps_clk_src", + }, + .num_parents = 1, + 
.flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp2_uart4_apps_clk = { + .halt_reg = 0x0b44, + .clkr = { + .enable_reg = 0x0b44, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_blsp2_uart4_apps_clk", + .parent_names = (const char *[]) { + "blsp2_uart4_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp2_uart5_apps_clk = { + .halt_reg = 0x0bc4, + .clkr = { + .enable_reg = 0x0bc4, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_blsp2_uart5_apps_clk", + .parent_names = (const char *[]) { + "blsp2_uart5_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_blsp2_uart6_apps_clk = { + .halt_reg = 0x0c44, + .clkr = { + .enable_reg = 0x0c44, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_blsp2_uart6_apps_clk", + .parent_names = (const char *[]) { + "blsp2_uart6_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gp1_clk = { + .halt_reg = 0x1900, + .clkr = { + .enable_reg = 0x1900, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_gp1_clk", + .parent_names = (const char *[]) { + "gp1_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gp2_clk = { + .halt_reg = 0x1940, + .clkr = { + .enable_reg = 0x1940, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_gp2_clk", + .parent_names = (const char *[]) { + "gp2_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gp3_clk = { + .halt_reg = 0x1980, + .clkr = { + 
.enable_reg = 0x1980, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_gp3_clk", + .parent_names = (const char *[]) { + "gp3_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_aux_clk = { + .halt_reg = 0x1ad4, + .clkr = { + .enable_reg = 0x1ad4, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_pcie_0_aux_clk", + .parent_names = (const char *[]) { + "pcie_0_aux_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_pipe_clk = { + .halt_reg = 0x1ad8, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x1ad8, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_pcie_0_pipe_clk", + .parent_names = (const char *[]) { + "pcie_0_pipe_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_1_aux_clk = { + .halt_reg = 0x1b54, + .clkr = { + .enable_reg = 0x1b54, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_pcie_1_aux_clk", + .parent_names = (const char *[]) { + "pcie_1_aux_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_1_pipe_clk = { + .halt_reg = 0x1b58, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x1b58, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_pcie_1_pipe_clk", + .parent_names = (const char *[]) { + "pcie_1_pipe_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pdm2_clk = { + .halt_reg = 0x0ccc, + .clkr = { + .enable_reg = 0x0ccc, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_pdm2_clk", 
+ .parent_names = (const char *[]) { + "pdm2_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc1_apps_clk = { + .halt_reg = 0x04c4, + .clkr = { + .enable_reg = 0x04c4, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_sdcc1_apps_clk", + .parent_names = (const char *[]) { + "sdcc1_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc2_apps_clk = { + .halt_reg = 0x0504, + .clkr = { + .enable_reg = 0x0504, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_sdcc2_apps_clk", + .parent_names = (const char *[]) { + "sdcc2_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc3_apps_clk = { + .halt_reg = 0x0544, + .clkr = { + .enable_reg = 0x0544, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_sdcc3_apps_clk", + .parent_names = (const char *[]) { + "sdcc3_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc4_apps_clk = { + .halt_reg = 0x0584, + .clkr = { + .enable_reg = 0x0584, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_sdcc4_apps_clk", + .parent_names = (const char *[]) { + "sdcc4_apps_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sys_noc_ufs_axi_clk = { + .halt_reg = 0x1d7c, + .clkr = { + .enable_reg = 0x1d7c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_sys_noc_ufs_axi_clk", + .parent_names = (const char *[]) { + "ufs_axi_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static 
struct clk_branch gcc_sys_noc_usb3_axi_clk = { + .halt_reg = 0x03fc, + .clkr = { + .enable_reg = 0x03fc, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_sys_noc_usb3_axi_clk", + .parent_names = (const char *[]) { + "usb30_master_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_tsif_ref_clk = { + .halt_reg = 0x0d88, + .clkr = { + .enable_reg = 0x0d88, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_tsif_ref_clk", + .parent_names = (const char *[]) { + "tsif_ref_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_axi_clk = { + .halt_reg = 0x1d48, + .clkr = { + .enable_reg = 0x1d48, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_ufs_axi_clk", + .parent_names = (const char *[]) { + "ufs_axi_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_rx_cfg_clk = { + .halt_reg = 0x1d54, + .clkr = { + .enable_reg = 0x1d54, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_ufs_rx_cfg_clk", + .parent_names = (const char *[]) { + "ufs_axi_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_tx_cfg_clk = { + .halt_reg = 0x1d50, + .clkr = { + .enable_reg = 0x1d50, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_ufs_tx_cfg_clk", + .parent_names = (const char *[]) { + "ufs_axi_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_master_clk = { + .halt_reg = 0x03c8, + .clkr = { + .enable_reg = 0x03c8, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = 
"gcc_usb30_master_clk", + .parent_names = (const char *[]) { + "usb30_master_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_mock_utmi_clk = { + .halt_reg = 0x03d0, + .clkr = { + .enable_reg = 0x03d0, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_usb30_mock_utmi_clk", + .parent_names = (const char *[]) { + "usb30_mock_utmi_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_phy_aux_clk = { + .halt_reg = 0x1408, + .clkr = { + .enable_reg = 0x1408, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_usb3_phy_aux_clk", + .parent_names = (const char *[]) { + "usb3_phy_aux_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb_hs_system_clk = { + .halt_reg = 0x0484, + .clkr = { + .enable_reg = 0x0484, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data) + { + .name = "gcc_usb_hs_system_clk", + .parent_names = (const char *[]) { + "usb_hs_system_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_regmap *gcc_msm8994_clocks[] = { + [GPLL0_EARLY] = &gpll0_early.clkr, + [GPLL0] = &gpll0.clkr, + [GPLL4_EARLY] = &gpll4_early.clkr, + [GPLL4] = &gpll4.clkr, + [UFS_AXI_CLK_SRC] = &ufs_axi_clk_src.clkr, + [USB30_MASTER_CLK_SRC] = &usb30_master_clk_src.clkr, + [BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr, + [BLSP1_QUP1_SPI_APPS_CLK_SRC] = &blsp1_qup1_spi_apps_clk_src.clkr, + [BLSP1_QUP2_I2C_APPS_CLK_SRC] = &blsp1_qup2_i2c_apps_clk_src.clkr, + [BLSP1_QUP2_SPI_APPS_CLK_SRC] = &blsp1_qup2_spi_apps_clk_src.clkr, + [BLSP1_QUP3_I2C_APPS_CLK_SRC] = &blsp1_qup3_i2c_apps_clk_src.clkr, + [BLSP1_QUP3_SPI_APPS_CLK_SRC] = 
&blsp1_qup3_spi_apps_clk_src.clkr, + [BLSP1_QUP4_I2C_APPS_CLK_SRC] = &blsp1_qup4_i2c_apps_clk_src.clkr, + [BLSP1_QUP4_SPI_APPS_CLK_SRC] = &blsp1_qup4_spi_apps_clk_src.clkr, + [BLSP1_QUP5_I2C_APPS_CLK_SRC] = &blsp1_qup5_i2c_apps_clk_src.clkr, + [BLSP1_QUP5_SPI_APPS_CLK_SRC] = &blsp1_qup5_spi_apps_clk_src.clkr, + [BLSP1_QUP6_I2C_APPS_CLK_SRC] = &blsp1_qup6_i2c_apps_clk_src.clkr, + [BLSP1_QUP6_SPI_APPS_CLK_SRC] = &blsp1_qup6_spi_apps_clk_src.clkr, + [BLSP1_UART1_APPS_CLK_SRC] = &blsp1_uart1_apps_clk_src.clkr, + [BLSP1_UART2_APPS_CLK_SRC] = &blsp1_uart2_apps_clk_src.clkr, + [BLSP1_UART3_APPS_CLK_SRC] = &blsp1_uart3_apps_clk_src.clkr, + [BLSP1_UART4_APPS_CLK_SRC] = &blsp1_uart4_apps_clk_src.clkr, + [BLSP1_UART5_APPS_CLK_SRC] = &blsp1_uart5_apps_clk_src.clkr, + [BLSP1_UART6_APPS_CLK_SRC] = &blsp1_uart6_apps_clk_src.clkr, + [BLSP2_QUP1_I2C_APPS_CLK_SRC] = &blsp2_qup1_i2c_apps_clk_src.clkr, + [BLSP2_QUP1_SPI_APPS_CLK_SRC] = &blsp2_qup1_spi_apps_clk_src.clkr, + [BLSP2_QUP2_I2C_APPS_CLK_SRC] = &blsp2_qup2_i2c_apps_clk_src.clkr, + [BLSP2_QUP2_SPI_APPS_CLK_SRC] = &blsp2_qup2_spi_apps_clk_src.clkr, + [BLSP2_QUP3_I2C_APPS_CLK_SRC] = &blsp2_qup3_i2c_apps_clk_src.clkr, + [BLSP2_QUP3_SPI_APPS_CLK_SRC] = &blsp2_qup3_spi_apps_clk_src.clkr, + [BLSP2_QUP4_I2C_APPS_CLK_SRC] = &blsp2_qup4_i2c_apps_clk_src.clkr, + [BLSP2_QUP4_SPI_APPS_CLK_SRC] = &blsp2_qup4_spi_apps_clk_src.clkr, + [BLSP2_QUP5_I2C_APPS_CLK_SRC] = &blsp2_qup5_i2c_apps_clk_src.clkr, + [BLSP2_QUP5_SPI_APPS_CLK_SRC] = &blsp2_qup5_spi_apps_clk_src.clkr, + [BLSP2_QUP6_I2C_APPS_CLK_SRC] = &blsp2_qup6_i2c_apps_clk_src.clkr, + [BLSP2_QUP6_SPI_APPS_CLK_SRC] = &blsp2_qup6_spi_apps_clk_src.clkr, + [BLSP2_UART1_APPS_CLK_SRC] = &blsp2_uart1_apps_clk_src.clkr, + [BLSP2_UART2_APPS_CLK_SRC] = &blsp2_uart2_apps_clk_src.clkr, + [BLSP2_UART3_APPS_CLK_SRC] = &blsp2_uart3_apps_clk_src.clkr, + [BLSP2_UART4_APPS_CLK_SRC] = &blsp2_uart4_apps_clk_src.clkr, + [BLSP2_UART5_APPS_CLK_SRC] = &blsp2_uart5_apps_clk_src.clkr, + [BLSP2_UART6_APPS_CLK_SRC] 
= &blsp2_uart6_apps_clk_src.clkr, + [GP1_CLK_SRC] = &gp1_clk_src.clkr, + [GP2_CLK_SRC] = &gp2_clk_src.clkr, + [GP3_CLK_SRC] = &gp3_clk_src.clkr, + [PCIE_0_AUX_CLK_SRC] = &pcie_0_aux_clk_src.clkr, + [PCIE_0_PIPE_CLK_SRC] = &pcie_0_pipe_clk_src.clkr, + [PCIE_1_AUX_CLK_SRC] = &pcie_1_aux_clk_src.clkr, + [PCIE_1_PIPE_CLK_SRC] = &pcie_1_pipe_clk_src.clkr, + [PDM2_CLK_SRC] = &pdm2_clk_src.clkr, + [SDCC1_APPS_CLK_SRC] = &sdcc1_apps_clk_src.clkr, + [SDCC2_APPS_CLK_SRC] = &sdcc2_apps_clk_src.clkr, + [SDCC3_APPS_CLK_SRC] = &sdcc3_apps_clk_src.clkr, + [SDCC4_APPS_CLK_SRC] = &sdcc4_apps_clk_src.clkr, + [TSIF_REF_CLK_SRC] = &tsif_ref_clk_src.clkr, + [USB30_MOCK_UTMI_CLK_SRC] = &usb30_mock_utmi_clk_src.clkr, + [USB3_PHY_AUX_CLK_SRC] = &usb3_phy_aux_clk_src.clkr, + [USB_HS_SYSTEM_CLK_SRC] = &usb_hs_system_clk_src.clkr, + [GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr, + [GCC_BLSP1_QUP1_I2C_APPS_CLK] = &gcc_blsp1_qup1_i2c_apps_clk.clkr, + [GCC_BLSP1_QUP1_SPI_APPS_CLK] = &gcc_blsp1_qup1_spi_apps_clk.clkr, + [GCC_BLSP1_QUP2_I2C_APPS_CLK] = &gcc_blsp1_qup2_i2c_apps_clk.clkr, + [GCC_BLSP1_QUP2_SPI_APPS_CLK] = &gcc_blsp1_qup2_spi_apps_clk.clkr, + [GCC_BLSP1_QUP3_I2C_APPS_CLK] = &gcc_blsp1_qup3_i2c_apps_clk.clkr, + [GCC_BLSP1_QUP3_SPI_APPS_CLK] = &gcc_blsp1_qup3_spi_apps_clk.clkr, + [GCC_BLSP1_QUP4_I2C_APPS_CLK] = &gcc_blsp1_qup4_i2c_apps_clk.clkr, + [GCC_BLSP1_QUP4_SPI_APPS_CLK] = &gcc_blsp1_qup4_spi_apps_clk.clkr, + [GCC_BLSP1_QUP5_I2C_APPS_CLK] = &gcc_blsp1_qup5_i2c_apps_clk.clkr, + [GCC_BLSP1_QUP5_SPI_APPS_CLK] = &gcc_blsp1_qup5_spi_apps_clk.clkr, + [GCC_BLSP1_QUP6_I2C_APPS_CLK] = &gcc_blsp1_qup6_i2c_apps_clk.clkr, + [GCC_BLSP1_QUP6_SPI_APPS_CLK] = &gcc_blsp1_qup6_spi_apps_clk.clkr, + [GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr, + [GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr, + [GCC_BLSP1_UART3_APPS_CLK] = &gcc_blsp1_uart3_apps_clk.clkr, + [GCC_BLSP1_UART4_APPS_CLK] = &gcc_blsp1_uart4_apps_clk.clkr, + [GCC_BLSP1_UART5_APPS_CLK] = 
&gcc_blsp1_uart5_apps_clk.clkr, + [GCC_BLSP1_UART6_APPS_CLK] = &gcc_blsp1_uart6_apps_clk.clkr, + [GCC_BLSP2_AHB_CLK] = &gcc_blsp2_ahb_clk.clkr, + [GCC_BLSP2_QUP1_I2C_APPS_CLK] = &gcc_blsp2_qup1_i2c_apps_clk.clkr, + [GCC_BLSP2_QUP1_SPI_APPS_CLK] = &gcc_blsp2_qup1_spi_apps_clk.clkr, + [GCC_BLSP2_QUP2_I2C_APPS_CLK] = &gcc_blsp2_qup2_i2c_apps_clk.clkr, + [GCC_BLSP2_QUP2_SPI_APPS_CLK] = &gcc_blsp2_qup2_spi_apps_clk.clkr, + [GCC_BLSP2_QUP3_I2C_APPS_CLK] = &gcc_blsp2_qup3_i2c_apps_clk.clkr, + [GCC_BLSP2_QUP3_SPI_APPS_CLK] = &gcc_blsp2_qup3_spi_apps_clk.clkr, + [GCC_BLSP2_QUP4_I2C_APPS_CLK] = &gcc_blsp2_qup4_i2c_apps_clk.clkr, + [GCC_BLSP2_QUP4_SPI_APPS_CLK] = &gcc_blsp2_qup4_spi_apps_clk.clkr, + [GCC_BLSP2_QUP5_I2C_APPS_CLK] = &gcc_blsp2_qup5_i2c_apps_clk.clkr, + [GCC_BLSP2_QUP5_SPI_APPS_CLK] = &gcc_blsp2_qup5_spi_apps_clk.clkr, + [GCC_BLSP2_QUP6_I2C_APPS_CLK] = &gcc_blsp2_qup6_i2c_apps_clk.clkr, + [GCC_BLSP2_QUP6_SPI_APPS_CLK] = &gcc_blsp2_qup6_spi_apps_clk.clkr, + [GCC_BLSP2_UART1_APPS_CLK] = &gcc_blsp2_uart1_apps_clk.clkr, + [GCC_BLSP2_UART2_APPS_CLK] = &gcc_blsp2_uart2_apps_clk.clkr, + [GCC_BLSP2_UART3_APPS_CLK] = &gcc_blsp2_uart3_apps_clk.clkr, + [GCC_BLSP2_UART4_APPS_CLK] = &gcc_blsp2_uart4_apps_clk.clkr, + [GCC_BLSP2_UART5_APPS_CLK] = &gcc_blsp2_uart5_apps_clk.clkr, + [GCC_BLSP2_UART6_APPS_CLK] = &gcc_blsp2_uart6_apps_clk.clkr, + [GCC_GP1_CLK] = &gcc_gp1_clk.clkr, + [GCC_GP2_CLK] = &gcc_gp2_clk.clkr, + [GCC_GP3_CLK] = &gcc_gp3_clk.clkr, + [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr, + [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr, + [GCC_PCIE_1_AUX_CLK] = &gcc_pcie_1_aux_clk.clkr, + [GCC_PCIE_1_PIPE_CLK] = &gcc_pcie_1_pipe_clk.clkr, + [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr, + [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr, + [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr, + [GCC_SDCC3_APPS_CLK] = &gcc_sdcc3_apps_clk.clkr, + [GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr, + [GCC_SYS_NOC_UFS_AXI_CLK] = &gcc_sys_noc_ufs_axi_clk.clkr, + 
[GCC_SYS_NOC_USB3_AXI_CLK] = &gcc_sys_noc_usb3_axi_clk.clkr, + [GCC_TSIF_REF_CLK] = &gcc_tsif_ref_clk.clkr, + [GCC_UFS_AXI_CLK] = &gcc_ufs_axi_clk.clkr, + [GCC_UFS_RX_CFG_CLK] = &gcc_ufs_rx_cfg_clk.clkr, + [GCC_UFS_TX_CFG_CLK] = &gcc_ufs_tx_cfg_clk.clkr, + [GCC_USB30_MASTER_CLK] = &gcc_usb30_master_clk.clkr, + [GCC_USB30_MOCK_UTMI_CLK] = &gcc_usb30_mock_utmi_clk.clkr, + [GCC_USB3_PHY_AUX_CLK] = &gcc_usb3_phy_aux_clk.clkr, + [GCC_USB_HS_SYSTEM_CLK] = &gcc_usb_hs_system_clk.clkr, +}; + +static const struct regmap_config gcc_msm8994_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x2000, + .fast_io = true, +}; + +static const struct qcom_cc_desc gcc_msm8994_desc = { + .config = &gcc_msm8994_regmap_config, + .clks = gcc_msm8994_clocks, + .num_clks = ARRAY_SIZE(gcc_msm8994_clocks), +}; + +static const struct of_device_id gcc_msm8994_match_table[] = { + { .compatible = "qcom,gcc-msm8994" }, + {} +}; +MODULE_DEVICE_TABLE(of, gcc_msm8994_match_table); + +static int gcc_msm8994_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct clk *clk; + + clk = devm_clk_register(dev, &xo.hw); + if (IS_ERR(clk)) + return PTR_ERR(clk); + + return qcom_cc_probe(pdev, &gcc_msm8994_desc); +} + +static struct platform_driver gcc_msm8994_driver = { + .probe = gcc_msm8994_probe, + .driver = { + .name = "gcc-msm8994", + .of_match_table = gcc_msm8994_match_table, + }, +}; + +static int __init gcc_msm8994_init(void) +{ + return platform_driver_register(&gcc_msm8994_driver); +} +core_initcall(gcc_msm8994_init); + +static void __exit gcc_msm8994_exit(void) +{ + platform_driver_unregister(&gcc_msm8994_driver); +} +module_exit(gcc_msm8994_exit); + +MODULE_DESCRIPTION("Qualcomm GCC MSM8994 Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:gcc-msm8994"); diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c index fe03e6fbc7df..4b1fc1730d29 100644 --- a/drivers/clk/qcom/gcc-msm8996.c +++ 
b/drivers/clk/qcom/gcc-msm8996.c @@ -460,14 +460,22 @@ static struct clk_rcg2 sdcc1_apps_clk_src = { .name = "sdcc1_apps_clk_src", .parent_names = gcc_xo_gpll0_gpll4_gpll0_early_div, .num_parents = 4, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_floor_ops, }, }; +static struct freq_tbl ftbl_sdcc1_ice_core_clk_src[] = { + F(19200000, P_XO, 1, 0, 0), + F(150000000, P_GPLL0, 4, 0, 0), + F(300000000, P_GPLL0, 2, 0, 0), + { } +}; + static struct clk_rcg2 sdcc1_ice_core_clk_src = { .cmd_rcgr = 0x13024, .hid_width = 5, .parent_map = gcc_xo_gpll0_gpll4_gpll0_early_div_map, + .freq_tbl = ftbl_sdcc1_ice_core_clk_src, .clkr.hw.init = &(struct clk_init_data){ .name = "sdcc1_ice_core_clk_src", .parent_names = gcc_xo_gpll0_gpll4_gpll0_early_div, @@ -497,7 +505,7 @@ static struct clk_rcg2 sdcc2_apps_clk_src = { .name = "sdcc2_apps_clk_src", .parent_names = gcc_xo_gpll0_gpll4, .num_parents = 3, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_floor_ops, }, }; @@ -511,7 +519,7 @@ static struct clk_rcg2 sdcc3_apps_clk_src = { .name = "sdcc3_apps_clk_src", .parent_names = gcc_xo_gpll0_gpll4, .num_parents = 3, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_floor_ops, }, }; @@ -535,7 +543,7 @@ static struct clk_rcg2 sdcc4_apps_clk_src = { .name = "sdcc4_apps_clk_src", .parent_names = gcc_xo_gpll0, .num_parents = 2, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_floor_ops, }, }; @@ -1230,10 +1238,18 @@ static struct clk_rcg2 ufs_axi_clk_src = { }, }; +static const struct freq_tbl ftbl_ufs_ice_core_clk_src[] = { + F(19200000, P_XO, 1, 0, 0), + F(150000000, P_GPLL0, 4, 0, 0), + F(300000000, P_GPLL0, 2, 0, 0), + { } +}; + static struct clk_rcg2 ufs_ice_core_clk_src = { .cmd_rcgr = 0x76014, .hid_width = 5, .parent_map = gcc_xo_gpll0_map, + .freq_tbl = ftbl_ufs_ice_core_clk_src, .clkr.hw.init = &(struct clk_init_data){ .name = "ufs_ice_core_clk_src", .parent_names = gcc_xo_gpll0, @@ -1242,10 +1258,19 @@ static struct clk_rcg2 ufs_ice_core_clk_src = { }, }; +static const struct freq_tbl 
ftbl_qspi_ser_clk_src[] = { + F(75000000, P_GPLL0, 8, 0, 0), + F(150000000, P_GPLL0, 4, 0, 0), + F(256000000, P_GPLL4, 1.5, 0, 0), + F(300000000, P_GPLL0, 2, 0, 0), + { } +}; + static struct clk_rcg2 qspi_ser_clk_src = { .cmd_rcgr = 0x8b00c, .hid_width = 5, .parent_map = gcc_xo_gpll0_gpll1_early_div_gpll1_gpll4_gpll0_early_div_map, + .freq_tbl = ftbl_qspi_ser_clk_src, .clkr.hw.init = &(struct clk_init_data){ .name = "qspi_ser_clk_src", .parent_names = gcc_xo_gpll0_gpll1_early_div_gpll1_gpll4_gpll0_early_div, diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c index f12d7b2bddd7..e9c8cc05969a 100644 --- a/drivers/clk/qcom/gdsc.c +++ b/drivers/clk/qcom/gdsc.c @@ -30,6 +30,7 @@ #define SW_OVERRIDE_MASK BIT(2) #define HW_CONTROL_MASK BIT(1) #define SW_COLLAPSE_MASK BIT(0) +#define GMEM_CLAMP_IO_MASK BIT(0) /* Wait 2^n CXO cycles between all states. Here, n=2 (4 cycles). */ #define EN_REST_WAIT_VAL (0x2 << 20) @@ -38,6 +39,7 @@ #define RETAIN_MEM BIT(14) #define RETAIN_PERIPH BIT(13) +#define OFF_PERIPH BIT(12) #define TIMEOUT_US 100 @@ -55,6 +57,13 @@ static int gdsc_is_enabled(struct gdsc *sc, unsigned int reg) return !!(val & PWR_ON_MASK); } +static int gdsc_hwctrl(struct gdsc *sc, bool en) +{ + u32 val = en ? 
HW_CONTROL_MASK : 0; + + return regmap_update_bits(sc->regmap, sc->gdscr, HW_CONTROL_MASK, val); +} + static int gdsc_toggle_logic(struct gdsc *sc, bool en) { int ret; @@ -122,24 +131,68 @@ static inline int gdsc_assert_reset(struct gdsc *sc) return 0; } -static inline void gdsc_force_mem_on(struct gdsc *sc) +static inline void gdsc_force_mem_core_on(struct gdsc *sc) { int i; - u32 mask = RETAIN_MEM | RETAIN_PERIPH; + u32 mask = RETAIN_MEM; for (i = 0; i < sc->cxc_count; i++) regmap_update_bits(sc->regmap, sc->cxcs[i], mask, mask); } -static inline void gdsc_clear_mem_on(struct gdsc *sc) +static inline void gdsc_force_mem_periph_on(struct gdsc *sc) { int i; - u32 mask = RETAIN_MEM | RETAIN_PERIPH; + u32 mask = RETAIN_PERIPH | OFF_PERIPH; + u32 val = RETAIN_PERIPH; + + for (i = 0; i < sc->cxc_count; i++) + regmap_update_bits(sc->regmap, sc->cxcs[i], mask, val); +} + +static inline void gdsc_force_mem_on(struct gdsc *sc) +{ + gdsc_force_mem_core_on(sc); + gdsc_force_mem_periph_on(sc); +} + +static inline void gdsc_clear_mem_core_on(struct gdsc *sc) +{ + int i; + u32 mask = RETAIN_MEM; for (i = 0; i < sc->cxc_count; i++) regmap_update_bits(sc->regmap, sc->cxcs[i], mask, 0); } +static inline void gdsc_deassert_clamp_io(struct gdsc *sc) +{ + regmap_update_bits(sc->regmap, sc->clamp_io_ctrl, + GMEM_CLAMP_IO_MASK, 0); +} + +static inline void gdsc_assert_clamp_io(struct gdsc *sc) +{ + regmap_update_bits(sc->regmap, sc->clamp_io_ctrl, + GMEM_CLAMP_IO_MASK, 1); +} + +static inline void gdsc_clear_mem_periph_on(struct gdsc *sc) +{ + int i; + u32 mask = RETAIN_PERIPH | OFF_PERIPH; + u32 val = OFF_PERIPH; + + for (i = 0; i < sc->cxc_count; i++) + regmap_update_bits(sc->regmap, sc->cxcs[i], mask, val); +} + +static inline void gdsc_clear_mem_on(struct gdsc *sc) +{ + gdsc_clear_mem_core_on(sc); + gdsc_clear_mem_periph_on(sc); +} + static int gdsc_enable(struct generic_pm_domain *domain) { struct gdsc *sc = domain_to_gdsc(domain); @@ -148,12 +201,14 @@ static int 
gdsc_enable(struct generic_pm_domain *domain) if (sc->pwrsts == PWRSTS_ON) return gdsc_deassert_reset(sc); + if (sc->flags & CLAMP_IO) + gdsc_deassert_clamp_io(sc); + ret = gdsc_toggle_logic(sc, true); if (ret) return ret; - if (sc->pwrsts & PWRSTS_OFF) - gdsc_force_mem_on(sc); + gdsc_force_mem_on(sc); /* * If clocks to this power domain were already on, they will take an @@ -164,20 +219,62 @@ static int gdsc_enable(struct generic_pm_domain *domain) */ udelay(1); + /* Turn on HW trigger mode if supported */ + if (sc->flags & HW_CTRL) + return gdsc_hwctrl(sc, true); + return 0; } static int gdsc_disable(struct generic_pm_domain *domain) { struct gdsc *sc = domain_to_gdsc(domain); + int ret; + u8 pwrst; if (sc->pwrsts == PWRSTS_ON) return gdsc_assert_reset(sc); - if (sc->pwrsts & PWRSTS_OFF) + /* Turn off HW trigger mode if supported */ + if (sc->flags & HW_CTRL) { + ret = gdsc_hwctrl(sc, false); + if (ret < 0) + return ret; + } + + if (domain->state_count > 1) + pwrst = 1 << domain->state_idx; + else if (sc->pwrsts & PWRSTS_OFF) + pwrst = PWRSTS_OFF; + else + pwrst = PWRSTS_RET; + + switch (pwrst) { + case PWRSTS_OFF: gdsc_clear_mem_on(sc); + break; + case PWRSTS_RET: + if (sc->pwrsts_ret == PWRSTS_RET_ALL) + gdsc_force_mem_on(sc); + else if (sc->pwrsts_ret == PWRSTS_RET_MEM) + gdsc_force_mem_core_on(sc); + else if (sc->pwrsts_ret == PWRSTS_RET_PERIPH) + gdsc_force_mem_periph_on(sc); + else + return -EINVAL; + break; + default: + return -EINVAL; + }; + + ret = gdsc_toggle_logic(sc, false); + if (ret) + return ret; - return gdsc_toggle_logic(sc, false); + if (sc->flags & CLAMP_IO) + gdsc_assert_clamp_io(sc); + + return 0; } static int gdsc_init(struct gdsc *sc) @@ -198,6 +295,9 @@ static int gdsc_init(struct gdsc *sc) if (ret) return ret; + if (!sc->pwrsts) + return -EINVAL; + /* Force gdsc ON if only ON state is supported */ if (sc->pwrsts == PWRSTS_ON) { ret = gdsc_toggle_logic(sc, true); @@ -217,14 +317,15 @@ static int gdsc_init(struct gdsc *sc) if ((sc->flags & 
VOTABLE) && on) gdsc_enable(&sc->pd); - if (on || (sc->pwrsts & PWRSTS_RET)) + if (on) gdsc_force_mem_on(sc); - else - gdsc_clear_mem_on(sc); sc->pd.power_off = gdsc_disable; sc->pd.power_on = gdsc_enable; - pm_genpd_init(&sc->pd, NULL, !on); + if (sc->pd.state_count) + pm_genpd_init(&sc->pd, &simple_qos_governor, !on); + else + pm_genpd_init(&sc->pd, NULL, !on); return 0; } diff --git a/drivers/clk/qcom/gdsc.h b/drivers/clk/qcom/gdsc.h index 3bf497c36bdf..417309cffa03 100644 --- a/drivers/clk/qcom/gdsc.h +++ b/drivers/clk/qcom/gdsc.h @@ -39,17 +39,28 @@ struct gdsc { struct regmap *regmap; unsigned int gdscr; unsigned int gds_hw_ctrl; + unsigned int clamp_io_ctrl; unsigned int *cxcs; unsigned int cxc_count; +/* supported options for pwrsts */ +#define PWRSTS_RET BIT(0) +#define PWRSTS_OFF BIT(1) +#define PWRSTS_ON BIT(2) +#define PWRSTS_MAX 3 +#define PWRSTS_OFF_ON (PWRSTS_OFF | PWRSTS_ON) +#define PWRSTS_RET_ON (PWRSTS_RET | PWRSTS_ON) +#define PWRSTS_OFF_RET_ON (PWRSTS_OFF | PWRSTS_RET | PWRSTS_ON) const u8 pwrsts; -/* Powerdomain allowable state bitfields */ -#define PWRSTS_OFF BIT(0) -#define PWRSTS_RET BIT(1) -#define PWRSTS_ON BIT(2) -#define PWRSTS_OFF_ON (PWRSTS_OFF | PWRSTS_ON) -#define PWRSTS_RET_ON (PWRSTS_RET | PWRSTS_ON) +/* supported options for pwrsts_ret */ +#define PWRSTS_RET_ALL 0 /* default retains all */ +#define PWRSTS_RET_MEM BIT(0) +#define PWRSTS_RET_PERIPH BIT(1) + const u8 pwrsts_ret; +/* supported flags */ +#define VOTABLE BIT(0) +#define CLAMP_IO BIT(1) +#define HW_CTRL BIT(2) const u8 flags; -#define VOTABLE BIT(0) struct reset_controller_dev *rcdev; unsigned int *resets; unsigned int reset_count; diff --git a/drivers/clk/qcom/hfpll.c b/drivers/clk/qcom/hfpll.c new file mode 100644 index 000000000000..9eed519cf1c7 --- /dev/null +++ b/drivers/clk/qcom/hfpll.c @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/of.h> +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/regmap.h> + +#include "clk-regmap.h" +#include "clk-hfpll.h" + +static const struct hfpll_data hdata = { + .mode_reg = 0x00, + .l_reg = 0x04, + .m_reg = 0x08, + .n_reg = 0x0c, + .user_reg = 0x10, + .config_reg = 0x14, + .config_val = 0x430405d, + .status_reg = 0x1c, + .lock_bit = 16, + + .user_val = 0x8, + .user_vco_mask = 0x100000, + .low_vco_max_rate = 1248000000, + .min_rate = 537600000UL, + .max_rate = 2900000000UL, +}; + +static const struct of_device_id qcom_hfpll_match_table[] = { + { .compatible = "qcom,hfpll" }, + { } +}; +MODULE_DEVICE_TABLE(of, qcom_hfpll_match_table); + +static const struct regmap_config hfpll_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x30, + .fast_io = true, +}; + +static int qcom_hfpll_probe(struct platform_device *pdev) +{ + struct resource *res; + struct device *dev = &pdev->dev; + void __iomem *base; + struct regmap *regmap; + struct clk_hfpll *h; + struct clk_init_data init = { + .parent_names = (const char *[]){ "xo" }, + .num_parents = 1, + .ops = &clk_ops_hfpll, + }; + + h = devm_kzalloc(dev, sizeof(*h), GFP_KERNEL); + if (!h) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + regmap = 
devm_regmap_init_mmio(&pdev->dev, base, &hfpll_regmap_config); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + if (of_property_read_string_index(dev->of_node, "clock-output-names", + 0, &init.name)) + return -ENODEV; + + h->d = &hdata; + h->clkr.hw.init = &init; + spin_lock_init(&h->lock); + + return devm_clk_register_regmap(&pdev->dev, &h->clkr); +} + +static struct platform_driver qcom_hfpll_driver = { + .probe = qcom_hfpll_probe, + .driver = { + .name = "qcom-hfpll", + .of_match_table = qcom_hfpll_match_table, + }, +}; +module_platform_driver(qcom_hfpll_driver); + +MODULE_DESCRIPTION("QCOM HFPLL Clock Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:qcom-hfpll"); diff --git a/drivers/clk/qcom/kpss-xcc.c b/drivers/clk/qcom/kpss-xcc.c new file mode 100644 index 000000000000..abf6bfd053c1 --- /dev/null +++ b/drivers/clk/qcom/kpss-xcc.c @@ -0,0 +1,95 @@ +/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/clk.h> +#include <linux/clk-provider.h> + +static const char *aux_parents[] = { + "pll8_vote", + "pxo", +}; + +static unsigned int aux_parent_map[] = { + 3, + 0, +}; + +static const struct of_device_id kpss_xcc_match_table[] = { + { .compatible = "qcom,kpss-acc-v1", .data = (void *)1UL }, + { .compatible = "qcom,kpss-gcc" }, + {} +}; +MODULE_DEVICE_TABLE(of, kpss_xcc_match_table); + +static int kpss_xcc_driver_probe(struct platform_device *pdev) +{ + const struct of_device_id *id; + struct clk *clk; + struct resource *res; + void __iomem *base; + const char *name; + + id = of_match_device(kpss_xcc_match_table, &pdev->dev); + if (!id) + return -ENODEV; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + if (id->data) { + if (of_property_read_string_index(pdev->dev.of_node, + "clock-output-names", 0, &name)) + return -ENODEV; + base += 0x14; + } else { + name = "acpu_l2_aux"; + base += 0x28; + } + + clk = clk_register_mux_table(&pdev->dev, name, aux_parents, + ARRAY_SIZE(aux_parents), 0, base, 0, 0x3, + 0, aux_parent_map, NULL); + + platform_set_drvdata(pdev, clk); + + return PTR_ERR_OR_ZERO(clk); +} + +static int kpss_xcc_driver_remove(struct platform_device *pdev) +{ + clk_unregister_mux(platform_get_drvdata(pdev)); + return 0; +} + +static struct platform_driver kpss_xcc_driver = { + .probe = kpss_xcc_driver_probe, + .remove = kpss_xcc_driver_remove, + .driver = { + .name = "kpss-xcc", + .of_match_table = kpss_xcc_match_table, + }, +}; +module_platform_driver(kpss_xcc_driver); + +MODULE_DESCRIPTION("Krait Processor Sub System (KPSS) Clock Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:kpss-xcc"); diff --git 
a/drivers/clk/qcom/krait-cc.c b/drivers/clk/qcom/krait-cc.c new file mode 100644 index 000000000000..03320110848f --- /dev/null +++ b/drivers/clk/qcom/krait-cc.c @@ -0,0 +1,352 @@ +/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/slab.h> + +#include "clk-krait.h" + +static unsigned int sec_mux_map[] = { + 2, + 0, +}; + +static unsigned int pri_mux_map[] = { + 1, + 2, + 0, +}; + +static int +krait_add_div(struct device *dev, int id, const char *s, unsigned offset) +{ + struct krait_div2_clk *div; + struct clk_init_data init = { + .num_parents = 1, + .ops = &krait_div2_clk_ops, + .flags = CLK_SET_RATE_PARENT, + }; + const char *p_names[1]; + struct clk *clk; + + div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL); + if (!div) + return -ENOMEM; + + div->width = 2; + div->shift = 6; + div->lpl = id >= 0; + div->offset = offset; + div->hw.init = &init; + + init.name = kasprintf(GFP_KERNEL, "hfpll%s_div", s); + if (!init.name) + return -ENOMEM; + + init.parent_names = p_names; + p_names[0] = kasprintf(GFP_KERNEL, "hfpll%s", s); + if (!p_names[0]) { + kfree(init.name); + return -ENOMEM; + } + + clk = devm_clk_register(dev, &div->hw); + kfree(p_names[0]); + kfree(init.name); + + return 
PTR_ERR_OR_ZERO(clk); +} + +static int +krait_add_sec_mux(struct device *dev, int id, const char *s, unsigned offset, + bool unique_aux) +{ + struct krait_mux_clk *mux; + static const char *sec_mux_list[] = { + "acpu_aux", + "qsb", + }; + struct clk_init_data init = { + .parent_names = sec_mux_list, + .num_parents = ARRAY_SIZE(sec_mux_list), + .ops = &krait_mux_clk_ops, + .flags = CLK_SET_RATE_PARENT, + }; + struct clk *clk; + + mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL); + if (!mux) + return -ENOMEM; + + mux->offset = offset; + mux->lpl = id >= 0; + mux->has_safe_parent = true; + mux->safe_sel = 2; + mux->mask = 0x3; + mux->shift = 2; + mux->parent_map = sec_mux_map; + mux->hw.init = &init; + + init.name = kasprintf(GFP_KERNEL, "krait%s_sec_mux", s); + if (!init.name) + return -ENOMEM; + + if (unique_aux) { + sec_mux_list[0] = kasprintf(GFP_KERNEL, "acpu%s_aux", s); + if (!sec_mux_list[0]) { + clk = ERR_PTR(-ENOMEM); + goto err_aux; + } + } + + clk = devm_clk_register(dev, &mux->hw); + + if (unique_aux) + kfree(sec_mux_list[0]); +err_aux: + kfree(init.name); + return PTR_ERR_OR_ZERO(clk); +} + +static struct clk * +krait_add_pri_mux(struct device *dev, int id, const char *s, unsigned offset) +{ + struct krait_mux_clk *mux; + const char *p_names[3]; + struct clk_init_data init = { + .parent_names = p_names, + .num_parents = ARRAY_SIZE(p_names), + .ops = &krait_mux_clk_ops, + .flags = CLK_SET_RATE_PARENT, + }; + struct clk *clk; + + mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL); + if (!mux) + return ERR_PTR(-ENOMEM); + + mux->has_safe_parent = true; + mux->safe_sel = 0; + mux->mask = 0x3; + mux->shift = 0; + mux->offset = offset; + mux->lpl = id >= 0; + mux->parent_map = pri_mux_map; + mux->hw.init = &init; + + init.name = kasprintf(GFP_KERNEL, "krait%s_pri_mux", s); + if (!init.name) + return ERR_PTR(-ENOMEM); + + p_names[0] = kasprintf(GFP_KERNEL, "hfpll%s", s); + if (!p_names[0]) { + clk = ERR_PTR(-ENOMEM); + goto err_p0; + } + + p_names[1] = 
kasprintf(GFP_KERNEL, "hfpll%s_div", s); + if (!p_names[1]) { + clk = ERR_PTR(-ENOMEM); + goto err_p1; + } + + p_names[2] = kasprintf(GFP_KERNEL, "krait%s_sec_mux", s); + if (!p_names[2]) { + clk = ERR_PTR(-ENOMEM); + goto err_p2; + } + + clk = devm_clk_register(dev, &mux->hw); + + kfree(p_names[2]); +err_p2: + kfree(p_names[1]); +err_p1: + kfree(p_names[0]); +err_p0: + kfree(init.name); + return clk; +} + +/* id < 0 for L2, otherwise id == physical CPU number */ +static struct clk *krait_add_clks(struct device *dev, int id, bool unique_aux) +{ + int ret; + unsigned offset; + void *p = NULL; + const char *s; + struct clk *clk; + + if (id >= 0) { + offset = 0x4501 + (0x1000 * id); + s = p = kasprintf(GFP_KERNEL, "%d", id); + if (!s) + return ERR_PTR(-ENOMEM); + } else { + offset = 0x500; + s = "_l2"; + } + + ret = krait_add_div(dev, id, s, offset); + if (ret) { + clk = ERR_PTR(ret); + goto err; + } + + ret = krait_add_sec_mux(dev, id, s, offset, unique_aux); + if (ret) { + clk = ERR_PTR(ret); + goto err; + } + + clk = krait_add_pri_mux(dev, id, s, offset); +err: + kfree(p); + return clk; +} + +static struct clk *krait_of_get(struct of_phandle_args *clkspec, void *data) +{ + unsigned int idx = clkspec->args[0]; + struct clk **clks = data; + + if (idx >= 5) { + pr_err("%s: invalid clock index %d\n", __func__, idx); + return ERR_PTR(-EINVAL); + } + + return clks[idx] ? 
: ERR_PTR(-ENODEV); +} + +static const struct of_device_id krait_cc_match_table[] = { + { .compatible = "qcom,krait-cc-v1", (void *)1UL }, + { .compatible = "qcom,krait-cc-v2" }, + {} +}; +MODULE_DEVICE_TABLE(of, krait_cc_match_table); + +static int krait_cc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + const struct of_device_id *id; + unsigned long cur_rate, aux_rate; + int cpu; + struct clk *clk; + struct clk **clks; + struct clk *l2_pri_mux_clk; + + id = of_match_device(krait_cc_match_table, dev); + if (!id) + return -ENODEV; + + /* Rate is 1 because 0 causes problems for __clk_mux_determine_rate */ + clk = clk_register_fixed_rate(dev, "qsb", NULL, 0, 1); + if (IS_ERR(clk)) + return PTR_ERR(clk); + + if (!id->data) { + clk = clk_register_fixed_factor(dev, "acpu_aux", + "gpll0_vote", 0, 1, 2); + if (IS_ERR(clk)) + return PTR_ERR(clk); + } + + /* Krait configurations have at most 4 CPUs and one L2 */ + clks = devm_kcalloc(dev, 5, sizeof(*clks), GFP_KERNEL); + if (!clks) + return -ENOMEM; + + for_each_possible_cpu(cpu) { + clk = krait_add_clks(dev, cpu, id->data); + if (IS_ERR(clk)) + return PTR_ERR(clk); + clks[cpu] = clk; + } + + l2_pri_mux_clk = krait_add_clks(dev, -1, id->data); + if (IS_ERR(l2_pri_mux_clk)) + return PTR_ERR(l2_pri_mux_clk); + clks[4] = l2_pri_mux_clk; + + /* + * We don't want the CPU or L2 clocks to be turned off at late init + * if CPUFREQ or HOTPLUG configs are disabled. So, bump up the + * refcount of these clocks. Any cpufreq/hotplug manager can assume + * that the clocks have already been prepared and enabled by the time + * they take over. + */ + for_each_online_cpu(cpu) { + clk_prepare_enable(l2_pri_mux_clk); + WARN(clk_prepare_enable(clks[cpu]), + "Unable to turn on CPU%d clock", cpu); + } + + /* + * Force reinit of HFPLLs and muxes to overwrite any potential + * incorrect configuration of HFPLLs and muxes by the bootloader. 
+ * While at it, also make sure the cores are running at known rates + * and print the current rate. + * + * The clocks are set to aux clock rate first to make sure the + * secondary mux is not sourcing off of QSB. The rate is then set to + * two different rates to force a HFPLL reinit under all + * circumstances. + */ + cur_rate = clk_get_rate(l2_pri_mux_clk); + aux_rate = 384000000; + if (cur_rate == 1) { + pr_info("L2 @ QSB rate. Forcing new rate.\n"); + cur_rate = aux_rate; + } + clk_set_rate(l2_pri_mux_clk, aux_rate); + clk_set_rate(l2_pri_mux_clk, 2); + clk_set_rate(l2_pri_mux_clk, cur_rate); + pr_info("L2 @ %lu KHz\n", clk_get_rate(l2_pri_mux_clk) / 1000); + for_each_possible_cpu(cpu) { + clk = clks[cpu]; + cur_rate = clk_get_rate(clk); + if (cur_rate == 1) { + pr_info("CPU%d @ QSB rate. Forcing new rate.\n", cpu); + cur_rate = aux_rate; + } + clk_set_rate(clk, aux_rate); + clk_set_rate(clk, 2); + clk_set_rate(clk, cur_rate); + pr_info("CPU%d @ %lu KHz\n", cpu, clk_get_rate(clk) / 1000); + } + + of_clk_add_provider(dev->of_node, krait_of_get, clks); + + return 0; +} + +static struct platform_driver krait_cc_driver = { + .probe = krait_cc_probe, + .driver = { + .name = "krait-cc", + .of_match_table = krait_cc_match_table, + }, +}; +module_platform_driver(krait_cc_driver); + +MODULE_DESCRIPTION("Krait CPU Clock Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:krait-cc"); diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c index db3998e5e2d8..977e98eadbeb 100644 --- a/drivers/clk/qcom/lcc-ipq806x.c +++ b/drivers/clk/qcom/lcc-ipq806x.c @@ -443,7 +443,7 @@ static int lcc_ipq806x_probe(struct platform_device *pdev) return PTR_ERR(regmap); /* Configure the rate of PLL4 if the bootloader hasn't already */ - val = regmap_read(regmap, 0x0, &val); + regmap_read(regmap, 0x0, &val); if (!val) clk_pll_configure_sr(&pll4, regmap, &pll4_config, true); /* Enable PLL4 source on the LPASS Primary PLL Mux */ diff --git 
a/drivers/clk/qcom/mmcc-apq8084.c b/drivers/clk/qcom/mmcc-apq8084.c index 30777f9f1a43..df8347bb4111 100644 --- a/drivers/clk/qcom/mmcc-apq8084.c +++ b/drivers/clk/qcom/mmcc-apq8084.c @@ -3390,6 +3390,15 @@ static int mmcc_apq8084_probe(struct platform_device *pdev) { int ret; struct regmap *regmap; + struct device_node *node; + + node = of_find_compatible_node(NULL, NULL, "qcom,rpmcc-apq8084"); + + if (IS_ENABLED(CONFIG_QCOM_CLK_SMD_RPM) && + of_device_is_available(node)) { + /* skip registration for gfx3d, as it is controlled by RPMCC */ + mmcc_apq8084_desc.clks[GFX3D_CLK_SRC] = NULL; + } ret = qcom_cc_probe(pdev, &mmcc_apq8084_desc); if (ret) diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c index 715e7cd94125..62185b9a275a 100644 --- a/drivers/clk/qcom/mmcc-msm8974.c +++ b/drivers/clk/qcom/mmcc-msm8974.c @@ -2616,6 +2616,15 @@ MODULE_DEVICE_TABLE(of, mmcc_msm8974_match_table); static int mmcc_msm8974_probe(struct platform_device *pdev) { struct regmap *regmap; + struct device_node *node; + + node = of_find_compatible_node(NULL, NULL, "qcom,rpmcc-msm8974"); + + if (IS_ENABLED(CONFIG_QCOM_CLK_SMD_RPM) && + of_device_is_available(node)) { + /* skip registration for gfx3d, as it is controlled by RPMCC */ + mmcc_msm8974_desc.clks[GFX3D_CLK_SRC] = NULL; + } regmap = qcom_cc_map(pdev, &mmcc_msm8974_desc); if (IS_ERR(regmap)) diff --git a/drivers/clk/qcom/mmcc-msm8996.c b/drivers/clk/qcom/mmcc-msm8996.c index ca97e1151797..9b97246287a7 100644 --- a/drivers/clk/qcom/mmcc-msm8996.c +++ b/drivers/clk/qcom/mmcc-msm8996.c @@ -2945,6 +2945,7 @@ static struct gdsc venus_core0_gdsc = { .name = "venus_core0", }, .pwrsts = PWRSTS_OFF_ON, + .flags = HW_CTRL, }; static struct gdsc venus_core1_gdsc = { @@ -2955,6 +2956,7 @@ static struct gdsc venus_core1_gdsc = { .name = "venus_core1", }, .pwrsts = PWRSTS_OFF_ON, + .flags = HW_CTRL, }; static struct gdsc camss_gdsc = { @@ -3034,6 +3036,28 @@ static struct gdsc mdss_gdsc = { .pwrsts = PWRSTS_OFF_ON, }; 
+static struct gdsc gpu_gdsc = { + .gdscr = 0x4034, + .gds_hw_ctrl = 0x4038, + .pd = { + .name = "gpu", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, +}; + +static struct gdsc gpu_gx_gdsc = { + .gdscr = 0x4024, + .clamp_io_ctrl = 0x4300, + .cxcs = (unsigned int []){ 0x4028 }, + .cxc_count = 1, + .pd = { + .name = "gpu_gx", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = CLAMP_IO, +}; + static struct clk_regmap *mmcc_msm8996_clocks[] = { [MMPLL0_EARLY] = &mmpll0_early.clkr, [MMPLL0_PLL] = &mmpll0.clkr, @@ -3223,6 +3247,8 @@ static struct gdsc *mmcc_msm8996_gdscs[] = { [CPP_GDSC] = &cpp_gdsc, [FD_GDSC] = &fd_gdsc, [MDSS_GDSC] = &mdss_gdsc, + [GPU_GDSC] = &gpu_gdsc, + [GPU_GX_GDSC] = &gpu_gx_gdsc, }; static const struct qcom_reset_map mmcc_msm8996_resets[] = { diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index d89b8afe23b6..62d5fb1f8be0 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -88,6 +88,15 @@ config ARM_OMAP2PLUS_CPUFREQ depends on ARCH_OMAP2PLUS default ARCH_OMAP2PLUS +config ARM_QCOM_CPUFREQ + tristate "Qualcomm based" + depends on ARCH_QCOM + select PM_OPP + help + This adds the CPUFreq driver for Qualcomm SoC based boards. + + If in doubt, say N. 
+ config ARM_S3C_CPUFREQ bool help diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 0a9b6a093646..1bdf86fd02bf 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -62,6 +62,7 @@ obj-$(CONFIG_ARM_MT8173_CPUFREQ) += mt8173-cpufreq.o obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o obj-$(CONFIG_ARM_PXA2xx_CPUFREQ) += pxa2xx-cpufreq.o obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o +obj-$(CONFIG_ARM_QCOM_CPUFREQ) += qcom-cpufreq.o obj-$(CONFIG_ARM_S3C24XX_CPUFREQ) += s3c24xx-cpufreq.o obj-$(CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS) += s3c24xx-cpufreq-debugfs.o obj-$(CONFIG_ARM_S3C2410_CPUFREQ) += s3c2410-cpufreq.o diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index 71267626456b..b7a5915d4bf6 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -46,6 +46,8 @@ static const struct of_device_id machines[] __initconst = { { .compatible = "samsung,exynos5800", }, #endif + { .compatible = "qcom,apq8016", }, + { .compatible = "renesas,emev2", }, { .compatible = "renesas,r7s72100", }, { .compatible = "renesas,r8a73a4", }, diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index 4d3ec92cbabf..8efc145f443c 100644 --- a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c @@ -32,6 +32,9 @@ struct private_data { struct device *cpu_dev; struct thermal_cooling_device *cdev; const char *reg_name; + struct notifier_block opp_nb; + struct mutex lock; + unsigned long opp_freq; }; static struct freq_attr *cpufreq_dt_attr[] = { @@ -43,9 +46,46 @@ static struct freq_attr *cpufreq_dt_attr[] = { static int set_target(struct cpufreq_policy *policy, unsigned int index) { struct private_data *priv = policy->driver_data; + int ret; + unsigned long target_freq = policy->freq_table[index].frequency * 1000; + struct clk *l2_clk = policy->l2_clk; + unsigned int l2_freq; + unsigned long new_l2_freq = 0; + + mutex_lock(&priv->lock); + ret = 
dev_pm_opp_set_rate(priv->cpu_dev, target_freq); + + if (!ret) { + if (!IS_ERR(l2_clk) && policy->l2_rate[0] && policy->l2_rate[1] && + policy->l2_rate[2]) { + static unsigned long krait_l2[CONFIG_NR_CPUS] = { }; + int cpu, ret = 0; + + if (target_freq >= policy->l2_rate[2]) + new_l2_freq = policy->l2_rate[2]; + else if (target_freq >= policy->l2_rate[1]) + new_l2_freq = policy->l2_rate[1]; + else + new_l2_freq = policy->l2_rate[0]; + + krait_l2[policy->cpu] = new_l2_freq; + for_each_present_cpu(cpu) + new_l2_freq = max(new_l2_freq, krait_l2[cpu]); + + l2_freq = clk_get_rate(l2_clk); + + if (l2_freq != new_l2_freq) { + /* scale l2 with the core */ + ret = clk_set_rate(l2_clk, new_l2_freq); + } + } + + priv->opp_freq = target_freq; + } - return dev_pm_opp_set_rate(priv->cpu_dev, - policy->freq_table[index].frequency * 1000); + mutex_unlock(&priv->lock); + + return ret; } /* @@ -86,6 +126,41 @@ node_put: return name; } +static int opp_notifier(struct notifier_block *nb, unsigned long event, + void *data) +{ + struct dev_pm_opp *opp = data; + struct private_data *priv = container_of(nb, struct private_data, + opp_nb); + struct device *cpu_dev = priv->cpu_dev; + struct regulator *cpu_reg; + unsigned long volt, freq; + int ret = 0; + + if (event == OPP_EVENT_ADJUST_VOLTAGE) { + cpu_reg = dev_pm_opp_get_regulator(cpu_dev); + if (IS_ERR(cpu_reg)) { + ret = PTR_ERR(cpu_reg); + goto out; + } + rcu_read_lock(); + volt = dev_pm_opp_get_voltage(opp); + freq = dev_pm_opp_get_freq(opp); + rcu_read_unlock(); + + mutex_lock(&priv->lock); + if (freq == priv->opp_freq) { + ret = regulator_set_voltage_triplet(cpu_reg, volt, volt, volt); + } + mutex_unlock(&priv->lock); + if (ret) + dev_err(cpu_dev, "failed to scale voltage: %d\n", ret); + } + +out: + return notifier_from_errno(ret); +} + static int resources_available(void) { struct device *cpu_dev; @@ -153,6 +228,9 @@ static int cpufreq_init(struct cpufreq_policy *policy) bool fallback = false; const char *name; int ret; + struct 
srcu_notifier_head *opp_srcu_head; + struct device_node *l2_np; + struct clk *l2_clk = NULL; cpu_dev = get_cpu_device(policy->cpu); if (!cpu_dev) { @@ -239,13 +317,29 @@ static int cpufreq_init(struct cpufreq_policy *policy) goto out_free_opp; } + mutex_init(&priv->lock); + + rcu_read_lock(); + opp_srcu_head = dev_pm_opp_get_notifier(cpu_dev); + if (IS_ERR(opp_srcu_head)) { + ret = PTR_ERR(opp_srcu_head); + rcu_read_unlock(); + goto out_free_priv; + } + + priv->opp_nb.notifier_call = opp_notifier; + ret = srcu_notifier_chain_register(opp_srcu_head, &priv->opp_nb); + rcu_read_unlock(); + if (ret) + goto out_free_priv; + priv->reg_name = name; priv->opp_table = opp_table; ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); if (ret) { dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret); - goto out_free_priv; + goto out_unregister_nb; } priv->cpu_dev = cpu_dev; @@ -258,6 +352,13 @@ static int cpufreq_init(struct cpufreq_policy *policy) policy->suspend_freq = dev_pm_opp_get_freq(suspend_opp) / 1000; rcu_read_unlock(); + l2_clk = clk_get(cpu_dev, "l2"); + if (!IS_ERR(l2_clk)) + policy->l2_clk = l2_clk; + l2_np = of_find_node_by_name(NULL, "qcom,l2"); + if (l2_np) + of_property_read_u32_array(l2_np, "qcom,l2-rates", policy->l2_rate, 3); + ret = cpufreq_table_validate_and_show(policy, freq_table); if (ret) { dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__, @@ -284,6 +385,8 @@ static int cpufreq_init(struct cpufreq_policy *policy) out_free_cpufreq_table: dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); +out_unregister_nb: + srcu_notifier_chain_unregister(opp_srcu_head, &priv->opp_nb); out_free_priv: kfree(priv); out_free_opp: diff --git a/drivers/cpufreq/qcom-cpufreq.c b/drivers/cpufreq/qcom-cpufreq.c new file mode 100644 index 000000000000..c40ee90d07be --- /dev/null +++ b/drivers/cpufreq/qcom-cpufreq.c @@ -0,0 +1,198 @@ +/* Copyright (c) 2014, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/cpu.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pm_opp.h> +#include <linux/slab.h> + +static void __init get_krait_bin_format_a(int *speed, int *pvs, int *pvs_ver) +{ + void __iomem *base; + u32 pte_efuse; + + *speed = *pvs = *pvs_ver = 0; + + base = ioremap(0x007000c0, 4); + if (!base) { + pr_warn("Unable to read efuse data. Defaulting to 0!\n"); + return; + } + + pte_efuse = readl_relaxed(base); + iounmap(base); + + *speed = pte_efuse & 0xf; + if (*speed == 0xf) + *speed = (pte_efuse >> 4) & 0xf; + + if (*speed == 0xf) { + *speed = 0; + pr_warn("Speed bin: Defaulting to %d\n", *speed); + } else { + pr_info("Speed bin: %d\n", *speed); + } + + *pvs = (pte_efuse >> 10) & 0x7; + if (*pvs == 0x7) + *pvs = (pte_efuse >> 13) & 0x7; + + if (*pvs == 0x7) { + *pvs = 0; + pr_warn("PVS bin: Defaulting to %d\n", *pvs); + } else { + pr_info("PVS bin: %d\n", *pvs); + } +} + +static void __init get_krait_bin_format_b(int *speed, int *pvs, int *pvs_ver) +{ + u32 pte_efuse, redundant_sel; + void __iomem *base; + + *speed = 0; + *pvs = 0; + *pvs_ver = 0; + + base = ioremap(0xfc4b80b0, 8); + if (!base) { + pr_warn("Unable to read efuse data. Defaulting to 0!\n"); + return; + } + + pte_efuse = readl_relaxed(base); + redundant_sel = (pte_efuse >> 24) & 0x7; + *speed = pte_efuse & 0x7; + /* 4 bits of PVS are in efuse register bits 31, 8-6. 
*/ + *pvs = ((pte_efuse >> 28) & 0x8) | ((pte_efuse >> 6) & 0x7); + *pvs_ver = (pte_efuse >> 4) & 0x3; + + switch (redundant_sel) { + case 1: + *speed = (pte_efuse >> 27) & 0xf; + break; + case 2: + *pvs = (pte_efuse >> 27) & 0xf; + break; + } + + /* Check SPEED_BIN_BLOW_STATUS */ + if (pte_efuse & BIT(3)) { + pr_info("Speed bin: %d\n", *speed); + } else { + pr_warn("Speed bin not set. Defaulting to 0!\n"); + *speed = 0; + } + + /* Check PVS_BLOW_STATUS */ + pte_efuse = readl_relaxed(base + 0x4) & BIT(21); + if (pte_efuse) { + pr_info("PVS bin: %d\n", *pvs); + } else { + pr_warn("PVS bin not set. Defaulting to 0!\n"); + *pvs = 0; + } + + pr_info("PVS version: %d\n", *pvs_ver); + iounmap(base); +} + +static int __init qcom_cpufreq_populate_opps(void) +{ + int len, rows, cols, i, k, speed, pvs, pvs_ver; + char table_name[] = "qcom,speedXX-pvsXX-bin-vXX"; + struct device_node *np; + struct device *dev; + int cpu = 0; + + np = of_find_node_by_name(NULL, "qcom,pvs"); + if (!np) + return -ENODEV; + + if (of_property_read_bool(np, "qcom,pvs-format-a")) { + get_krait_bin_format_a(&speed, &pvs, &pvs_ver); + cols = 2; + } else if (of_property_read_bool(np, "qcom,pvs-format-b")) { + get_krait_bin_format_b(&speed, &pvs, &pvs_ver); + cols = 3; + } else { + return -ENODEV; + } + + snprintf(table_name, sizeof(table_name), + "qcom,speed%d-pvs%d-bin-v%d", speed, pvs, pvs_ver); + + if (!of_find_property(np, table_name, &len)) + return -EINVAL; + + len /= sizeof(u32); + if (len % cols || len == 0) + return -EINVAL; + + rows = len / cols; + + for (i = 0, k = 0; i < rows; i++) { + u32 freq, volt; + + of_property_read_u32_index(np, table_name, k++, &freq); + of_property_read_u32_index(np, table_name, k++, &volt); + while (k % cols) + k++; /* Skip uA entries if present */ + for (cpu = 0; cpu < num_possible_cpus(); cpu++) { + dev = get_cpu_device(cpu); + if (!dev) + return -ENODEV; + if (dev_pm_opp_add(dev, freq, volt)) + pr_warn("failed to add OPP %u\n", freq); + } + } + + return 0; +} + 
+static int __init qcom_cpufreq_driver_init(void) +{ + struct device *cpu_dev; + struct device_node *np; + int ret; + + cpu_dev = get_cpu_device(0); + if (!cpu_dev) + return -ENODEV; + + np = of_node_get(cpu_dev->of_node); + if (!np) + return -ENOENT; + + if (!of_device_is_compatible(np, "qcom,krait")) { + of_node_put(np); + return -ENODEV; + } + of_node_put(np); + + ret = qcom_cpufreq_populate_opps(); + if (ret) + return ret; + + return PTR_ERR_OR_ZERO(platform_device_register_simple("cpufreq-dt", -1, + NULL, 0)); +} +late_initcall(qcom_cpufreq_driver_init); + +MODULE_DESCRIPTION("Qualcomm CPUfreq driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/firmware/qcom_scm-32.c b/drivers/firmware/qcom_scm-32.c index c6aeedbdcbb0..9195e4db0a2f 100644 --- a/drivers/firmware/qcom_scm-32.c +++ b/drivers/firmware/qcom_scm-32.c @@ -560,3 +560,59 @@ int __qcom_scm_pas_mss_reset(struct device *dev, bool reset) return ret ? : le32_to_cpu(out); } + +int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id) +{ + struct { + __le32 state; + __le32 spare; + } req; + __le32 scm_ret = 0; + int ret; + + req.state = cpu_to_le32(state); + req.spare = cpu_to_le32(id); + + ret = qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_REMOTE_STATE, + &req, sizeof(req), &scm_ret, sizeof(scm_ret)); + + return ret ? 
: le32_to_cpu(scm_ret); +} + +int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare, + size_t *size) +{ + return -ENODEV; +} +int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr, + u32 size, u32 spare) +{ + return -ENODEV; +} +int __qcom_scm_iommu_dump_fault_regs(struct device *dev, u32 id, + u32 context, u64 addr, u32 len) +{ + return -ENODEV; +} + +int __qcom_scm_iommu_secure_map(struct device *dev, u64 list, + u32 list_size, u32 size, u32 id, + u32 ctx_id, u64 va, u32 info_size, + u32 flags) + +{ + return -ENODEV; +} + +int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id, + u32 spare) +{ + return -ENODEV; +} + + +int __qcom_scm_iommu_secure_unmap(struct device *dev, u32 id, u32 ctx_id, + u64 va, u32 size, u32 flags) +{ + return -ENODEV; +} diff --git a/drivers/firmware/qcom_scm-64.c b/drivers/firmware/qcom_scm-64.c index 1e2e5198db53..bb921041b47e 100644 --- a/drivers/firmware/qcom_scm-64.c +++ b/drivers/firmware/qcom_scm-64.c @@ -70,6 +70,115 @@ static DEFINE_MUTEX(qcom_scm_lock); #define FIRST_EXT_ARG_IDX 3 #define N_REGISTER_ARGS (MAX_QCOM_SCM_ARGS - N_EXT_QCOM_SCM_ARGS + 1) +#if 1 + +#define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t" + +#define R0_STR "x0" +#define R1_STR "x1" +#define R2_STR "x2" +#define R3_STR "x3" +#define R4_STR "x4" +#define R5_STR "x5" +#define R6_STR "x6" + +static int __qcom_scm_call_armv8_32(u32 w0, u32 w1, u32 w2, u32 w3, u32 w4, u32 w5, + u64 *ret1, u64 *ret2, u64 *ret3) +{ + register u32 r0 asm("r0") = w0; + register u32 r1 asm("r1") = w1; + register u32 r2 asm("r2") = w2; + register u32 r3 asm("r3") = w3; + register u32 r4 asm("r4") = w4; + register u32 r5 asm("r5") = w5; + register u32 r6 asm("r6") = 0; + + do { + asm volatile( + __asmeq("%0", R0_STR) + __asmeq("%1", R1_STR) + __asmeq("%2", R2_STR) + __asmeq("%3", R3_STR) + __asmeq("%4", R0_STR) + __asmeq("%5", R1_STR) + __asmeq("%6", R2_STR) + __asmeq("%7", R3_STR) + __asmeq("%8", R4_STR) + __asmeq("%9", R5_STR) + 
__asmeq("%10", R6_STR) +#ifdef REQUIRES_SEC + ".arch_extension sec\n" +#endif + "smc #0\n" + : "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3) + : "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4), + "r" (r5), "r" (r6) + : "x7", "x8", "x9", "x10", "x11", "x12", "x13", + "x14", "x15", "x16", "x17"); + + } while (r0 == QCOM_SCM_INTERRUPTED); + + if (ret1) + *ret1 = r1; + if (ret2) + *ret2 = r2; + if (ret3) + *ret3 = r3; + + return r0; +} + +#define QCOM_SCM_SIP_FNID(s, c) (((((s) & 0xFF) << 8) | ((c) & 0xFF)) | 0x02000000) + +static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id, + const struct qcom_scm_desc *desc, + struct arm_smccc_res *res) +{ + int ret, retry_count = 0; + u32 fn_id = QCOM_SCM_SIP_FNID(svc_id, cmd_id); + u64 x0; + u64 ret1, ret2, ret3; + + x0 = fn_id; + + do { + mutex_lock(&qcom_scm_lock); + + ret = ret1 = ret2 = ret3 = 0; + + ret = __qcom_scm_call_armv8_32(x0, desc->arginfo, + desc->args[0], desc->args[1], + desc->args[2], desc->args[3], + &ret1, &ret2, &ret3); + mutex_unlock(&qcom_scm_lock); + + if (ret == QCOM_SCM_V2_EBUSY) + msleep(QCOM_SCM_EBUSY_WAIT_MS); + + } while (ret == QCOM_SCM_V2_EBUSY && (retry_count++ < QCOM_SCM_EBUSY_MAX_RETRY)); + + res->a0 = (unsigned long)ret; + res->a1 = ret1; + res->a2 = ret2; + res->a3 = ret3; + + if (ret < 0) + pr_err("%s: error: funcid %llx, arginfo: %x, " + "args: %llx, %llx, %llx, %llx, " + "syscall returns: %lx, %llx, %llx, %llx" + " (%d)\n", __func__, + x0, desc->arginfo, + desc->args[0], desc->args[1], desc->args[2], desc->args[3], + res->a0, ret1, ret2, ret3, ret); + + if (ret < 0) + return qcom_scm_remap_error(ret); + + return 0; + +} + +#else /** * qcom_scm_call() - Invoke a syscall in the secure world * @dev: device @@ -151,19 +260,22 @@ static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id, break; msleep(QCOM_SCM_EBUSY_WAIT_MS); } - } while (res->a0 == QCOM_SCM_V2_EBUSY); + } while ((signed long)res->a0 == QCOM_SCM_V2_EBUSY); if (args_virt) { dma_unmap_single(dev, args_phys, 
alloc_len, DMA_TO_DEVICE); kfree(args_virt); } - if (res->a0 < 0) + if (res->a0) { + dev_err(dev, "%s: error %lx (a1:%pa, a2:%pa, a3:%pa)\n", __func__, + res->a0, &res->a1, &res->a2, &res->a3); return qcom_scm_remap_error(res->a0); + } return 0; } - +#endif /** * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus * @entry: Entry point function for the cpus @@ -365,3 +477,141 @@ int __qcom_scm_pas_mss_reset(struct device *dev, bool reset) return ret ? : res.a1; } + +int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id) +{ + struct qcom_scm_desc desc = {0}; + struct arm_smccc_res res; + int ret; + + desc.args[0] = state; + desc.args[1] = id; + desc.arginfo = QCOM_SCM_ARGS(2); + + ret = qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_REMOTE_STATE, + &desc, &res); + + return ret ? : res.a1; +} + +int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare, + size_t *size) +{ + struct qcom_scm_desc desc = {0}; + struct arm_smccc_res res; + int ret; + + desc.args[0] = spare; + desc.arginfo = QCOM_SCM_ARGS(1); + + ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP, + QCOM_SCM_IOMMU_SECURE_PTBL_SIZE, &desc, &res); + + if (size) + *size = res.a1; + + return ret ? 
: res.a2; +} + +int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr, u32 size, + u32 spare) +{ + struct qcom_scm_desc desc = {0}; + struct arm_smccc_res res; + int ret; + + desc.args[0] = addr; + desc.args[1] = size; + desc.args[2] = spare; + desc.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL, + QCOM_SCM_VAL); + + ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP, + QCOM_SCM_IOMMU_SECURE_PTBL_INIT, &desc, &res); + + /* the pg table has been initialized already, ignore the error */ + if (ret == -EPERM) + ret = 0; + + return ret; +} + +int __qcom_scm_iommu_dump_fault_regs(struct device *dev, u32 id, u32 context, + u64 addr, u32 len) +{ + struct qcom_scm_desc desc = {0}; + struct arm_smccc_res res; + int ret; + + desc.args[0] = id; + desc.args[1] = context; + desc.args[2] = addr; + desc.args[3] = len; + desc.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL, QCOM_SCM_RW, + QCOM_SCM_VAL); + + ret = qcom_scm_call(dev, QCOM_SCM_SVC_UTIL, + QCOM_SCM_IOMMU_DUMP_SMMU_FAULT_REGS, &desc, &res); + return ret ? : res.a1; +} + +int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id, u32 spare) +{ + struct qcom_scm_desc desc = {0}; + struct arm_smccc_res res; + int ret; + + desc.args[0] = device_id; + desc.args[1] = spare; + desc.arginfo = QCOM_SCM_ARGS(2); + + ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP, QCOM_SCM_RESTORE_SEC_CFG, + &desc, &res); + + return ret ? 
: res.a1; +} + +int __qcom_scm_iommu_secure_map(struct device *dev, u64 list, u32 list_size, + u32 size, u32 id, u32 ctx_id, u64 va, + u32 info_size, u32 flags) +{ + struct qcom_scm_desc desc = {0}; + struct arm_smccc_res res; + int ret; + + desc.args[0] = list; + desc.args[1] = list_size; + desc.args[2] = size; + desc.args[3] = id; + desc.args[4] = ctx_id; + desc.args[5] = va; + desc.args[6] = info_size; + desc.args[7] = flags; + desc.arginfo = QCOM_SCM_ARGS(8, QCOM_SCM_RW, QCOM_SCM_VAL, QCOM_SCM_VAL, + QCOM_SCM_VAL, QCOM_SCM_VAL, QCOM_SCM_VAL, + QCOM_SCM_VAL, QCOM_SCM_VAL); + + ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP, + QCOM_SCM_IOMMU_SECURE_MAP2_FLAT, &desc, &res); + + return ret ? : res.a1; +} + +int __qcom_scm_iommu_secure_unmap(struct device *dev, u32 id, u32 ctx_id, + u64 va, u32 size, u32 flags) +{ + struct qcom_scm_desc desc = {0}; + struct arm_smccc_res res; + int ret; + + desc.args[0] = id; + desc.args[1] = ctx_id; + desc.args[2] = va; + desc.args[3] = size; + desc.args[4] = flags; + desc.arginfo = QCOM_SCM_ARGS(5); + + ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP, + QCOM_SCM_IOMMU_SECURE_UNMAP2_FLAT, &desc, &res); + return ret ? 
: res.a1; +} diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c index d95c70227c05..46343a75e1a0 100644 --- a/drivers/firmware/qcom_scm.c +++ b/drivers/firmware/qcom_scm.c @@ -28,6 +28,10 @@ #include "qcom_scm.h" +#define SCM_HAS_CORE_CLK BIT(0) +#define SCM_HAS_IFACE_CLK BIT(1) +#define SCM_HAS_BUS_CLK BIT(2) + struct qcom_scm { struct device *dev; struct clk *core_clk; @@ -320,35 +324,95 @@ bool qcom_scm_is_available(void) } EXPORT_SYMBOL(qcom_scm_is_available); +int qcom_scm_set_remote_state(u32 state, u32 id) +{ + return __qcom_scm_set_remote_state(__scm->dev, state, id); +} +EXPORT_SYMBOL(qcom_scm_set_remote_state); + +int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size) +{ + return __qcom_scm_iommu_secure_ptbl_size(__scm->dev, spare, size); +} +EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size); + +int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare) +{ + return __qcom_scm_iommu_secure_ptbl_init(__scm->dev, addr, size, spare); +} +EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init); + +int qcom_scm_iommu_dump_fault_regs(u32 id, u32 context, u64 addr, u32 len) +{ + return __qcom_scm_iommu_dump_fault_regs(__scm->dev, id, context, addr, + len); +} +EXPORT_SYMBOL(qcom_scm_iommu_dump_fault_regs); + +int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare) +{ + return __qcom_scm_restore_sec_cfg(__scm->dev, device_id, spare); +} +EXPORT_SYMBOL(qcom_scm_restore_sec_cfg); + +int qcom_scm_iommu_secure_map(u64 list, u32 list_size, u32 size, u32 id, + u32 ctx_id, u64 va, u32 info_size, u32 flags) +{ + return __qcom_scm_iommu_secure_map(__scm->dev, list, list_size, size, + id, ctx_id, va, info_size, flags); +} +EXPORT_SYMBOL(qcom_scm_iommu_secure_map); + +int qcom_scm_iommu_secure_unmap(u32 id, u32 ctx_id, u64 va, u32 size, u32 flags) +{ + return __qcom_scm_iommu_secure_unmap(__scm->dev, id, ctx_id, va, size, + flags); +} +EXPORT_SYMBOL(qcom_scm_iommu_secure_unmap); + +int qcom_scm_is_call_available(u32 svc_id, u32 cmd_id) +{ + return 
__qcom_scm_is_call_available(__scm->dev, svc_id, cmd_id); +} +EXPORT_SYMBOL(qcom_scm_is_call_available); + static int qcom_scm_probe(struct platform_device *pdev) { struct qcom_scm *scm; + unsigned long clks; int ret; scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL); if (!scm) return -ENOMEM; - scm->core_clk = devm_clk_get(&pdev->dev, "core"); - if (IS_ERR(scm->core_clk)) { - if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER) + clks = (unsigned long)of_device_get_match_data(&pdev->dev); + if (clks & SCM_HAS_CORE_CLK) { + scm->core_clk = devm_clk_get(&pdev->dev, "core"); + if (IS_ERR(scm->core_clk)) { + if (PTR_ERR(scm->core_clk) != -EPROBE_DEFER) + dev_err(&pdev->dev, + "failed to acquire core clk\n"); return PTR_ERR(scm->core_clk); - - scm->core_clk = NULL; + } } - if (of_device_is_compatible(pdev->dev.of_node, "qcom,scm")) { + if (clks & SCM_HAS_IFACE_CLK) { scm->iface_clk = devm_clk_get(&pdev->dev, "iface"); if (IS_ERR(scm->iface_clk)) { if (PTR_ERR(scm->iface_clk) != -EPROBE_DEFER) - dev_err(&pdev->dev, "failed to acquire iface clk\n"); + dev_err(&pdev->dev, + "failed to acquire iface clk\n"); return PTR_ERR(scm->iface_clk); } + } + if (clks & SCM_HAS_BUS_CLK) { scm->bus_clk = devm_clk_get(&pdev->dev, "bus"); if (IS_ERR(scm->bus_clk)) { if (PTR_ERR(scm->bus_clk) != -EPROBE_DEFER) - dev_err(&pdev->dev, "failed to acquire bus clk\n"); + dev_err(&pdev->dev, + "failed to acquire bus clk\n"); return PTR_ERR(scm->bus_clk); } } @@ -356,7 +420,9 @@ static int qcom_scm_probe(struct platform_device *pdev) scm->reset.ops = &qcom_scm_pas_reset_ops; scm->reset.nr_resets = 1; scm->reset.of_node = pdev->dev.of_node; - reset_controller_register(&scm->reset); + ret = devm_reset_controller_register(&pdev->dev, &scm->reset); + if (ret) + return ret; /* vote for max clk rate for highest performance */ ret = clk_set_rate(scm->core_clk, INT_MAX); @@ -372,10 +438,23 @@ static int qcom_scm_probe(struct platform_device *pdev) } static const struct of_device_id qcom_scm_dt_match[] = { 
- { .compatible = "qcom,scm-apq8064",}, - { .compatible = "qcom,scm-msm8660",}, - { .compatible = "qcom,scm-msm8960",}, - { .compatible = "qcom,scm",}, + { .compatible = "qcom,scm-apq8064", + .data = (void *) SCM_HAS_CORE_CLK, + }, + { .compatible = "qcom,scm-msm8660", + .data = (void *) SCM_HAS_CORE_CLK, + }, + { .compatible = "qcom,scm-msm8960", + .data = (void *) SCM_HAS_CORE_CLK, + }, + { .compatible = "qcom,scm-msm8996", + .data = NULL, /* no clocks */ + }, + { .compatible = "qcom,scm", + .data = (void *)(SCM_HAS_CORE_CLK + | SCM_HAS_IFACE_CLK + | SCM_HAS_BUS_CLK), + }, {} }; diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h index 3584b00fe7e6..3e848d9d5436 100644 --- a/drivers/firmware/qcom_scm.h +++ b/drivers/firmware/qcom_scm.h @@ -15,6 +15,8 @@ #define QCOM_SCM_SVC_BOOT 0x1 #define QCOM_SCM_BOOT_ADDR 0x1 #define QCOM_SCM_BOOT_ADDR_MC 0x11 +#define QCOM_SCM_SET_REMOTE_STATE 0xa +extern int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id); #define QCOM_SCM_FLAG_HLOS 0x01 #define QCOM_SCM_FLAG_COLDBOOT_MC 0x02 @@ -56,8 +58,35 @@ extern int __qcom_scm_pas_auth_and_reset(struct device *dev, u32 peripheral); extern int __qcom_scm_pas_shutdown(struct device *dev, u32 peripheral); extern int __qcom_scm_pas_mss_reset(struct device *dev, bool reset); +#define QCOM_SCM_SVC_MP 0xc +#define QCOM_SCM_IOMMU_SECURE_PTBL_SIZE 3 +#define QCOM_SCM_IOMMU_SECURE_PTBL_INIT 4 +extern int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare, + size_t *size); +extern int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr, + u32 size, u32 spare); + +#define QCOM_SCM_SVC_UTIL 0x3 +#define QCOM_SCM_IOMMU_DUMP_SMMU_FAULT_REGS 0xc +extern int __qcom_scm_iommu_dump_fault_regs(struct device *dev, u32 id, + u32 context, u64 addr, u32 len); + +#define QCOM_SCM_RESTORE_SEC_CFG 2 +extern int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id, + u32 spare); + +#define QCOM_SCM_IOMMU_SECURE_MAP2_FLAT 0x12 +extern int 
__qcom_scm_iommu_secure_map(struct device *dev, u64 list, + u32 list_size, u32 size, u32 id, + u32 ctx_id, u64 va, u32 info_size, + u32 flags); +#define QCOM_SCM_IOMMU_SECURE_UNMAP2_FLAT 0x13 +extern int __qcom_scm_iommu_secure_unmap(struct device *dev, u32 id, u32 ctx_id, + u64 va, u32 size, u32 flags); + /* common error codes */ #define QCOM_SCM_V2_EBUSY -12 +#define QCOM_SCM_NOT_PERMITTED -8 #define QCOM_SCM_ENOMEM -5 #define QCOM_SCM_EOPNOTSUPP -4 #define QCOM_SCM_EINVAL_ADDR -3 @@ -79,6 +108,8 @@ static inline int qcom_scm_remap_error(int err) return -ENOMEM; case QCOM_SCM_V2_EBUSY: return -EBUSY; + case QCOM_SCM_NOT_PERMITTED: + return -EPERM; } return -EINVAL; } diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 25c720454017..06aba1cd04e9 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -15,7 +15,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \ drm_modeset_lock.o drm_atomic.o drm_bridge.o \ drm_framebuffer.o drm_connector.o drm_blend.o \ drm_encoder.o drm_mode_object.o drm_property.o \ - drm_plane.o drm_color_mgmt.o + drm_plane.o drm_color_mgmt.o drm_print.o drm-$(CONFIG_COMPAT) += drm_ioc32.o drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c index e138fb51e8ce..474da5e86662 100644 --- a/drivers/gpu/drm/arm/hdlcd_drv.c +++ b/drivers/gpu/drm/arm/hdlcd_drv.c @@ -453,7 +453,8 @@ static int hdlcd_probe(struct platform_device *pdev) return -EAGAIN; } - component_match_add(&pdev->dev, &match, compare_dev, port); + drm_of_component_match_add(&pdev->dev, &match, compare_dev, port); + of_node_put(port); return component_master_add_with_match(&pdev->dev, &hdlcd_master_ops, match); diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c index 9280358b8f15..9f4739452a25 100644 --- a/drivers/gpu/drm/arm/malidp_drv.c +++ b/drivers/gpu/drm/arm/malidp_drv.c @@ -493,7 +493,9 @@ static int malidp_platform_probe(struct 
platform_device *pdev) return -EAGAIN; } - component_match_add(&pdev->dev, &match, malidp_compare_dev, port); + drm_of_component_match_add(&pdev->dev, &match, malidp_compare_dev, + port); + of_node_put(port); return component_master_add_with_match(&pdev->dev, &malidp_master_ops, match); } diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c index 1e0e68f608e4..94e46da9a758 100644 --- a/drivers/gpu/drm/armada/armada_drv.c +++ b/drivers/gpu/drm/armada/armada_drv.c @@ -254,7 +254,7 @@ static void armada_add_endpoints(struct device *dev, continue; } - component_match_add(dev, match, compare_of, remote); + drm_of_component_match_add(dev, match, compare_of, remote); of_node_put(remote); } } diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c index 9d4c030672f0..b380179d4013 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c @@ -393,7 +393,7 @@ static void atmel_hlcdc_plane_update_format(struct atmel_hlcdc_plane *plane, if ((state->base.fb->pixel_format == DRM_FORMAT_YUV422 || state->base.fb->pixel_format == DRM_FORMAT_NV61) && - (state->base.rotation & (DRM_ROTATE_90 | DRM_ROTATE_270))) + drm_rotation_90_or_270(state->base.rotation)) cfg |= ATMEL_HLCDC_YUV422ROT; atmel_hlcdc_layer_update_cfg(&plane->layer, @@ -628,7 +628,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p, /* * Swap width and size in case of 90 or 270 degrees rotation */ - if (state->base.rotation & (DRM_ROTATE_90 | DRM_ROTATE_270)) { + if (drm_rotation_90_or_270(state->base.rotation)) { tmp = state->crtc_w; state->crtc_w = state->crtc_h; state->crtc_h = tmp; diff --git a/drivers/gpu/drm/bridge/adv7511/Kconfig b/drivers/gpu/drm/bridge/adv7511/Kconfig index d2b0499ab7d7..2fed567f9943 100644 --- a/drivers/gpu/drm/bridge/adv7511/Kconfig +++ b/drivers/gpu/drm/bridge/adv7511/Kconfig @@ -6,6 +6,14 @@ config DRM_I2C_ADV7511 help Support for 
the Analog Device ADV7511(W) and ADV7513 HDMI encoders. +config DRM_I2C_ADV7511_AUDIO + bool "ADV7511 HDMI Audio driver" + depends on DRM_I2C_ADV7511 && SND_SOC + select SND_SOC_HDMI_CODEC + help + Support the ADV7511 HDMI Audio interface. This is used in + conjunction with the AV7511 HDMI driver. + config DRM_I2C_ADV7533 bool "ADV7533 encoder" depends on DRM_I2C_ADV7511 diff --git a/drivers/gpu/drm/bridge/adv7511/Makefile b/drivers/gpu/drm/bridge/adv7511/Makefile index 9019327fff4c..5ba675534f6e 100644 --- a/drivers/gpu/drm/bridge/adv7511/Makefile +++ b/drivers/gpu/drm/bridge/adv7511/Makefile @@ -1,3 +1,4 @@ adv7511-y := adv7511_drv.o +adv7511-$(CONFIG_DRM_I2C_ADV7511_AUDIO) += adv7511_audio.o adv7511-$(CONFIG_DRM_I2C_ADV7533) += adv7533.o obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511.o diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h index 3e74e1a6584c..fe18a5d2d84b 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511.h +++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h @@ -12,6 +12,7 @@ #include <linux/hdmi.h> #include <linux/i2c.h> #include <linux/regmap.h> +#include <linux/regulator/consumer.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_mipi_dsi.h> @@ -309,6 +310,8 @@ struct adv7511 { struct drm_display_mode curr_mode; unsigned int f_tmds; + unsigned int f_audio; + unsigned int audio_source; unsigned int current_edid_segment; uint8_t edid_buf[256]; @@ -329,6 +332,9 @@ struct adv7511 { struct gpio_desc *gpio_pd; + struct regulator_bulk_data *supplies; + unsigned int num_supplies; + /* ADV7533 DSI RX related params */ struct device_node *host_node; struct mipi_dsi_device *dsi; @@ -336,6 +342,7 @@ struct adv7511 { bool use_timing_gen; enum adv7511_type type; + struct platform_device *audio_pdev; }; #ifdef CONFIG_DRM_I2C_ADV7533 @@ -391,4 +398,17 @@ static inline int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv) } #endif +#ifdef CONFIG_DRM_I2C_ADV7511_AUDIO +int adv7511_audio_init(struct device *dev, 
struct adv7511 *adv7511); +void adv7511_audio_exit(struct adv7511 *adv7511); +#else /*CONFIG_DRM_I2C_ADV7511_AUDIO */ +static inline int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511) +{ + return 0; +} +static inline void adv7511_audio_exit(struct adv7511 *adv7511) +{ +} +#endif /* CONFIG_DRM_I2C_ADV7511_AUDIO */ + #endif /* __DRM_I2C_ADV7511_H__ */ diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c new file mode 100644 index 000000000000..cf92ebfe6ab7 --- /dev/null +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c @@ -0,0 +1,213 @@ +/* + * Analog Devices ADV7511 HDMI transmitter driver + * + * Copyright 2012 Analog Devices Inc. + * Copyright (c) 2016, Linaro Limited + * + * Licensed under the GPL-2. + */ + +#include <sound/core.h> +#include <sound/hdmi-codec.h> +#include <sound/pcm.h> +#include <sound/soc.h> + +#include "adv7511.h" + +static void adv7511_calc_cts_n(unsigned int f_tmds, unsigned int fs, + unsigned int *cts, unsigned int *n) +{ + switch (fs) { + case 32000: + *n = 4096; + break; + case 44100: + *n = 6272; + break; + case 48000: + *n = 6144; + break; + } + + *cts = ((f_tmds * *n) / (128 * fs)) * 1000; +} + +static int adv7511_update_cts_n(struct adv7511 *adv7511) +{ + unsigned int cts = 0; + unsigned int n = 0; + + adv7511_calc_cts_n(adv7511->f_tmds, adv7511->f_audio, &cts, &n); + + regmap_write(adv7511->regmap, ADV7511_REG_N0, (n >> 16) & 0xf); + regmap_write(adv7511->regmap, ADV7511_REG_N1, (n >> 8) & 0xff); + regmap_write(adv7511->regmap, ADV7511_REG_N2, n & 0xff); + + regmap_write(adv7511->regmap, ADV7511_REG_CTS_MANUAL0, + (cts >> 16) & 0xf); + regmap_write(adv7511->regmap, ADV7511_REG_CTS_MANUAL1, + (cts >> 8) & 0xff); + regmap_write(adv7511->regmap, ADV7511_REG_CTS_MANUAL2, + cts & 0xff); + + return 0; +} + +int adv7511_hdmi_hw_params(struct device *dev, void *data, + struct hdmi_codec_daifmt *fmt, + struct hdmi_codec_params *hparms) +{ + struct adv7511 *adv7511 = 
dev_get_drvdata(dev); + unsigned int audio_source, i2s_format = 0; + unsigned int invert_clock; + unsigned int rate; + unsigned int len; + + switch (hparms->sample_rate) { + case 32000: + rate = ADV7511_SAMPLE_FREQ_32000; + break; + case 44100: + rate = ADV7511_SAMPLE_FREQ_44100; + break; + case 48000: + rate = ADV7511_SAMPLE_FREQ_48000; + break; + case 88200: + rate = ADV7511_SAMPLE_FREQ_88200; + break; + case 96000: + rate = ADV7511_SAMPLE_FREQ_96000; + break; + case 176400: + rate = ADV7511_SAMPLE_FREQ_176400; + break; + case 192000: + rate = ADV7511_SAMPLE_FREQ_192000; + break; + default: + return -EINVAL; + } + + switch (hparms->sample_width) { + case 16: + len = ADV7511_I2S_SAMPLE_LEN_16; + break; + case 18: + len = ADV7511_I2S_SAMPLE_LEN_18; + break; + case 20: + len = ADV7511_I2S_SAMPLE_LEN_20; + break; + case 24: + len = ADV7511_I2S_SAMPLE_LEN_24; + break; + default: + return -EINVAL; + } + + switch (fmt->fmt) { + case HDMI_I2S: + audio_source = ADV7511_AUDIO_SOURCE_I2S; + i2s_format = ADV7511_I2S_FORMAT_I2S; + break; + case HDMI_RIGHT_J: + audio_source = ADV7511_AUDIO_SOURCE_I2S; + i2s_format = ADV7511_I2S_FORMAT_RIGHT_J; + break; + case HDMI_LEFT_J: + audio_source = ADV7511_AUDIO_SOURCE_I2S; + i2s_format = ADV7511_I2S_FORMAT_LEFT_J; + break; + default: + return -EINVAL; + } + + invert_clock = fmt->bit_clk_inv; + + regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_SOURCE, 0x70, + audio_source << 4); + regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG, BIT(6), + invert_clock << 6); + regmap_update_bits(adv7511->regmap, ADV7511_REG_I2S_CONFIG, 0x03, + i2s_format); + + adv7511->audio_source = audio_source; + + adv7511->f_audio = hparms->sample_rate; + + adv7511_update_cts_n(adv7511); + + regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CFG3, + ADV7511_AUDIO_CFG3_LEN_MASK, len); + regmap_update_bits(adv7511->regmap, ADV7511_REG_I2C_FREQ_ID_CFG, + ADV7511_I2C_FREQ_ID_CFG_RATE_MASK, rate << 4); + regmap_write(adv7511->regmap, 0x73, 0x1); + + 
return 0; +} + +static int audio_startup(struct device *dev, void *data) +{ + struct adv7511 *adv7511 = dev_get_drvdata(dev); + + regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG, + BIT(7), 0); + + /* hide Audio infoframe updates */ + regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE, + BIT(5), BIT(5)); + /* enable N/CTS, enable Audio sample packets */ + regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1, + BIT(5), BIT(5)); + /* enable N/CTS */ + regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1, + BIT(6), BIT(6)); + /* not copyrighted */ + regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CFG1, + BIT(5), BIT(5)); + /* enable audio infoframes */ + regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1, + BIT(3), BIT(3)); + /* AV mute disable */ + regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(0), + BIT(7) | BIT(6), BIT(7)); + /* use Audio infoframe updated info */ + regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(1), + BIT(5), 0); + return 0; +} + +static void audio_shutdown(struct device *dev, void *data) +{ +} + +static const struct hdmi_codec_ops adv7511_codec_ops = { + .hw_params = adv7511_hdmi_hw_params, + .audio_shutdown = audio_shutdown, + .audio_startup = audio_startup, +}; + +static struct hdmi_codec_pdata codec_data = { + .ops = &adv7511_codec_ops, + .max_i2s_channels = 2, + .i2s = 1, +}; + +int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511) +{ + adv7511->audio_pdev = platform_device_register_data(dev, + HDMI_CODEC_DRV_NAME, + PLATFORM_DEVID_AUTO, + &codec_data, + sizeof(codec_data)); + return PTR_ERR_OR_ZERO(adv7511->audio_pdev); +} + +void adv7511_audio_exit(struct adv7511 *adv7511) +{ + if (adv7511->audio_pdev) { + platform_device_unregister(adv7511->audio_pdev); + adv7511->audio_pdev = NULL; + } +} diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index 213d892b6fa3..e6b51bab0d30 100644 --- 
a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -325,7 +325,7 @@ static void adv7511_set_link_config(struct adv7511 *adv7511, adv7511->rgb = config->input_colorspace == HDMI_COLORSPACE_RGB; } -static void adv7511_power_on(struct adv7511 *adv7511) +static void __adv7511_power_on(struct adv7511 *adv7511) { adv7511->current_edid_segment = -1; @@ -338,7 +338,7 @@ static void adv7511_power_on(struct adv7511 *adv7511) * Still, let's be safe and stick to the documentation. */ regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0), - ADV7511_INT0_EDID_READY); + ADV7511_INT0_EDID_READY | ADV7511_INT0_HPD); regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1), ADV7511_INT1_DDC_ERROR); } @@ -354,6 +354,11 @@ static void adv7511_power_on(struct adv7511 *adv7511) regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, ADV7511_REG_POWER2_HPD_SRC_MASK, ADV7511_REG_POWER2_HPD_SRC_NONE); +} + +static void adv7511_power_on(struct adv7511 *adv7511) +{ + __adv7511_power_on(adv7511); /* * Most of the registers are reset during power down or when HPD is low. 
@@ -362,21 +367,23 @@ static void adv7511_power_on(struct adv7511 *adv7511) if (adv7511->type == ADV7533) adv7533_dsi_power_on(adv7511); - adv7511->powered = true; } -static void adv7511_power_off(struct adv7511 *adv7511) +static void __adv7511_power_off(struct adv7511 *adv7511) { /* TODO: setup additional power down modes */ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER, ADV7511_POWER_POWER_DOWN, ADV7511_POWER_POWER_DOWN); regcache_mark_dirty(adv7511->regmap); +} +static void adv7511_power_off(struct adv7511 *adv7511) +{ + __adv7511_power_off(adv7511); if (adv7511->type == ADV7533) adv7533_dsi_power_off(adv7511); - adv7511->powered = false; } @@ -402,10 +409,14 @@ static bool adv7511_hpd(struct adv7511 *adv7511) return false; } +static int adv7511_get_modes(struct adv7511 *adv7511, + struct drm_connector *connector); + static void adv7511_hpd_work(struct work_struct *work) { struct adv7511 *adv7511 = container_of(work, struct adv7511, hpd_work); enum drm_connector_status status; + struct drm_device *dev = adv7511->connector.dev; unsigned int val; int ret; @@ -417,9 +428,31 @@ static void adv7511_hpd_work(struct work_struct *work) else status = connector_status_disconnected; + /* + * see adv7511_detect(), we do the same thing, but don't check + * for the ADV7511_INT0_HPD bit in ADV7511_REG_INT(0) since we've + * already checked that. 
+ */ + if (status == connector_status_connected && adv7511->powered) { + regcache_mark_dirty(adv7511->regmap); + adv7511_power_on(adv7511); + + mutex_lock(&dev->mode_config.mutex); + adv7511_get_modes(adv7511, &adv7511->connector); + mutex_unlock(&dev->mode_config.mutex); + + if (adv7511->status == connector_status_connected) + status = connector_status_disconnected; + } else { + regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, + ADV7511_REG_POWER2_HPD_SRC_MASK, + ADV7511_REG_POWER2_HPD_SRC_BOTH); + } + + adv7511->status = status; if (adv7511->connector.status != status) { adv7511->connector.status = status; - drm_kms_helper_hotplug_event(adv7511->connector.dev); + drm_kms_helper_hotplug_event(dev); } } @@ -567,23 +600,20 @@ static int adv7511_get_modes(struct adv7511 *adv7511, /* Reading the EDID only works if the device is powered */ if (!adv7511->powered) { - regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER, - ADV7511_POWER_POWER_DOWN, 0); - if (adv7511->i2c_main->irq) { - regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0), - ADV7511_INT0_EDID_READY); - regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1), - ADV7511_INT1_DDC_ERROR); - } - adv7511->current_edid_segment = -1; + unsigned int edid_i2c_addr = + (adv7511->i2c_main->addr << 1) + 4; + + __adv7511_power_on(adv7511); + + /* Reset the EDID_I2C_ADDR register as it might be cleared */ + regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR, + edid_i2c_addr); } edid = drm_do_get_edid(connector, adv7511_get_edid_block, adv7511); if (!adv7511->powered) - regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER, - ADV7511_POWER_POWER_DOWN, - ADV7511_POWER_POWER_DOWN); + __adv7511_power_off(adv7511); kfree(adv7511->edid); adv7511->edid = edid; @@ -846,6 +876,10 @@ static int adv7511_bridge_attach(struct drm_bridge *bridge) if (adv->type == ADV7533) ret = adv7533_attach_dsi(adv); + if (adv->i2c_main->irq) + regmap_write(adv->regmap, ADV7511_REG_INT_ENABLE(0), + ADV7511_INT0_HPD); + return ret; } 
@@ -860,6 +894,58 @@ static struct drm_bridge_funcs adv7511_bridge_funcs = { * Probe & remove */ +static const char * const adv7511_supply_names[] = { + "avdd", + "dvdd", + "pvdd", + "bgvdd", + "dvdd-3v", +}; + +static const char * const adv7533_supply_names[] = { + "avdd", + "dvdd", + "pvdd", + "a2vdd", + "v3p3", + "v1p2", +}; + +static int adv7511_init_regulators(struct adv7511 *adv) +{ + struct device *dev = &adv->i2c_main->dev; + const char * const *supply_names; + unsigned int i; + int ret; + + if (adv->type == ADV7511) { + supply_names = adv7511_supply_names; + adv->num_supplies = ARRAY_SIZE(adv7511_supply_names); + } else { + supply_names = adv7533_supply_names; + adv->num_supplies = ARRAY_SIZE(adv7533_supply_names); + } + + adv->supplies = devm_kcalloc(dev, adv->num_supplies, + sizeof(*adv->supplies), GFP_KERNEL); + if (!adv->supplies) + return -ENOMEM; + + for (i = 0; i < adv->num_supplies; i++) + adv->supplies[i].supply = supply_names[i]; + + ret = devm_regulator_bulk_get(dev, adv->num_supplies, adv->supplies); + if (ret) + return ret; + + return regulator_bulk_enable(adv->num_supplies, adv->supplies); +} + +static void adv7511_uninit_regulators(struct adv7511 *adv) +{ + regulator_bulk_disable(adv->num_supplies, adv->supplies); +} + static int adv7511_parse_dt(struct device_node *np, struct adv7511_link_config *config) { @@ -960,6 +1046,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id) if (!adv7511) return -ENOMEM; + adv7511->i2c_main = i2c; adv7511->powered = false; adv7511->status = connector_status_disconnected; @@ -977,13 +1064,21 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id) if (ret) return ret; + ret = adv7511_init_regulators(adv7511); + if (ret) { + dev_err(dev, "failed to init regulators\n"); + return ret; + } + /* * The power down GPIO is optional. If present, toggle it from active to * inactive to wake up the encoder. 
*/ adv7511->gpio_pd = devm_gpiod_get_optional(dev, "pd", GPIOD_OUT_HIGH); - if (IS_ERR(adv7511->gpio_pd)) - return PTR_ERR(adv7511->gpio_pd); + if (IS_ERR(adv7511->gpio_pd)) { + ret = PTR_ERR(adv7511->gpio_pd); + goto uninit_regulators; + } if (adv7511->gpio_pd) { mdelay(5); @@ -991,12 +1086,14 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id) } adv7511->regmap = devm_regmap_init_i2c(i2c, &adv7511_regmap_config); - if (IS_ERR(adv7511->regmap)) - return PTR_ERR(adv7511->regmap); + if (IS_ERR(adv7511->regmap)) { + ret = PTR_ERR(adv7511->regmap); + goto uninit_regulators; + } ret = regmap_read(adv7511->regmap, ADV7511_REG_CHIP_REVISION, &val); if (ret) - return ret; + goto uninit_regulators; dev_dbg(dev, "Rev. %d\n", val); if (adv7511->type == ADV7511) @@ -1006,7 +1103,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id) else ret = adv7533_patch_registers(adv7511); if (ret) - return ret; + goto uninit_regulators; regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR, edid_i2c_addr); regmap_write(adv7511->regmap, ADV7511_REG_PACKET_I2C_ADDR, @@ -1016,10 +1113,11 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id) adv7511_packet_disable(adv7511, 0xffff); - adv7511->i2c_main = i2c; adv7511->i2c_edid = i2c_new_dummy(i2c->adapter, edid_i2c_addr >> 1); - if (!adv7511->i2c_edid) - return -ENOMEM; + if (!adv7511->i2c_edid) { + ret = -ENOMEM; + goto uninit_regulators; + } if (adv7511->type == ADV7533) { ret = adv7533_init_cec(adv7511); @@ -1060,12 +1158,16 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id) goto err_unregister_cec; } + adv7511_audio_init(dev, adv7511); + return 0; err_unregister_cec: adv7533_uninit_cec(adv7511); err_i2c_unregister_edid: i2c_unregister_device(adv7511->i2c_edid); +uninit_regulators: + adv7511_uninit_regulators(adv7511); return ret; } @@ -1079,8 +1181,12 @@ static int adv7511_remove(struct i2c_client *i2c) 
adv7533_uninit_cec(adv7511); } + adv7511_uninit_regulators(adv7511); + drm_bridge_remove(&adv7511->bridge); + adv7511_audio_exit(adv7511); + i2c_unregister_device(adv7511->i2c_edid); kfree(adv7511->edid); diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c index d7f7b7ce8ebe..8b210373cfa2 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7533.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c @@ -29,6 +29,7 @@ static const struct reg_sequence adv7533_cec_fixed_registers[] = { { 0x17, 0xd0 }, { 0x24, 0x20 }, { 0x57, 0x11 }, + { 0x05, 0xc8 }, }; static const struct regmap_config adv7533_cec_regmap_config = { diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 34adde169a78..5371d1dd1fa8 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -30,6 +30,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_mode.h> #include <drm/drm_plane_helper.h> +#include <drm/drm_print.h> #include "drm_crtc_internal.h" @@ -609,6 +610,28 @@ static int drm_atomic_crtc_check(struct drm_crtc *crtc, return 0; } +static void drm_atomic_crtc_print_state(struct drm_printer *p, + const struct drm_crtc_state *state) +{ + struct drm_crtc *crtc = state->crtc; + + drm_printf(p, "crtc[%u]: %s\n", crtc->base.id, crtc->name); + drm_printf(p, "\tenable=%d\n", state->enable); + drm_printf(p, "\tactive=%d\n", state->active); + drm_printf(p, "\tplanes_changed=%d\n", state->planes_changed); + drm_printf(p, "\tmode_changed=%d\n", state->mode_changed); + drm_printf(p, "\tactive_changed=%d\n", state->active_changed); + drm_printf(p, "\tconnectors_changed=%d\n", state->connectors_changed); + drm_printf(p, "\tcolor_mgmt_changed=%d\n", state->color_mgmt_changed); + drm_printf(p, "\tplane_mask=%x\n", state->plane_mask); + drm_printf(p, "\tconnector_mask=%x\n", state->connector_mask); + drm_printf(p, "\tencoder_mask=%x\n", state->encoder_mask); + drm_printf(p, "\tmode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(&state->mode)); + + if 
(crtc->funcs->atomic_print_state) + crtc->funcs->atomic_print_state(p, state); +} + /** * drm_atomic_get_plane_state - get plane state * @state: global atomic state object @@ -712,7 +735,10 @@ int drm_atomic_plane_set_property(struct drm_plane *plane, state->src_w = val; } else if (property == config->prop_src_h) { state->src_h = val; - } else if (property == config->rotation_property) { + } else if (property == config->rotation_property || + property == plane->rotation_property) { + if (!is_power_of_2(val & DRM_ROTATE_MASK)) + return -EINVAL; state->rotation = val; } else if (property == plane->zpos_property) { state->zpos = val; @@ -770,7 +796,8 @@ drm_atomic_plane_get_property(struct drm_plane *plane, *val = state->src_w; } else if (property == config->prop_src_h) { *val = state->src_h; - } else if (property == config->rotation_property) { + } else if (property == config->rotation_property || + property == plane->rotation_property) { *val = state->rotation; } else if (property == plane->zpos_property) { *val = state->zpos; @@ -883,6 +910,38 @@ static int drm_atomic_plane_check(struct drm_plane *plane, return 0; } +static void drm_atomic_plane_print_state(struct drm_printer *p, + const struct drm_plane_state *state) +{ + struct drm_plane *plane = state->plane; + struct drm_rect src = drm_plane_state_src(state); + struct drm_rect dest = drm_plane_state_dest(state); + + drm_printf(p, "plane[%u]: %s\n", plane->base.id, plane->name); + drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)"); + drm_printf(p, "\tfb=%u\n", state->fb ? 
state->fb->base.id : 0); + if (state->fb) { + struct drm_framebuffer *fb = state->fb; + int i, n = drm_format_num_planes(fb->pixel_format); + + drm_printf(p, "\t\tformat=%s\n", + drm_get_format_name(fb->pixel_format)); + drm_printf(p, "\t\tsize=%dx%d\n", fb->width, fb->height); + drm_printf(p, "\t\tlayers:\n"); + for (i = 0; i < n; i++) { + drm_printf(p, "\t\t\tpitch[%d]=%u\n", i, fb->pitches[i]); + drm_printf(p, "\t\t\toffset[%d]=%u\n", i, fb->offsets[i]); + drm_printf(p, "\t\t\tmodifier[%d]=0x%llx\n", i, fb->modifier[i]); + } + } + drm_printf(p, "\tcrtc-pos=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&dest)); + drm_printf(p, "\tsrc-pos=" DRM_RECT_FP_FMT "\n", DRM_RECT_FP_ARG(&src)); + drm_printf(p, "\trotation=%x\n", state->rotation); + + if (plane->funcs->atomic_print_state) + plane->funcs->atomic_print_state(p, state); +} + /** * drm_atomic_get_connector_state - get connector state * @state: global atomic state object @@ -998,6 +1057,18 @@ int drm_atomic_connector_set_property(struct drm_connector *connector, } EXPORT_SYMBOL(drm_atomic_connector_set_property); +static void drm_atomic_connector_print_state(struct drm_printer *p, + const struct drm_connector_state *state) +{ + struct drm_connector *connector = state->connector; + + drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name); + drm_printf(p, "\tcrtc=%s\n", state->crtc ? 
state->crtc->name : "(null)"); + + if (connector->funcs->atomic_print_state) + connector->funcs->atomic_print_state(p, state); +} + /** * drm_atomic_connector_get_property - get property value from connector state * @connector: the drm connector to set a property on @@ -1465,6 +1536,92 @@ int drm_atomic_nonblocking_commit(struct drm_atomic_state *state) } EXPORT_SYMBOL(drm_atomic_nonblocking_commit); +static void drm_atomic_print_state(const struct drm_atomic_state *state) +{ + struct drm_printer p = drm_info_printer(state->dev->dev); + struct drm_plane *plane; + struct drm_plane_state *plane_state; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + struct drm_connector *connector; + struct drm_connector_state *connector_state; + int i; + + DRM_DEBUG_ATOMIC("checking %p\n", state); + + for_each_plane_in_state(state, plane, plane_state, i) + drm_atomic_plane_print_state(&p, plane_state); + + for_each_crtc_in_state(state, crtc, crtc_state, i) + drm_atomic_crtc_print_state(&p, crtc_state); + + for_each_connector_in_state(state, connector, connector_state, i) + drm_atomic_connector_print_state(&p, connector_state); +} + +/** + * drm_state_dump - dump entire device atomic state + * @dev: the drm device + * @p: where to print the state to + * + * Just for debugging. Drivers might want an option to dump state + * to dmesg in case of error irq's. (Hint, you probably want to + * ratelimit this!) + * + * The caller must drm_modeset_lock_all(), or if this is called + * from error irq handler, it should not be enabled by default. + * (Ie. if you are debugging errors you might not care that this + * is racey. But calling this without all modeset locks held is + * not inherently safe.) 
+ */ +void drm_state_dump(struct drm_device *dev, struct drm_printer *p) +{ + struct drm_mode_config *config = &dev->mode_config; + struct drm_plane *plane; + struct drm_crtc *crtc; + struct drm_connector *connector; + + if (!drm_core_check_feature(dev, DRIVER_ATOMIC)) + return; + + list_for_each_entry(plane, &config->plane_list, head) + drm_atomic_plane_print_state(p, plane->state); + + list_for_each_entry(crtc, &config->crtc_list, head) + drm_atomic_crtc_print_state(p, crtc->state); + + list_for_each_entry(connector, &config->connector_list, head) + drm_atomic_connector_print_state(p, connector->state); +} +EXPORT_SYMBOL(drm_state_dump); + +#ifdef CONFIG_DEBUG_FS +static int drm_state_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct drm_printer p = drm_seq_file_printer(m); + + drm_modeset_lock_all(dev); + drm_state_dump(dev, &p); + drm_modeset_unlock_all(dev); + + return 0; +} + +/* any use in debugfs files to dump individual planes/crtc/etc? 
*/ +static const struct drm_info_list drm_atomic_debugfs_list[] = { + {"state", drm_state_info, 0}, +}; + +int drm_atomic_debugfs_init(struct drm_minor *minor) +{ + return drm_debugfs_create_files(drm_atomic_debugfs_list, + ARRAY_SIZE(drm_atomic_debugfs_list), + minor->debugfs_root, minor); +} +#endif + /* * The big monstor ioctl */ @@ -1754,6 +1911,9 @@ retry: } else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) { ret = drm_atomic_nonblocking_commit(state); } else { + if (unlikely(drm_debug & DRM_UT_STATE)) + drm_atomic_print_state(state); + ret = drm_atomic_commit(state); } diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 50acd799babe..1665066d2bb6 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -2421,7 +2421,7 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set, primary_state->crtc_h = vdisplay; primary_state->src_x = set->x << 16; primary_state->src_y = set->y << 16; - if (primary_state->rotation & (DRM_ROTATE_90 | DRM_ROTATE_270)) { + if (drm_rotation_90_or_270(primary_state->rotation)) { primary_state->src_w = vdisplay << 16; primary_state->src_h = hdisplay << 16; } else { diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c index 85172a977bf3..e52aece30900 100644 --- a/drivers/gpu/drm/drm_blend.c +++ b/drivers/gpu/drm/drm_blend.c @@ -162,6 +162,41 @@ struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev, } EXPORT_SYMBOL(drm_mode_create_rotation_property); +int drm_plane_create_rotation_property(struct drm_plane *plane, + unsigned int rotation, + unsigned int supported_rotations) +{ + static const struct drm_prop_enum_list props[] = { + { __builtin_ffs(DRM_ROTATE_0) - 1, "rotate-0" }, + { __builtin_ffs(DRM_ROTATE_90) - 1, "rotate-90" }, + { __builtin_ffs(DRM_ROTATE_180) - 1, "rotate-180" }, + { __builtin_ffs(DRM_ROTATE_270) - 1, "rotate-270" }, + { __builtin_ffs(DRM_REFLECT_X) - 1, "reflect-x" }, + { 
__builtin_ffs(DRM_REFLECT_Y) - 1, "reflect-y" }, + }; + struct drm_property *prop; + + WARN_ON((supported_rotations & DRM_ROTATE_MASK) == 0); + WARN_ON(!is_power_of_2(rotation & DRM_ROTATE_MASK)); + WARN_ON(rotation & ~supported_rotations); + + prop = drm_property_create_bitmask(plane->dev, 0, "rotation", + props, ARRAY_SIZE(props), + supported_rotations); + if (!prop) + return -ENOMEM; + + drm_object_attach_property(&plane->base, prop, rotation); + + if (plane->state) + plane->state->rotation = rotation; + + plane->rotation_property = prop; + + return 0; +} +EXPORT_SYMBOL(drm_plane_create_rotation_property); + /** * drm_rotation_simplify() - Try to simplify the rotation * @rotation: Rotation to be simplified diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 2d7bedf28647..c055a63b9ea0 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -102,8 +102,6 @@ out: } EXPORT_SYMBOL(drm_crtc_force_disable_all); -DEFINE_WW_CLASS(crtc_ww_class); - static unsigned int drm_num_crtcs(struct drm_device *dev) { unsigned int num = 0; @@ -205,9 +203,9 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc, crtc->primary = primary; crtc->cursor = cursor; - if (primary) + if (primary && !primary->possible_crtcs) primary->possible_crtcs = 1 << drm_crtc_index(crtc); - if (cursor) + if (cursor && !cursor->possible_crtcs) cursor->possible_crtcs = 1 << drm_crtc_index(crtc); if (drm_core_check_feature(dev, DRIVER_ATOMIC)) { @@ -695,8 +693,7 @@ int drm_crtc_check_viewport(const struct drm_crtc *crtc, drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay); if (crtc->state && - crtc->primary->state->rotation & (DRM_ROTATE_90 | - DRM_ROTATE_270)) + drm_rotation_90_or_270(crtc->primary->state->rotation)) swap(hdisplay, vdisplay); return drm_framebuffer_check_src_coords(x << 16, y << 16, diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index 1205790ed960..22255088732c 100644 --- 
a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c @@ -36,6 +36,7 @@ #include <linux/export.h> #include <drm/drmP.h> #include <drm/drm_edid.h> +#include <drm/drm_atomic.h> #include "drm_internal.h" #if defined(CONFIG_DEBUG_FS) @@ -163,6 +164,14 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id, return ret; } + if (drm_core_check_feature(dev, DRIVER_ATOMIC)) { + ret = drm_atomic_debugfs_init(minor); + if (ret) { + DRM_ERROR("Failed to create atomic debugfs files\n"); + return ret; + } + } + if (dev->driver->debugfs_init) { ret = dev->driver->debugfs_init(minor); if (ret) { diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 6a48d6637e5c..ebe174482deb 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -399,7 +399,11 @@ static int restore_fbdev_mode(struct drm_fb_helper *fb_helper) if (plane->type != DRM_PLANE_TYPE_PRIMARY) drm_plane_force_disable(plane); - if (dev->mode_config.rotation_property) { + if (plane->rotation_property) { + drm_mode_plane_set_obj_prop(plane, + plane->rotation_property, + DRM_ROTATE_0); + } else if (dev->mode_config.rotation_property) { drm_mode_plane_set_obj_prop(plane, dev->mode_config.rotation_property, DRM_ROTATE_0); diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index e14366de0e6e..5748c0be6614 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -49,13 +49,7 @@ */ void drm_mode_debug_printmodeline(const struct drm_display_mode *mode) { - DRM_DEBUG_KMS("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d " - "0x%x 0x%x\n", - mode->base.id, mode->name, mode->vrefresh, mode->clock, - mode->hdisplay, mode->hsync_start, - mode->hsync_end, mode->htotal, - mode->vdisplay, mode->vsync_start, - mode->vsync_end, mode->vtotal, mode->type, mode->flags); + DRM_DEBUG_KMS("Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode)); } EXPORT_SYMBOL(drm_mode_debug_printmodeline); diff --git 
a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c index 61146f5b4f56..9059fe3145a1 100644 --- a/drivers/gpu/drm/drm_modeset_lock.c +++ b/drivers/gpu/drm/drm_modeset_lock.c @@ -60,6 +60,8 @@ * lists and lookup data structures. */ +static DEFINE_WW_CLASS(crtc_ww_class); + /** * drm_modeset_lock_all - take all modeset locks * @dev: DRM device @@ -398,6 +400,17 @@ int drm_modeset_backoff_interruptible(struct drm_modeset_acquire_ctx *ctx) EXPORT_SYMBOL(drm_modeset_backoff_interruptible); /** + * drm_modeset_lock_init - initialize lock + * @lock: lock to init + */ +void drm_modeset_lock_init(struct drm_modeset_lock *lock) +{ + ww_mutex_init(&lock->mutex, &crtc_ww_class); + INIT_LIST_HEAD(&lock->head); +} +EXPORT_SYMBOL(drm_modeset_lock_init); + +/** * drm_modeset_lock - take modeset lock * @lock: lock to take * @ctx: acquire ctx diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c index bc98bb94264d..47848ed8ca48 100644 --- a/drivers/gpu/drm/drm_of.c +++ b/drivers/gpu/drm/drm_of.c @@ -6,6 +6,11 @@ #include <drm/drm_crtc.h> #include <drm/drm_of.h> +static void drm_release_of(struct device *dev, void *data) +{ + of_node_put(data); +} + /** * drm_crtc_port_mask - find the mask of a registered CRTC by port OF node * @dev: DRM device @@ -64,6 +69,24 @@ uint32_t drm_of_find_possible_crtcs(struct drm_device *dev, EXPORT_SYMBOL(drm_of_find_possible_crtcs); /** + * drm_of_component_match_add - Add a component helper OF node match rule + * @master: master device + * @matchptr: component match pointer + * @compare: compare function used for matching component + * @node: of_node + */ +void drm_of_component_match_add(struct device *master, + struct component_match **matchptr, + int (*compare)(struct device *, void *), + struct device_node *node) +{ + of_node_get(node); + component_match_add_release(master, matchptr, drm_release_of, + compare, node); +} +EXPORT_SYMBOL_GPL(drm_of_component_match_add); + +/** * drm_of_component_probe - Generic probe 
function for a component based master * @dev: master device containing the OF node * @compare_of: compare function used for matching components @@ -101,7 +124,7 @@ int drm_of_component_probe(struct device *dev, continue; } - component_match_add(dev, &match, compare_of, port); + drm_of_component_match_add(dev, &match, compare_of, port); of_node_put(port); } @@ -140,7 +163,8 @@ int drm_of_component_probe(struct device *dev, continue; } - component_match_add(dev, &match, compare_of, remote); + drm_of_component_match_add(dev, &match, compare_of, + remote); of_node_put(remote); } of_node_put(port); diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c index 7899fc1dcdb0..7a7dddf604d7 100644 --- a/drivers/gpu/drm/drm_plane_helper.c +++ b/drivers/gpu/drm/drm_plane_helper.c @@ -130,15 +130,8 @@ int drm_plane_helper_check_state(struct drm_plane_state *state, unsigned int rotation = state->rotation; int hscale, vscale; - src->x1 = state->src_x; - src->y1 = state->src_y; - src->x2 = state->src_x + state->src_w; - src->y2 = state->src_y + state->src_h; - - dst->x1 = state->crtc_x; - dst->y1 = state->crtc_y; - dst->x2 = state->crtc_x + state->crtc_w; - dst->y2 = state->crtc_y + state->crtc_h; + *src = drm_plane_state_src(state); + *dst = drm_plane_state_dest(state); if (!fb) { state->visible = false; diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c new file mode 100644 index 000000000000..34eb85618b76 --- /dev/null +++ b/drivers/gpu/drm/drm_print.c @@ -0,0 +1,54 @@ +/* + * Copyright (C) 2016 Red Hat + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to 
the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: + * Rob Clark <robdclark@gmail.com> + */ + +#include <stdarg.h> +#include <linux/seq_file.h> +#include <drm/drmP.h> +#include <drm/drm_print.h> + +void __drm_printfn_seq_file(struct drm_printer *p, struct va_format *vaf) +{ + seq_printf(p->arg, "%pV", vaf); +} +EXPORT_SYMBOL(__drm_printfn_seq_file); + +void __drm_printfn_info(struct drm_printer *p, struct va_format *vaf) +{ + dev_printk(KERN_INFO, p->arg, "[" DRM_NAME "] %pV", vaf); +} +EXPORT_SYMBOL(__drm_printfn_info); + +void drm_printf(struct drm_printer *p, const char *f, ...) 
+{ + struct va_format vaf; + va_list args; + + va_start(args, f); + vaf.fmt = f; + vaf.va = &args; + p->printfn(p, &vaf); + va_end(args); +} +EXPORT_SYMBOL(drm_printf); diff --git a/drivers/gpu/drm/drm_rect.c b/drivers/gpu/drm/drm_rect.c index 73e53a8d1b37..e6057d8cdcd5 100644 --- a/drivers/gpu/drm/drm_rect.c +++ b/drivers/gpu/drm/drm_rect.c @@ -281,17 +281,10 @@ EXPORT_SYMBOL(drm_rect_calc_vscale_relaxed); */ void drm_rect_debug_print(const char *prefix, const struct drm_rect *r, bool fixed_point) { - int w = drm_rect_width(r); - int h = drm_rect_height(r); - if (fixed_point) - DRM_DEBUG_KMS("%s%d.%06ux%d.%06u%+d.%06u%+d.%06u\n", prefix, - w >> 16, ((w & 0xffff) * 15625) >> 10, - h >> 16, ((h & 0xffff) * 15625) >> 10, - r->x1 >> 16, ((r->x1 & 0xffff) * 15625) >> 10, - r->y1 >> 16, ((r->y1 & 0xffff) * 15625) >> 10); + DRM_DEBUG_KMS("%s" DRM_RECT_FP_FMT "\n", prefix, DRM_RECT_FP_ARG(r)); else - DRM_DEBUG_KMS("%s%dx%d%+d%+d\n", prefix, w, h, r->x1, r->y1); + DRM_DEBUG_KMS("%s" DRM_RECT_FMT "\n", prefix, DRM_RECT_ARG(r)); } EXPORT_SYMBOL(drm_rect_debug_print); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index aa687669e22b..0dee6acbd880 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -16,6 +16,7 @@ #include <linux/component.h> #include <linux/of_platform.h> +#include <drm/drm_of.h> #include "etnaviv_drv.h" #include "etnaviv_gpu.h" @@ -629,8 +630,8 @@ static int etnaviv_pdev_probe(struct platform_device *pdev) if (!core_node) break; - component_match_add(&pdev->dev, &match, compare_of, - core_node); + drm_of_component_match_add(&pdev->dev, &match, + compare_of, core_node); of_node_put(core_node); } } else if (dev->platform_data) { diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c index 90377a609c98..e88fde18c946 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c +++ 
b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c @@ -24,6 +24,7 @@ #include <drm/drm_fb_cma_helper.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc_helper.h> +#include <drm/drm_of.h> #include "kirin_drm_drv.h" @@ -260,14 +261,13 @@ static struct device_node *kirin_get_remote_node(struct device_node *np) DRM_ERROR("no valid endpoint node\n"); return ERR_PTR(-ENODEV); } - of_node_put(endpoint); remote = of_graph_get_remote_port_parent(endpoint); + of_node_put(endpoint); if (!remote) { DRM_ERROR("no valid remote node\n"); return ERR_PTR(-ENODEV); } - of_node_put(remote); if (!of_device_is_available(remote)) { DRM_ERROR("not available for remote node\n"); @@ -294,7 +294,8 @@ static int kirin_drm_platform_probe(struct platform_device *pdev) if (IS_ERR(remote)) return PTR_ERR(remote); - component_match_add(dev, &match, compare_of, remote); + drm_of_component_match_add(dev, &match, compare_of, remote); + of_node_put(remote); return component_master_add_with_match(dev, &kirin_drm_ops, match); diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c index b82de3072d4f..c762ae549a1c 100644 --- a/drivers/gpu/drm/i915/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/intel_atomic_plane.c @@ -142,8 +142,9 @@ static int intel_plane_atomic_check(struct drm_plane *plane, intel_state->clip.y2 = crtc_state->base.enable ? 
crtc_state->pipe_src_h : 0; - if (state->fb && intel_rotation_90_or_270(state->rotation)) { + if (state->fb && drm_rotation_90_or_270(state->rotation)) { char *format_name; + if (!(state->fb->modifier[0] == I915_FORMAT_MOD_Y_TILED || state->fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)) { DRM_DEBUG_KMS("Y/Yf tiling required for 90/270!\n"); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index f8efd20e4a90..a4cb4a2f553d 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2139,7 +2139,7 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, const struct drm_framebuffer *fb, unsigned int rotation) { - if (intel_rotation_90_or_270(rotation)) { + if (drm_rotation_90_or_270(rotation)) { *view = i915_ggtt_view_rotated; view->params.rotated = to_intel_framebuffer(fb)->rot_info; } else { @@ -2263,7 +2263,7 @@ void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation) static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane, unsigned int rotation) { - if (intel_rotation_90_or_270(rotation)) + if (drm_rotation_90_or_270(rotation)) return to_intel_framebuffer(fb)->rotated[plane].pitch; else return fb->pitches[plane]; @@ -2299,7 +2299,7 @@ void intel_add_fb_offsets(int *x, int *y, const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb); unsigned int rotation = state->base.rotation; - if (intel_rotation_90_or_270(rotation)) { + if (drm_rotation_90_or_270(rotation)) { *x += intel_fb->rotated[plane].x; *y += intel_fb->rotated[plane].y; } else { @@ -2363,7 +2363,7 @@ static u32 intel_adjust_tile_offset(int *x, int *y, intel_tile_dims(dev_priv, &tile_width, &tile_height, fb->modifier[plane], cpp); - if (intel_rotation_90_or_270(rotation)) { + if (drm_rotation_90_or_270(rotation)) { pitch_tiles = pitch / tile_height; swap(tile_width, tile_height); } else { @@ -2419,7 +2419,7 @@ static u32 _intel_compute_tile_offset(const struct drm_i915_private 
*dev_priv, intel_tile_dims(dev_priv, &tile_width, &tile_height, fb_modifier, cpp); - if (intel_rotation_90_or_270(rotation)) { + if (drm_rotation_90_or_270(rotation)) { pitch_tiles = pitch / tile_height; swap(tile_width, tile_height); } else { @@ -2821,14 +2821,8 @@ valid_fb: plane_state->crtc_w = fb->width; plane_state->crtc_h = fb->height; - intel_state->base.src.x1 = plane_state->src_x; - intel_state->base.src.y1 = plane_state->src_y; - intel_state->base.src.x2 = plane_state->src_x + plane_state->src_w; - intel_state->base.src.y2 = plane_state->src_y + plane_state->src_h; - intel_state->base.dst.x1 = plane_state->crtc_x; - intel_state->base.dst.y1 = plane_state->crtc_y; - intel_state->base.dst.x2 = plane_state->crtc_x + plane_state->crtc_w; - intel_state->base.dst.y2 = plane_state->crtc_y + plane_state->crtc_h; + intel_state->base.src = drm_plane_state_src(plane_state); + intel_state->base.dst = drm_plane_state_dest(plane_state); obj = intel_fb_obj(fb); if (i915_gem_object_is_tiled(obj)) @@ -2983,7 +2977,7 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state) return 0; /* Rotate src coordinates to match rotated GTT view */ - if (intel_rotation_90_or_270(rotation)) + if (drm_rotation_90_or_270(rotation)) drm_rect_rotate(&plane_state->base.src, fb->width << 16, fb->height << 16, DRM_ROTATE_270); @@ -3284,7 +3278,7 @@ u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane, * The stride is either expressed as a multiple of 64 bytes chunks for * linear buffers or in number of tiles for tiled buffers. */ - if (intel_rotation_90_or_270(rotation)) { + if (drm_rotation_90_or_270(rotation)) { int cpp = drm_format_plane_cpp(fb->pixel_format, plane); stride /= intel_tile_height(dev_priv, fb->modifier[0], cpp); @@ -4671,7 +4665,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, to_intel_crtc(crtc_state->base.crtc); int need_scaling; - need_scaling = intel_rotation_90_or_270(rotation) ? 
+ need_scaling = drm_rotation_90_or_270(rotation) ? (src_h != dst_w || src_w != dst_h): (src_w != dst_w || src_h != dst_h); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index a19ec06f9e42..f3500a1c24aa 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1285,12 +1285,6 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, unsigned int intel_tile_height(const struct drm_i915_private *dev_priv, uint64_t fb_modifier, unsigned int cpp); -static inline bool -intel_rotation_90_or_270(unsigned int rotation) -{ - return rotation & (DRM_ROTATE_90 | DRM_ROTATE_270); -} - void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane); diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index c43dd9abce79..81ef479f5b8b 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c @@ -84,7 +84,7 @@ static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache, { int w, h; - if (intel_rotation_90_or_270(cache->plane.rotation)) { + if (drm_rotation_90_or_270(cache->plane.rotation)) { w = cache->plane.src_h; h = cache->plane.src_w; } else { diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 49de4760cc16..315f8a2f4a31 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3182,7 +3182,7 @@ skl_plane_downscale_amount(const struct intel_plane_state *pstate) src_h = drm_rect_height(&pstate->base.src); dst_w = drm_rect_width(&pstate->base.dst); dst_h = drm_rect_height(&pstate->base.dst); - if (intel_rotation_90_or_270(pstate->base.rotation)) + if (drm_rotation_90_or_270(pstate->base.rotation)) swap(dst_w, dst_h); downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING); @@ -3213,7 +3213,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, width = drm_rect_width(&intel_pstate->base.src) >> 16; height = 
drm_rect_height(&intel_pstate->base.src) >> 16; - if (intel_rotation_90_or_270(pstate->rotation)) + if (drm_rotation_90_or_270(pstate->rotation)) swap(width, height); /* for planar format */ @@ -3313,7 +3313,7 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate, src_w = drm_rect_width(&intel_pstate->base.src) >> 16; src_h = drm_rect_height(&intel_pstate->base.src) >> 16; - if (intel_rotation_90_or_270(pstate->rotation)) + if (drm_rotation_90_or_270(pstate->rotation)) swap(src_w, src_h); /* Halve UV plane width and height for NV12 */ @@ -3327,7 +3327,7 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate, else plane_bpp = drm_format_plane_cpp(fb->pixel_format, 0); - if (intel_rotation_90_or_270(pstate->rotation)) { + if (drm_rotation_90_or_270(pstate->rotation)) { switch (plane_bpp) { case 1: min_scanlines = 32; @@ -3573,7 +3573,7 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, width = drm_rect_width(&intel_pstate->base.src) >> 16; height = drm_rect_height(&intel_pstate->base.src) >> 16; - if (intel_rotation_90_or_270(pstate->rotation)) + if (drm_rotation_90_or_270(pstate->rotation)) swap(width, height); cpp = drm_format_plane_cpp(fb->pixel_format, 0); diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 64f4e2e18594..dd1bd8eca79e 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -790,15 +790,8 @@ intel_check_sprite_plane(struct drm_plane *plane, bool can_scale; int ret; - src->x1 = state->base.src_x; - src->y1 = state->base.src_y; - src->x2 = state->base.src_x + state->base.src_w; - src->y2 = state->base.src_y + state->base.src_h; - - dst->x1 = state->base.crtc_x; - dst->y1 = state->base.crtc_y; - dst->x2 = state->base.crtc_x + state->base.crtc_w; - dst->y2 = state->base.crtc_y + state->base.crtc_h; + *src = drm_plane_state_src(&state->base); + *dst = drm_plane_state_dest(&state->base); if (!fb) { state->base.visible = false; diff --git 
a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index cf83f6507ec8..9c5430fb82a2 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -18,6 +18,7 @@ #include <drm/drm_crtc_helper.h> #include <drm/drm_gem.h> #include <drm/drm_gem_cma_helper.h> +#include <drm/drm_of.h> #include <linux/component.h> #include <linux/iommu.h> #include <linux/of_address.h> @@ -415,7 +416,8 @@ static int mtk_drm_probe(struct platform_device *pdev) comp_type == MTK_DPI) { dev_info(dev, "Adding component match for %s\n", node->full_name); - component_match_add(dev, &match, compare_of, node); + drm_of_component_match_add(dev, &match, compare_of, + node); } else { struct mtk_ddp_comp *comp; diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 4e2806cf778c..90f66c408120 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -37,6 +37,7 @@ msm-y := \ mdp/mdp5/mdp5_irq.o \ mdp/mdp5/mdp5_mdss.o \ mdp/mdp5/mdp5_kms.o \ + mdp/mdp5/mdp5_pipe.o \ mdp/mdp5/mdp5_plane.o \ mdp/mdp5/mdp5_smp.o \ msm_atomic.o \ @@ -48,6 +49,7 @@ msm-y := \ msm_gem_prime.o \ msm_gem_shrinker.o \ msm_gem_submit.o \ + msm_gem_vma.o \ msm_gpu.o \ msm_iommu.o \ msm_perf.o \ diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c index fd266ed963b6..156abf00c0e2 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c @@ -583,7 +583,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev) #endif } - if (!gpu->mmu) { + if (!gpu->aspace) { /* TODO we think it is possible to configure the GPU to * restrict access to VRAM carveout. But the required * registers are unknown. 
For now just bail out and diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c index d0d3c7baa8fe..2dc94122a959 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c @@ -672,7 +672,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev) #endif } - if (!gpu->mmu) { + if (!gpu->aspace) { /* TODO we think it is possible to configure the GPU to * restrict access to VRAM carveout. But the required * registers are unknown. For now just bail out and diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index a904082ed206..e51012ba703c 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -349,7 +349,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, { struct adreno_platform_config *config = pdev->dev.platform_data; struct msm_gpu *gpu = &adreno_gpu->base; - struct msm_mmu *mmu; int ret; adreno_gpu->funcs = funcs; @@ -388,8 +387,8 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, return ret; } - mmu = gpu->mmu; - if (mmu) { + if (gpu->aspace && gpu->aspace->mmu) { + struct msm_mmu *mmu = gpu->aspace->mmu; ret = mmu->funcs->attach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports)); if (ret) diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c index a521207db8a1..b764d7f10312 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c @@ -15,6 +15,7 @@ * this program. If not, see <http://www.gnu.org/licenses/>. 
*/ +#include <drm/drm_print.h> #include "msm_drv.h" #include "mdp4_kms.h" @@ -29,7 +30,16 @@ void mdp4_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask, static void mdp4_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus) { + struct mdp4_kms *mdp4_kms = container_of(irq, struct mdp4_kms, error_handler); + static DEFINE_RATELIMIT_STATE(rs, 5*HZ, 1); + extern bool dumpstate; + DRM_ERROR_RATELIMITED("errors: %08x\n", irqstatus); + + if (dumpstate && __ratelimit(&rs)) { + struct drm_printer p = drm_info_printer(mdp4_kms->dev->dev); + drm_state_dump(mdp4_kms->dev, &p); + } } void mdp4_irq_preinstall(struct msm_kms *kms) diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c index 571a91ee9607..dbdbc2a5b014 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c @@ -17,6 +17,7 @@ #include "msm_drv.h" +#include "msm_gem.h" #include "msm_mmu.h" #include "mdp4_kms.h" @@ -159,11 +160,12 @@ static void mdp4_destroy(struct msm_kms *kms) { struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); struct device *dev = mdp4_kms->dev->dev; - struct msm_mmu *mmu = mdp4_kms->mmu; + struct msm_gem_address_space *aspace = mdp4_kms->aspace; - if (mmu) { - mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports)); - mmu->funcs->destroy(mmu); + if (aspace) { + aspace->mmu->funcs->detach(aspace->mmu, + iommu_ports, ARRAY_SIZE(iommu_ports)); + msm_gem_address_space_destroy(aspace); } if (mdp4_kms->blank_cursor_iova) @@ -440,7 +442,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) struct mdp4_platform_config *config = mdp4_get_config(pdev); struct mdp4_kms *mdp4_kms; struct msm_kms *kms = NULL; - struct msm_mmu *mmu; + struct msm_gem_address_space *aspace; int irq, ret; mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL); @@ -531,24 +533,26 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) mdelay(16); if (config->iommu) { - mmu = msm_iommu_new(&pdev->dev, config->iommu); - if 
(IS_ERR(mmu)) { - ret = PTR_ERR(mmu); + aspace = msm_gem_address_space_create(&pdev->dev, + config->iommu, "mdp4"); + if (IS_ERR(aspace)) { + ret = PTR_ERR(aspace); goto fail; } - ret = mmu->funcs->attach(mmu, iommu_ports, + + mdp4_kms->aspace = aspace; + + ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports, ARRAY_SIZE(iommu_ports)); if (ret) goto fail; - - mdp4_kms->mmu = mmu; } else { dev_info(dev->dev, "no iommu, fallback to phys " "contig buffers for scanout\n"); - mmu = NULL; + aspace = NULL; } - mdp4_kms->id = msm_register_mmu(dev, mmu); + mdp4_kms->id = msm_register_address_space(dev, aspace); if (mdp4_kms->id < 0) { ret = mdp4_kms->id; dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret); @@ -598,6 +602,10 @@ static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev) /* TODO: Chips that aren't apq8064 have a 200 Mhz max_clk */ config.max_clk = 266667000; config.iommu = iommu_domain_alloc(&platform_bus_type); + if (config.iommu) { + config.iommu->geometry.aperture_start = 0x1000; + config.iommu->geometry.aperture_end = 0xffffffff; + } return &config; } diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h index 25fb83997119..8e9d59ed860a 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h @@ -43,7 +43,7 @@ struct mdp4_kms { struct clk *pclk; struct clk *lut_clk; struct clk *axi_clk; - struct msm_mmu *mmu; + struct msm_gem_address_space *aspace; struct mdp_irq error_handler; diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c index 8b4e3004f451..618b2ffed9b4 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c @@ -550,6 +550,10 @@ static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev) static struct mdp5_cfg_platform config = {}; config.iommu = iommu_domain_alloc(&platform_bus_type); + if (config.iommu) { + 
config.iommu->geometry.aperture_start = 0x1000; + config.iommu->geometry.aperture_end = 0xffffffff; + } return &config; } diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c index c627ab6d0061..b1da66195978 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c @@ -32,39 +32,24 @@ static struct mdp5_kms *get_kms(struct drm_encoder *encoder) return to_mdp5_kms(to_mdp_kms(priv->kms)); } -#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING -#include <mach/board.h> +#ifdef CONFIG_MSM_BUS_SCALING #include <linux/msm-bus.h> -#include <linux/msm-bus-board.h> -#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val) \ - { \ - .src = MSM_BUS_MASTER_MDP_PORT0, \ - .dst = MSM_BUS_SLAVE_EBI_CH0, \ - .ab = (ab_val), \ - .ib = (ib_val), \ - } - -static struct msm_bus_vectors mdp_bus_vectors[] = { - MDP_BUS_VECTOR_ENTRY(0, 0), - MDP_BUS_VECTOR_ENTRY(2000000000, 2000000000), -}; -static struct msm_bus_paths mdp_bus_usecases[] = { { - .num_paths = 1, - .vectors = &mdp_bus_vectors[0], -}, { - .num_paths = 1, - .vectors = &mdp_bus_vectors[1], -} }; -static struct msm_bus_scale_pdata mdp_bus_scale_table = { - .usecase = mdp_bus_usecases, - .num_usecases = ARRAY_SIZE(mdp_bus_usecases), - .name = "mdss_mdp", -}; static void bs_init(struct mdp5_cmd_encoder *mdp5_cmd_enc) { - mdp5_cmd_enc->bsc = msm_bus_scale_register_client( - &mdp_bus_scale_table); + struct drm_encoder *encoder = &mdp5_cmd_enc->base; + struct mdp5_kms *mdp5_kms = get_kms(encoder); + struct platform_device *pdev = mdp5_kms->pdev; + struct msm_bus_scale_pdata *bus_scale_table; + + bus_scale_table = msm_bus_cl_get_pdata(pdev); + if (!bus_scale_table) { + DBG("bus scaling is disabled\n"); + } else { + mdp5_cmd_enc->bsc = msm_bus_scale_register_client( + bus_scale_table); + } + DBG("bus scale client: %08x", mdp5_cmd_enc->bsc); } diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index 
c205c360e16d..38292e474098 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c @@ -31,7 +31,6 @@ struct mdp5_crtc { struct drm_crtc base; - char name[8]; int id; bool enabled; @@ -102,7 +101,7 @@ static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask) { struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); - DBG("%s: flush=%08x", mdp5_crtc->name, flush_mask); + DBG("%s: flush=%08x", crtc->name, flush_mask); return mdp5_ctl_commit(mdp5_crtc->ctl, flush_mask); } @@ -136,7 +135,6 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file) struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); struct drm_device *dev = crtc->dev; struct drm_pending_vblank_event *event; - struct drm_plane *plane; unsigned long flags; spin_lock_irqsave(&dev->event_lock, flags); @@ -148,16 +146,12 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file) */ if (!file || (event->base.file_priv == file)) { mdp5_crtc->event = NULL; - DBG("%s: send event: %p", mdp5_crtc->name, event); + DBG("%s: send event: %p", crtc->name, event); drm_crtc_send_vblank_event(crtc, event); } } spin_unlock_irqrestore(&dev->event_lock, flags); - drm_atomic_crtc_for_each_plane(plane, crtc) { - mdp5_plane_complete_flip(plane); - } - if (mdp5_crtc->ctl && !crtc->state->enable) { /* set STAGE_UNUSED for all layers */ mdp5_ctl_blend(mdp5_crtc->ctl, NULL, 0, 0); @@ -295,7 +289,7 @@ static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc) mode = &crtc->state->adjusted_mode; DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", - mdp5_crtc->name, mode->base.id, mode->name, + crtc->name, mode->base.id, mode->name, mode->vrefresh, mode->clock, mode->hdisplay, mode->hsync_start, mode->hsync_end, mode->htotal, @@ -315,7 +309,7 @@ static void mdp5_crtc_disable(struct drm_crtc *crtc) struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); struct mdp5_kms *mdp5_kms = get_kms(crtc); - DBG("%s", mdp5_crtc->name); + DBG("%s", crtc->name); if 
(WARN_ON(!mdp5_crtc->enabled)) return; @@ -334,7 +328,7 @@ static void mdp5_crtc_enable(struct drm_crtc *crtc) struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); struct mdp5_kms *mdp5_kms = get_kms(crtc); - DBG("%s", mdp5_crtc->name); + DBG("%s", crtc->name); if (WARN_ON(mdp5_crtc->enabled)) return; @@ -357,6 +351,8 @@ static int pstate_cmp(const void *a, const void *b) { struct plane_state *pa = (struct plane_state *)a; struct plane_state *pb = (struct plane_state *)b; + if (pa->state->zpos == pb->state->zpos) + return pa->plane->base.id - pb->plane->base.id; return pa->state->zpos - pb->state->zpos; } @@ -372,7 +368,6 @@ static bool is_fullscreen(struct drm_crtc_state *cstate, static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state) { - struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); struct mdp5_kms *mdp5_kms = get_kms(crtc); struct drm_plane *plane; struct drm_device *dev = crtc->dev; @@ -381,7 +376,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, const struct drm_plane_state *pstate; int cnt = 0, base = 0, i; - DBG("%s: check", mdp5_crtc->name); + DBG("%s: check", crtc->name); drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) { pstates[cnt].plane = plane; @@ -405,14 +400,14 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); if ((cnt + base) >= hw_cfg->lm.nb_stages) { - dev_err(dev->dev, "too many planes!\n"); + dev_err(dev->dev, "too many planes! 
cnt=%d, base=%d\n", cnt, base); return -EINVAL; } for (i = 0; i < cnt; i++) { pstates[i].state->stage = STAGE_BASE + i + base; - DBG("%s: assign pipe %s on stage=%d", mdp5_crtc->name, - pipe2name(mdp5_plane_pipe(pstates[i].plane)), + DBG("%s: assign pipe %s on stage=%d", crtc->name, + pstates[i].plane->name, pstates[i].state->stage); } @@ -422,8 +417,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state) { - struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); - DBG("%s: begin", mdp5_crtc->name); + DBG("%s: begin", crtc->name); } static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc, @@ -433,7 +427,7 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; unsigned long flags; - DBG("%s: event: %p", mdp5_crtc->name, crtc->state->event); + DBG("%s: event: %p", crtc->name, crtc->state->event); WARN_ON(mdp5_crtc->event); @@ -518,6 +512,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, if (!handle) { DBG("Cursor off"); cursor_enable = false; + mdp5_enable(mdp5_kms); goto set_cursor; } @@ -541,6 +536,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, get_roi(crtc, &roi_w, &roi_h); + mdp5_enable(mdp5_kms); + mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride); mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm), MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888)); @@ -569,6 +566,7 @@ set_cursor: crtc_flush(crtc, flush_mask); end: + mdp5_disable(mdp5_kms); if (old_bo) { drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo); /* enable vblank to complete cursor work: */ @@ -595,6 +593,8 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) get_roi(crtc, &roi_w, &roi_h); + mdp5_enable(mdp5_kms); + spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(mdp5_crtc->lm), MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) | @@ -606,6 +606,8 @@ static int 
mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) crtc_flush(crtc, flush_mask); + mdp5_disable(mdp5_kms); + return 0; } @@ -653,7 +655,7 @@ static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus) { struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err); - DBG("%s: error: %08x", mdp5_crtc->name, irqstatus); + DBG("%s: error: %08x", mdp5_crtc->base.name, irqstatus); } static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus) @@ -775,9 +777,6 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq; mdp5_crtc->err.irq = mdp5_crtc_err_irq; - snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d", - pipe2name(mdp5_plane_pipe(plane)), id); - drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs, NULL); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c index fe0c22230883..a1ea3497fbb4 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c @@ -38,39 +38,24 @@ static struct mdp5_kms *get_kms(struct drm_encoder *encoder) return to_mdp5_kms(to_mdp_kms(priv->kms)); } -#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING -#include <mach/board.h> -#include <mach/msm_bus.h> -#include <mach/msm_bus_board.h> -#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val) \ - { \ - .src = MSM_BUS_MASTER_MDP_PORT0, \ - .dst = MSM_BUS_SLAVE_EBI_CH0, \ - .ab = (ab_val), \ - .ib = (ib_val), \ - } - -static struct msm_bus_vectors mdp_bus_vectors[] = { - MDP_BUS_VECTOR_ENTRY(0, 0), - MDP_BUS_VECTOR_ENTRY(2000000000, 2000000000), -}; -static struct msm_bus_paths mdp_bus_usecases[] = { { - .num_paths = 1, - .vectors = &mdp_bus_vectors[0], -}, { - .num_paths = 1, - .vectors = &mdp_bus_vectors[1], -} }; -static struct msm_bus_scale_pdata mdp_bus_scale_table = { - .usecase = mdp_bus_usecases, - .num_usecases = ARRAY_SIZE(mdp_bus_usecases), - .name = "mdss_mdp", -}; +#ifdef CONFIG_MSM_BUS_SCALING 
+#include <linux/msm-bus.h> static void bs_init(struct mdp5_encoder *mdp5_encoder) { - mdp5_encoder->bsc = msm_bus_scale_register_client( - &mdp_bus_scale_table); + struct drm_encoder *encoder = &mdp5_encoder->base; + struct mdp5_kms *mdp5_kms = get_kms(encoder); + struct platform_device *pdev = mdp5_kms->pdev; + struct msm_bus_scale_pdata *bus_scale_table; + + bus_scale_table = msm_bus_cl_get_pdata(pdev); + if (!bus_scale_table) { + DBG("bus scaling is disabled\n"); + } else { + mdp5_encoder->bsc = msm_bus_scale_register_client( + bus_scale_table); + } + DBG("bus scale client: %08x", mdp5_encoder->bsc); } diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c index d53e5510fd7c..3ce8b9dec9c1 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c @@ -17,6 +17,8 @@ #include <linux/irq.h> +#include <drm/drm_print.h> + #include "msm_drv.h" #include "mdp5_kms.h" @@ -30,7 +32,18 @@ void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask, static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus) { + struct mdp5_kms *mdp5_kms = container_of(irq, struct mdp5_kms, error_handler); + static DEFINE_RATELIMIT_STATE(rs, 5*HZ, 1); + extern bool dumpstate; + DRM_ERROR_RATELIMITED("errors: %08x\n", irqstatus); + + if (dumpstate && __ratelimit(&rs)) { + struct drm_printer p = drm_info_printer(mdp5_kms->dev->dev); + drm_state_dump(mdp5_kms->dev, &p); + if (mdp5_kms->smp) + mdp5_smp_dump(mdp5_kms->smp, &p); + } } void mdp5_irq_preinstall(struct msm_kms *kms) diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index ed7143d35b25..f346290c82f5 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c @@ -19,6 +19,7 @@ #include <linux/of_irq.h> #include "msm_drv.h" +#include "msm_gem.h" #include "msm_mmu.h" #include "mdp5_kms.h" @@ -71,21 +72,57 @@ static int mdp5_hw_init(struct msm_kms *kms) 
return 0; } +struct mdp5_state *mdp5_get_state(struct drm_atomic_state *s) +{ + struct msm_drm_private *priv = s->dev->dev_private; + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms)); + struct msm_kms_state *state = to_kms_state(s); + struct mdp5_state *new_state; + int ret; + + if (state->state) + return state->state; + + ret = drm_modeset_lock(&mdp5_kms->state_lock, s->acquire_ctx); + if (ret) + return ERR_PTR(ret); + + new_state = kmalloc(sizeof(*mdp5_kms->state), GFP_KERNEL); + if (!new_state) + return ERR_PTR(-ENOMEM); + + /* Copy state: */ + new_state->hwpipe = mdp5_kms->state->hwpipe; + if (mdp5_kms->smp) + new_state->smp = mdp5_kms->state->smp; + + state->state = new_state; + + return new_state; +} + +static void mdp5_swap_state(struct msm_kms *kms, struct drm_atomic_state *state) +{ + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); + swap(to_kms_state(state)->state, mdp5_kms->state); +} + static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state) { struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); + mdp5_enable(mdp5_kms); + + if (mdp5_kms->smp) + mdp5_smp_prepare_commit(mdp5_kms->smp, &mdp5_kms->state->smp); } static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state) { - int i; struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); - struct drm_plane *plane; - struct drm_plane_state *plane_state; - for_each_plane_in_state(state, plane, plane_state, i) - mdp5_plane_complete_commit(plane, plane_state); + if (mdp5_kms->smp) + mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp); mdp5_disable(mdp5_kms); } @@ -117,14 +154,66 @@ static int mdp5_set_split_display(struct msm_kms *kms, static void mdp5_kms_destroy(struct msm_kms *kms) { struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); - struct msm_mmu *mmu = mdp5_kms->mmu; + struct msm_gem_address_space *aspace = mdp5_kms->aspace; + int i; - if (mmu) { - mmu->funcs->detach(mmu, iommu_ports, 
ARRAY_SIZE(iommu_ports)); - mmu->funcs->destroy(mmu); + for (i = 0; i < mdp5_kms->num_hwpipes; i++) + mdp5_pipe_destroy(mdp5_kms->hwpipes[i]); + + if (aspace) { + aspace->mmu->funcs->detach(aspace->mmu, + iommu_ports, ARRAY_SIZE(iommu_ports)); + msm_gem_address_space_destroy(aspace); } } +#ifdef CONFIG_DEBUG_FS +static int smp_show(struct seq_file *m, void *arg) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct msm_drm_private *priv = dev->dev_private; + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms)); + struct drm_printer p = drm_seq_file_printer(m); + + if (!mdp5_kms->smp) { + drm_printf(&p, "no SMP pool\n"); + return 0; + } + + mdp5_smp_dump(mdp5_kms->smp, &p); + + return 0; +} + +static struct drm_info_list mdp5_debugfs_list[] = { + {"smp", smp_show }, +}; + +static int mdp5_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor) +{ + struct drm_device *dev = minor->dev; + int ret; + + ret = drm_debugfs_create_files(mdp5_debugfs_list, + ARRAY_SIZE(mdp5_debugfs_list), + minor->debugfs_root, minor); + + if (ret) { + dev_err(dev->dev, "could not install mdp5_debugfs_list\n"); + return ret; + } + + return 0; +} + +static void mdp5_kms_debugfs_cleanup(struct msm_kms *kms, struct drm_minor *minor) +{ + drm_debugfs_remove_files(mdp5_debugfs_list, + ARRAY_SIZE(mdp5_debugfs_list), minor); +} +#endif + static const struct mdp_kms_funcs kms_funcs = { .base = { .hw_init = mdp5_hw_init, @@ -134,6 +223,7 @@ static const struct mdp_kms_funcs kms_funcs = { .irq = mdp5_irq, .enable_vblank = mdp5_enable_vblank, .disable_vblank = mdp5_disable_vblank, + .swap_state = mdp5_swap_state, .prepare_commit = mdp5_prepare_commit, .complete_commit = mdp5_complete_commit, .wait_for_crtc_commit_done = mdp5_wait_for_crtc_commit_done, @@ -141,6 +231,10 @@ static const struct mdp_kms_funcs kms_funcs = { .round_pixclk = mdp5_round_pixclk, .set_split_display = mdp5_set_split_display, .destroy = 
mdp5_kms_destroy, +#ifdef CONFIG_DEBUG_FS + .debugfs_init = mdp5_kms_debugfs_init, + .debugfs_cleanup = mdp5_kms_debugfs_cleanup, +#endif }, .set_irqmask = mdp5_set_irqmask, }; @@ -321,15 +415,6 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num) static int modeset_init(struct mdp5_kms *mdp5_kms) { - static const enum mdp5_pipe crtcs[] = { - SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3, - }; - static const enum mdp5_pipe vig_planes[] = { - SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3, - }; - static const enum mdp5_pipe dma_planes[] = { - SSPP_DMA0, SSPP_DMA1, - }; struct drm_device *dev = mdp5_kms->dev; struct msm_drm_private *priv = dev->dev_private; const struct mdp5_cfg_hw *hw_cfg; @@ -337,58 +422,35 @@ static int modeset_init(struct mdp5_kms *mdp5_kms) hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); - /* construct CRTCs and their private planes: */ - for (i = 0; i < hw_cfg->pipe_rgb.count; i++) { + /* Construct planes equaling the number of hw pipes, and CRTCs + * for the N layer-mixers (LM). 
The first N planes become primary + * planes for the CRTCs, with the remainder as overlay planes: + */ + for (i = 0; i < mdp5_kms->num_hwpipes; i++) { + bool primary = i < mdp5_cfg->lm.count; struct drm_plane *plane; struct drm_crtc *crtc; - plane = mdp5_plane_init(dev, crtcs[i], true, - hw_cfg->pipe_rgb.base[i], hw_cfg->pipe_rgb.caps); + plane = mdp5_plane_init(dev, primary); if (IS_ERR(plane)) { ret = PTR_ERR(plane); - dev_err(dev->dev, "failed to construct plane for %s (%d)\n", - pipe2name(crtcs[i]), ret); + dev_err(dev->dev, "failed to construct plane %d (%d)\n", i, ret); goto fail; } + priv->planes[priv->num_planes++] = plane; + + if (!primary) + continue; crtc = mdp5_crtc_init(dev, plane, i); if (IS_ERR(crtc)) { ret = PTR_ERR(crtc); - dev_err(dev->dev, "failed to construct crtc for %s (%d)\n", - pipe2name(crtcs[i]), ret); + dev_err(dev->dev, "failed to construct crtc %d (%d)\n", i, ret); goto fail; } priv->crtcs[priv->num_crtcs++] = crtc; } - /* Construct video planes: */ - for (i = 0; i < hw_cfg->pipe_vig.count; i++) { - struct drm_plane *plane; - - plane = mdp5_plane_init(dev, vig_planes[i], false, - hw_cfg->pipe_vig.base[i], hw_cfg->pipe_vig.caps); - if (IS_ERR(plane)) { - ret = PTR_ERR(plane); - dev_err(dev->dev, "failed to construct %s plane: %d\n", - pipe2name(vig_planes[i]), ret); - goto fail; - } - } - - /* DMA planes */ - for (i = 0; i < hw_cfg->pipe_dma.count; i++) { - struct drm_plane *plane; - - plane = mdp5_plane_init(dev, dma_planes[i], false, - hw_cfg->pipe_dma.base[i], hw_cfg->pipe_dma.caps); - if (IS_ERR(plane)) { - ret = PTR_ERR(plane); - dev_err(dev->dev, "failed to construct %s plane: %d\n", - pipe2name(dma_planes[i]), ret); - goto fail; - } - } - /* Construct encoders and modeset initialize connector devices * for each external display interface. 
*/ @@ -532,6 +594,11 @@ static int mdp5_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe, return -EINVAL; } + if (crtc->state && !crtc->state->active) { + DBG("crtc disabled %d", pipe); + return -EBUSY; + } + return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, vblank_time, flags, &crtc->mode); @@ -550,6 +617,11 @@ static u32 mdp5_get_vblank_counter(struct drm_device *dev, unsigned int pipe) if (!crtc) return 0; + if (crtc->state && !crtc->state->active) { + DBG("crtc disabled %d", pipe); + return -EBUSY; + } + encoder = get_encoder_from_crtc(crtc); if (!encoder) return 0; @@ -564,7 +636,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) struct mdp5_kms *mdp5_kms; struct mdp5_cfg *config; struct msm_kms *kms; - struct msm_mmu *mmu; + struct msm_gem_address_space *aspace; int irq, i, ret; /* priv->kms would have been populated by the MDP5 driver */ @@ -606,30 +678,29 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) mdelay(16); if (config->platform.iommu) { - mmu = msm_iommu_new(&pdev->dev, config->platform.iommu); - if (IS_ERR(mmu)) { - ret = PTR_ERR(mmu); - dev_err(&pdev->dev, "failed to init iommu: %d\n", ret); - iommu_domain_free(config->platform.iommu); + aspace = msm_gem_address_space_create(&pdev->dev, + config->platform.iommu, "mdp5"); + if (IS_ERR(aspace)) { + ret = PTR_ERR(aspace); goto fail; } - ret = mmu->funcs->attach(mmu, iommu_ports, + mdp5_kms->aspace = aspace; + + ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports, ARRAY_SIZE(iommu_ports)); if (ret) { dev_err(&pdev->dev, "failed to attach iommu: %d\n", ret); - mmu->funcs->destroy(mmu); goto fail; } } else { dev_info(&pdev->dev, "no iommu, fallback to phys contig buffers for scanout\n"); - mmu = NULL; + aspace = NULL;; } - mdp5_kms->mmu = mmu; - mdp5_kms->id = msm_register_mmu(dev, mmu); + mdp5_kms->id = msm_register_address_space(dev, aspace); if (mdp5_kms->id < 0) { ret = mdp5_kms->id; dev_err(&pdev->dev, "failed to register mdp5 iommu: %d\n", ret); 
@@ -644,8 +715,8 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) dev->mode_config.min_width = 0; dev->mode_config.min_height = 0; - dev->mode_config.max_width = config->hw->lm.max_width; - dev->mode_config.max_height = config->hw->lm.max_height; + dev->mode_config.max_width = 0xffff; + dev->mode_config.max_height = 0xffff; dev->driver->get_vblank_timestamp = mdp5_get_vblank_timestamp; dev->driver->get_scanout_position = mdp5_get_scanoutpos; @@ -673,6 +744,69 @@ static void mdp5_destroy(struct platform_device *pdev) if (mdp5_kms->rpm_enabled) pm_runtime_disable(&pdev->dev); + + kfree(mdp5_kms->state); +} + +static int construct_pipes(struct mdp5_kms *mdp5_kms, int cnt, + const enum mdp5_pipe *pipes, const uint32_t *offsets, + uint32_t caps) +{ + struct drm_device *dev = mdp5_kms->dev; + int i, ret; + + for (i = 0; i < cnt; i++) { + struct mdp5_hw_pipe *hwpipe; + + hwpipe = mdp5_pipe_init(pipes[i], offsets[i], caps); + if (IS_ERR(hwpipe)) { + ret = PTR_ERR(hwpipe); + dev_err(dev->dev, "failed to construct pipe for %s (%d)\n", + pipe2name(pipes[i]), ret); + return ret; + } + hwpipe->idx = mdp5_kms->num_hwpipes; + mdp5_kms->hwpipes[mdp5_kms->num_hwpipes++] = hwpipe; + } + + return 0; +} + +static int hwpipe_init(struct mdp5_kms *mdp5_kms) +{ + static const enum mdp5_pipe rgb_planes[] = { + SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3, + }; + static const enum mdp5_pipe vig_planes[] = { + SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3, + }; + static const enum mdp5_pipe dma_planes[] = { + SSPP_DMA0, SSPP_DMA1, + }; + const struct mdp5_cfg_hw *hw_cfg; + int ret; + + hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); + + /* Construct RGB pipes: */ + ret = construct_pipes(mdp5_kms, hw_cfg->pipe_rgb.count, rgb_planes, + hw_cfg->pipe_rgb.base, hw_cfg->pipe_rgb.caps); + if (ret) + return ret; + + /* Construct video (VIG) pipes: */ + ret = construct_pipes(mdp5_kms, hw_cfg->pipe_vig.count, vig_planes, + hw_cfg->pipe_vig.base, hw_cfg->pipe_vig.caps); + if (ret) + return ret; + + 
/* Construct DMA pipes: */ + ret = construct_pipes(mdp5_kms, hw_cfg->pipe_dma.count, dma_planes, + hw_cfg->pipe_dma.base, hw_cfg->pipe_dma.caps); + if (ret) + return ret; + + return 0; } static int mdp5_init(struct platform_device *pdev, struct drm_device *dev) @@ -696,6 +830,13 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev) mdp5_kms->dev = dev; mdp5_kms->pdev = pdev; + drm_modeset_lock_init(&mdp5_kms->state_lock); + mdp5_kms->state = kzalloc(sizeof(*mdp5_kms->state), GFP_KERNEL); + if (!mdp5_kms->state) { + ret = -ENOMEM; + goto fail; + } + mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5"); if (IS_ERR(mdp5_kms->mmio)) { ret = PTR_ERR(mdp5_kms->mmio); @@ -749,7 +890,7 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev) * this section initializes the SMP: */ if (mdp5_kms->caps & MDP_CAP_SMP) { - mdp5_kms->smp = mdp5_smp_init(mdp5_kms->dev, &config->hw->smp); + mdp5_kms->smp = mdp5_smp_init(mdp5_kms, &config->hw->smp); if (IS_ERR(mdp5_kms->smp)) { ret = PTR_ERR(mdp5_kms->smp); mdp5_kms->smp = NULL; @@ -764,6 +905,10 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev) goto fail; } + ret = hwpipe_init(mdp5_kms); + if (ret) + goto fail; + /* set uninit-ed kms */ priv->kms = &mdp5_kms->base.base; diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h index 03738927be10..f4bd7babd118 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h @@ -24,8 +24,11 @@ #include "mdp5_cfg.h" /* must be included before mdp5.xml.h */ #include "mdp5.xml.h" #include "mdp5_ctl.h" +#include "mdp5_pipe.h" #include "mdp5_smp.h" +struct mdp5_state; + struct mdp5_kms { struct mdp_kms base; @@ -33,13 +36,21 @@ struct mdp5_kms { struct platform_device *pdev; + unsigned num_hwpipes; + struct mdp5_hw_pipe *hwpipes[16]; + struct mdp5_cfg_handler *cfg; uint32_t caps; /* MDP capabilities (MDP_CAP_XXX bits) */ + /** + * Global atomic 
state. Do not access directly, use mdp5_get_state() + */ + struct mdp5_state *state; + struct drm_modeset_lock state_lock; /* mapper-id used to request GEM buffer mapped for scanout: */ int id; - struct msm_mmu *mmu; + struct msm_gem_address_space *aspace; struct mdp5_smp *smp; struct mdp5_ctl_manager *ctlm; @@ -65,9 +76,27 @@ struct mdp5_kms { }; #define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base) +/* Global atomic state for tracking resources that are shared across + * multiple kms objects (planes/crtcs/etc). + * + * For atomic updates which require modifying global state, + */ +struct mdp5_state { + struct mdp5_hw_pipe_state hwpipe; + struct mdp5_smp_state smp; +}; + +struct mdp5_state *__must_check +mdp5_get_state(struct drm_atomic_state *s); + +/* Atomic plane state. Subclasses the base drm_plane_state in order to + * track assigned hwpipe and hw specific state. + */ struct mdp5_plane_state { struct drm_plane_state base; + struct mdp5_hw_pipe *hwpipe; + /* aligned with property */ uint8_t premultiplied; uint8_t zpos; @@ -75,13 +104,6 @@ struct mdp5_plane_state { /* assigned by crtc blender */ enum mdp_mixer_stage_id stage; - - /* some additional transactional status to help us know in the - * apply path whether we need to update SMP allocation, and - * whether current update is still pending: - */ - bool mode_changed : 1; - bool pending : 1; }; #define to_mdp5_plane_state(x) \ container_of(x, struct mdp5_plane_state, base) @@ -114,6 +136,18 @@ static inline u32 mdp5_read(struct mdp5_kms *mdp5_kms, u32 reg) return msm_readl(mdp5_kms->mmio + reg); } +static inline const char *stage2name(enum mdp_mixer_stage_id stage) +{ + static const char *names[] = { +#define NAME(n) [n] = #n + NAME(STAGE_UNUSED), NAME(STAGE_BASE), + NAME(STAGE0), NAME(STAGE1), NAME(STAGE2), + NAME(STAGE3), NAME(STAGE4), NAME(STAGE6), +#undef NAME + }; + return names[stage]; +} + static inline const char *pipe2name(enum mdp5_pipe pipe) { static const char *names[] = { @@ -196,13 
+230,8 @@ int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms); void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms); uint32_t mdp5_plane_get_flush(struct drm_plane *plane); -void mdp5_plane_complete_flip(struct drm_plane *plane); -void mdp5_plane_complete_commit(struct drm_plane *plane, - struct drm_plane_state *state); enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane); -struct drm_plane *mdp5_plane_init(struct drm_device *dev, - enum mdp5_pipe pipe, bool private_plane, - uint32_t reg_offset, uint32_t caps); +struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary); uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c new file mode 100644 index 000000000000..720f5f255696 --- /dev/null +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c @@ -0,0 +1,132 @@ +/* + * Copyright (C) 2016 Red Hat + * Author: Rob Clark <robdclark@gmail.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include "mdp5_kms.h" + +struct mdp5_hw_pipe *mdp5_pipe_assign(struct drm_atomic_state *s, + struct drm_plane *plane, uint32_t caps, uint32_t blkcfg) +{ + struct msm_drm_private *priv = s->dev->dev_private; + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms)); + struct mdp5_state *state = mdp5_get_state(s); + struct mdp5_hw_pipe_state *old_state, *new_state; + struct mdp5_hw_pipe *hwpipe = NULL; + int i; + + if (IS_ERR(state)) + return ERR_CAST(state); + + /* grab old_state after mdp5_get_state(), since now we hold lock: */ + old_state = &mdp5_kms->state->hwpipe; + new_state = &state->hwpipe; + + for (i = 0; i < mdp5_kms->num_hwpipes; i++) { + struct mdp5_hw_pipe *cur = mdp5_kms->hwpipes[i]; + + /* skip if already in-use.. check both new and old state, + * since we cannot immediately re-use a pipe that is + * released in the current update in some cases: + * (1) mdp5 has SMP (non-double-buffered) + * (2) hw pipe previously assigned to different CRTC + * (vblanks might not be aligned) + */ + if (new_state->hwpipe_to_plane[cur->idx] || + old_state->hwpipe_to_plane[cur->idx]) + continue; + + /* skip if doesn't support some required caps: */ + if (caps & ~cur->caps) + continue; + + /* possible candidate, take the one with the + * fewest unneeded caps bits set: + */ + if (!hwpipe || (hweight_long(cur->caps & ~caps) < + hweight_long(hwpipe->caps & ~caps))) + hwpipe = cur; + } + + if (!hwpipe) + return ERR_PTR(-ENOMEM); + + if (mdp5_kms->smp) { + int ret; + + DBG("%s: alloc SMP blocks", hwpipe->name); + ret = mdp5_smp_assign(mdp5_kms->smp, &state->smp, + hwpipe->pipe, blkcfg); + if (ret) + return ERR_PTR(-ENOMEM); + + hwpipe->blkcfg = blkcfg; + } + + DBG("%s: assign to plane %s for caps %x", + hwpipe->name, plane->name, caps); + new_state->hwpipe_to_plane[hwpipe->idx] = plane; + + return hwpipe; +} + +void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe) +{ + struct msm_drm_private *priv = s->dev->dev_private; + struct 
mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms)); + struct mdp5_state *state = mdp5_get_state(s); + struct mdp5_hw_pipe_state *new_state = &state->hwpipe; + + if (!hwpipe) + return; + + if (WARN_ON(!new_state->hwpipe_to_plane[hwpipe->idx])) + return; + + DBG("%s: release from plane %s", hwpipe->name, + new_state->hwpipe_to_plane[hwpipe->idx]->name); + + if (mdp5_kms->smp) { + DBG("%s: free SMP blocks", hwpipe->name); + mdp5_smp_release(mdp5_kms->smp, &state->smp, hwpipe->pipe); + } + + new_state->hwpipe_to_plane[hwpipe->idx] = NULL; +} + +void mdp5_pipe_destroy(struct mdp5_hw_pipe *hwpipe) +{ + kfree(hwpipe); +} + +struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe, + uint32_t reg_offset, uint32_t caps) +{ + struct mdp5_hw_pipe *hwpipe; + + hwpipe = kzalloc(sizeof(*hwpipe), GFP_KERNEL); + if (!hwpipe) + return ERR_PTR(-ENOMEM); + + hwpipe->name = pipe2name(pipe); + hwpipe->pipe = pipe; + hwpipe->reg_offset = reg_offset; + hwpipe->caps = caps; + hwpipe->flush_mask = mdp_ctl_flush_mask_pipe(pipe); + + spin_lock_init(&hwpipe->pipe_lock); + + return hwpipe; +} diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h new file mode 100644 index 000000000000..26643ac7763c --- /dev/null +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h @@ -0,0 +1,54 @@ +/* + * Copyright (C) 2016 Red Hat + * Author: Rob Clark <robdclark@gmail.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#ifndef __MDP5_PIPE_H__ +#define __MDP5_PIPE_H__ + +/* represents a hw pipe, which is dynamically assigned to a plane */ +struct mdp5_hw_pipe { + int idx; + + const char *name; + enum mdp5_pipe pipe; + + spinlock_t pipe_lock; /* protect REG_MDP5_PIPE_* registers */ + uint32_t reg_offset; + uint32_t caps; + + uint32_t flush_mask; /* used to commit pipe registers */ + + /* number of smp blocks per plane, ie: + * nblks_y | (nblks_u << 8) | (nblks_v << 16) + */ + uint32_t blkcfg; +}; + +/* global atomic state of assignment between pipes and planes: */ +struct mdp5_hw_pipe_state { + struct drm_plane *hwpipe_to_plane[16]; +}; + +struct mdp5_hw_pipe *__must_check +mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane, + uint32_t caps, uint32_t blkcfg); +void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe); + +struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe, + uint32_t reg_offset, uint32_t caps); +void mdp5_pipe_destroy(struct mdp5_hw_pipe *hwpipe); + +#endif /* __MDP5_PIPE_H__ */ diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c index 83bf997dda03..9e84c17a587a 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c @@ -16,19 +16,11 @@ * this program. If not, see <http://www.gnu.org/licenses/>. 
*/ +#include <drm/drm_print.h> #include "mdp5_kms.h" struct mdp5_plane { struct drm_plane base; - const char *name; - - enum mdp5_pipe pipe; - - spinlock_t pipe_lock; /* protect REG_MDP5_PIPE_* registers */ - uint32_t reg_offset; - uint32_t caps; - - uint32_t flush_mask; /* used to commit pipe registers */ uint32_t nformats; uint32_t formats[32]; @@ -69,21 +61,12 @@ static void mdp5_plane_destroy(struct drm_plane *plane) static void mdp5_plane_install_rotation_property(struct drm_device *dev, struct drm_plane *plane) { - struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); - - if (!(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP) && - !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) - return; - - if (!dev->mode_config.rotation_property) - dev->mode_config.rotation_property = - drm_mode_create_rotation_property(dev, - DRM_ROTATE_0 | DRM_REFLECT_X | DRM_REFLECT_Y); - - if (dev->mode_config.rotation_property) - drm_object_attach_property(&plane->base, - dev->mode_config.rotation_property, - DRM_ROTATE_0); + drm_plane_create_rotation_property(plane, + DRM_ROTATE_0, + DRM_ROTATE_0 | + DRM_ROTATE_180 | + DRM_REFLECT_X | + DRM_REFLECT_Y); } /* helper to install properties which are common to planes and crtcs */ @@ -120,6 +103,7 @@ static void mdp5_plane_install_properties(struct drm_plane *plane, ARRAY_SIZE(name##_prop_enum_list)) INSTALL_RANGE_PROPERTY(zpos, ZPOS, 1, 255, 1); + INSTALL_RANGE_PROPERTY(alpha, ALPHA, 0, 255, 255); mdp5_plane_install_rotation_property(dev, plane); @@ -148,6 +132,7 @@ static int mdp5_plane_atomic_set_property(struct drm_plane *plane, } while (0) SET_PROPERTY(zpos, ZPOS, uint8_t); + SET_PROPERTY(alpha, ALPHA, uint8_t); dev_err(dev->dev, "Invalid property\n"); ret = -EINVAL; @@ -176,6 +161,7 @@ static int mdp5_plane_atomic_get_property(struct drm_plane *plane, } while (0) GET_PROPERTY(zpos, ZPOS, uint8_t); + GET_PROPERTY(alpha, ALPHA, uint8_t); dev_err(dev->dev, "Invalid property\n"); ret = -EINVAL; @@ -184,6 +170,20 @@ done: #undef SET_PROPERTY } +static 
void +mdp5_plane_atomic_print_state(struct drm_printer *p, + const struct drm_plane_state *state) +{ + struct mdp5_plane_state *pstate = to_mdp5_plane_state(state); + + drm_printf(p, "\thwpipe=%s\n", pstate->hwpipe ? + pstate->hwpipe->name : "(null)"); + drm_printf(p, "\tpremultiplied=%u\n", pstate->premultiplied); + drm_printf(p, "\tzpos=%u\n", pstate->zpos); + drm_printf(p, "\talpha=%u\n", pstate->alpha); + drm_printf(p, "\tstage=%s\n", stage2name(pstate->stage)); +} + static void mdp5_plane_reset(struct drm_plane *plane) { struct mdp5_plane_state *mdp5_state; @@ -222,19 +222,18 @@ mdp5_plane_duplicate_state(struct drm_plane *plane) if (mdp5_state && mdp5_state->base.fb) drm_framebuffer_reference(mdp5_state->base.fb); - mdp5_state->mode_changed = false; - mdp5_state->pending = false; - return &mdp5_state->base; } static void mdp5_plane_destroy_state(struct drm_plane *plane, struct drm_plane_state *state) { + struct mdp5_plane_state *pstate = to_mdp5_plane_state(state); + if (state->fb) drm_framebuffer_unreference(state->fb); - kfree(to_mdp5_plane_state(state)); + kfree(pstate); } static const struct drm_plane_funcs mdp5_plane_funcs = { @@ -247,99 +246,117 @@ static const struct drm_plane_funcs mdp5_plane_funcs = { .reset = mdp5_plane_reset, .atomic_duplicate_state = mdp5_plane_duplicate_state, .atomic_destroy_state = mdp5_plane_destroy_state, + .atomic_print_state = mdp5_plane_atomic_print_state, }; static int mdp5_plane_prepare_fb(struct drm_plane *plane, struct drm_plane_state *new_state) { - struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); struct mdp5_kms *mdp5_kms = get_kms(plane); struct drm_framebuffer *fb = new_state->fb; if (!new_state->fb) return 0; - DBG("%s: prepare: FB[%u]", mdp5_plane->name, fb->base.id); + DBG("%s: prepare: FB[%u]", plane->name, fb->base.id); return msm_framebuffer_prepare(fb, mdp5_kms->id); } static void mdp5_plane_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state) { - struct mdp5_plane *mdp5_plane = 
to_mdp5_plane(plane); struct mdp5_kms *mdp5_kms = get_kms(plane); struct drm_framebuffer *fb = old_state->fb; if (!fb) return; - DBG("%s: cleanup: FB[%u]", mdp5_plane->name, fb->base.id); + DBG("%s: cleanup: FB[%u]", plane->name, fb->base.id); msm_framebuffer_cleanup(fb, mdp5_kms->id); } static int mdp5_plane_atomic_check(struct drm_plane *plane, struct drm_plane_state *state) { - struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); + struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state); struct drm_plane_state *old_state = plane->state; - const struct mdp_format *format; - bool vflip, hflip; + struct mdp5_cfg *config = mdp5_cfg_get_config(get_kms(plane)->cfg); + bool new_hwpipe = false; + uint32_t max_width, max_height; + uint32_t caps = 0; - DBG("%s: check (%d -> %d)", mdp5_plane->name, + DBG("%s: check (%d -> %d)", plane->name, plane_enabled(old_state), plane_enabled(state)); + max_width = config->hw->lm.max_width << 16; + max_height = config->hw->lm.max_height << 16; + + /* Make sure source dimensions are within bounds. 
*/ + if ((state->src_w > max_width) || (state->src_h > max_height)) { + struct drm_rect src = drm_plane_state_src(state); + DBG("Invalid source size "DRM_RECT_FP_FMT, + DRM_RECT_FP_ARG(&src)); + return -ERANGE; + } + if (plane_enabled(state)) { + unsigned int rotation; + const struct mdp_format *format; + struct mdp5_kms *mdp5_kms = get_kms(plane); + uint32_t blkcfg = 0; + format = to_mdp_format(msm_framebuffer_format(state->fb)); - if (MDP_FORMAT_IS_YUV(format) && - !pipe_supports_yuv(mdp5_plane->caps)) { - DBG("Pipe doesn't support YUV\n"); + if (MDP_FORMAT_IS_YUV(format)) + caps |= MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC; - return -EINVAL; - } + if (((state->src_w >> 16) != state->crtc_w) || + ((state->src_h >> 16) != state->crtc_h)) + caps |= MDP_PIPE_CAP_SCALE; - if (!(mdp5_plane->caps & MDP_PIPE_CAP_SCALE) && - (((state->src_w >> 16) != state->crtc_w) || - ((state->src_h >> 16) != state->crtc_h))) { - DBG("Pipe doesn't support scaling (%dx%d -> %dx%d)\n", - state->src_w >> 16, state->src_h >> 16, - state->crtc_w, state->crtc_h); + rotation = drm_rotation_simplify(state->rotation, + DRM_ROTATE_0 | + DRM_REFLECT_X | + DRM_REFLECT_Y); - return -EINVAL; - } + if (rotation & DRM_REFLECT_X) + caps |= MDP_PIPE_CAP_HFLIP; - hflip = !!(state->rotation & DRM_REFLECT_X); - vflip = !!(state->rotation & DRM_REFLECT_Y); - if ((vflip && !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) || - (hflip && !(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP))) { - DBG("Pipe doesn't support flip\n"); + if (rotation & DRM_REFLECT_Y) + caps |= MDP_PIPE_CAP_VFLIP; - return -EINVAL; - } - } + /* (re)allocate hw pipe if we don't have one or caps-mismatch: */ + if (!mdp5_state->hwpipe || (caps & ~mdp5_state->hwpipe->caps)) + new_hwpipe = true; - if (plane_enabled(state) && plane_enabled(old_state)) { - /* we cannot change SMP block configuration during scanout: */ - bool full_modeset = false; - if (state->fb->pixel_format != old_state->fb->pixel_format) { - DBG("%s: pixel_format change!", mdp5_plane->name); 
- full_modeset = true; - } - if (state->src_w != old_state->src_w) { - DBG("%s: src_w change!", mdp5_plane->name); - full_modeset = true; - } - if (to_mdp5_plane_state(old_state)->pending) { - DBG("%s: still pending!", mdp5_plane->name); - full_modeset = true; + if (mdp5_kms->smp) { + const struct mdp_format *format = + to_mdp_format(msm_framebuffer_format(state->fb)); + + blkcfg = mdp5_smp_calculate(mdp5_kms->smp, format, + state->src_w >> 16, false); + + if (mdp5_state->hwpipe && (mdp5_state->hwpipe->blkcfg != blkcfg)) + new_hwpipe = true; } - if (full_modeset) { - struct drm_crtc_state *crtc_state = - drm_atomic_get_crtc_state(state->state, state->crtc); - crtc_state->mode_changed = true; - to_mdp5_plane_state(state)->mode_changed = true; + + /* (re)assign hwpipe if needed, otherwise keep old one: */ + if (new_hwpipe) { + /* TODO maybe we want to re-assign hwpipe sometimes + * in cases when we no-longer need some caps to make + * it available for other planes? + */ + struct mdp5_hw_pipe *hwpipe = mdp5_state->hwpipe; + mdp5_state->hwpipe = mdp5_pipe_assign(state->state, + plane, caps, blkcfg); + if (IS_ERR(mdp5_state->hwpipe)) { + DBG("%s: failed to assign hwpipe!", plane->name); + return PTR_ERR(mdp5_state->hwpipe); + } + mdp5_pipe_release(state->state, hwpipe); } } else { - to_mdp5_plane_state(state)->mode_changed = true; + mdp5_pipe_release(state->state, mdp5_state->hwpipe); + mdp5_state->hwpipe = NULL; } return 0; @@ -348,16 +365,13 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane, static void mdp5_plane_atomic_update(struct drm_plane *plane, struct drm_plane_state *old_state) { - struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); struct drm_plane_state *state = plane->state; - DBG("%s: update", mdp5_plane->name); + DBG("%s: update", plane->name); - if (!plane_enabled(state)) { - to_mdp5_plane_state(state)->pending = true; - } else if (to_mdp5_plane_state(state)->mode_changed) { + if (plane_enabled(state)) { int ret; - 
to_mdp5_plane_state(state)->pending = true; + ret = mdp5_plane_mode_set(plane, state->crtc, state->fb, state->crtc_x, state->crtc_y, @@ -366,11 +380,6 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane, state->src_w, state->src_h); /* atomic_check should have ensured that this doesn't fail */ WARN_ON(ret < 0); - } else { - unsigned long flags; - spin_lock_irqsave(&mdp5_plane->pipe_lock, flags); - set_scanout_locked(plane, state->fb); - spin_unlock_irqrestore(&mdp5_plane->pipe_lock, flags); } } @@ -384,9 +393,9 @@ static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = { static void set_scanout_locked(struct drm_plane *plane, struct drm_framebuffer *fb) { - struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); struct mdp5_kms *mdp5_kms = get_kms(plane); - enum mdp5_pipe pipe = mdp5_plane->pipe; + struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(plane->state)->hwpipe; + enum mdp5_pipe pipe = hwpipe->pipe; mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe), MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) | @@ -666,18 +675,19 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h) { - struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); struct drm_plane_state *pstate = plane->state; + struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(pstate)->hwpipe; struct mdp5_kms *mdp5_kms = get_kms(plane); - enum mdp5_pipe pipe = mdp5_plane->pipe; + enum mdp5_pipe pipe = hwpipe->pipe; const struct mdp_format *format; uint32_t nplanes, config = 0; uint32_t phasex_step[COMP_MAX] = {0,}, phasey_step[COMP_MAX] = {0,}; - bool pe = mdp5_plane->caps & MDP_PIPE_CAP_SW_PIX_EXT; + bool pe = hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT; int pe_left[COMP_MAX], pe_right[COMP_MAX]; int pe_top[COMP_MAX], pe_bottom[COMP_MAX]; uint32_t hdecm = 0, vdecm = 0; uint32_t pix_format; + unsigned int rotation; bool vflip, hflip; unsigned long flags; int ret; @@ -697,27 +707,10 @@ static int mdp5_plane_mode_set(struct 
drm_plane *plane, src_w = src_w >> 16; src_h = src_h >> 16; - DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", mdp5_plane->name, + DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", plane->name, fb->base.id, src_x, src_y, src_w, src_h, crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h); - /* Request some memory from the SMP: */ - if (mdp5_kms->smp) { - ret = mdp5_smp_request(mdp5_kms->smp, - mdp5_plane->pipe, format, src_w, false); - if (ret) - return ret; - } - - /* - * Currently we update the hw for allocations/requests immediately, - * but once atomic modeset/pageflip is in place, the allocation - * would move into atomic->check_plane_state(), while updating the - * hw would remain here: - */ - if (mdp5_kms->smp) - mdp5_smp_configure(mdp5_kms->smp, pipe); - ret = calc_scalex_steps(plane, pix_format, src_w, crtc_w, phasex_step); if (ret) return ret; @@ -726,7 +719,7 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, if (ret) return ret; - if (mdp5_plane->caps & MDP_PIPE_CAP_SW_PIX_EXT) { + if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT) { calc_pixel_ext(format, src_w, crtc_w, phasex_step, pe_left, pe_right, true); calc_pixel_ext(format, src_h, crtc_h, phasey_step, @@ -740,14 +733,18 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, config |= get_scale_config(format, src_h, crtc_h, false); DBG("scale config = %x", config); - hflip = !!(pstate->rotation & DRM_REFLECT_X); - vflip = !!(pstate->rotation & DRM_REFLECT_Y); + rotation = drm_rotation_simplify(pstate->rotation, + DRM_ROTATE_0 | + DRM_REFLECT_X | + DRM_REFLECT_Y); + hflip = !!(rotation & DRM_REFLECT_X); + vflip = !!(rotation & DRM_REFLECT_Y); - spin_lock_irqsave(&mdp5_plane->pipe_lock, flags); + spin_lock_irqsave(&hwpipe->pipe_lock, flags); mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe), - MDP5_PIPE_SRC_IMG_SIZE_WIDTH(fb->width) | - MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(fb->height)); + MDP5_PIPE_SRC_IMG_SIZE_WIDTH(min(fb->width, src_w)) | + MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(min(fb->height, 
src_h))); mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_SIZE(pipe), MDP5_PIPE_SRC_SIZE_WIDTH(src_w) | @@ -792,12 +789,12 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, /* not using secure mode: */ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0); - if (mdp5_plane->caps & MDP_PIPE_CAP_SW_PIX_EXT) + if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT) mdp5_write_pixel_ext(mdp5_kms, pipe, format, src_w, pe_left, pe_right, src_h, pe_top, pe_bottom); - if (mdp5_plane->caps & MDP_PIPE_CAP_SCALE) { + if (hwpipe->caps & MDP_PIPE_CAP_SCALE) { mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe), phasex_step[COMP_0]); mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe), @@ -812,7 +809,7 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe), config); } - if (mdp5_plane->caps & MDP_PIPE_CAP_CSC) { + if (hwpipe->caps & MDP_PIPE_CAP_CSC) { if (MDP_FORMAT_IS_YUV(format)) csc_enable(mdp5_kms, pipe, mdp_get_default_csc_cfg(CSC_YUV2RGB)); @@ -822,56 +819,33 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, set_scanout_locked(plane, fb); - spin_unlock_irqrestore(&mdp5_plane->pipe_lock, flags); + spin_unlock_irqrestore(&hwpipe->pipe_lock, flags); return ret; } -void mdp5_plane_complete_flip(struct drm_plane *plane) +enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane) { - struct mdp5_kms *mdp5_kms = get_kms(plane); - struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); - enum mdp5_pipe pipe = mdp5_plane->pipe; - - DBG("%s: complete flip", mdp5_plane->name); - - if (mdp5_kms->smp) - mdp5_smp_commit(mdp5_kms->smp, pipe); + struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state); - to_mdp5_plane_state(plane->state)->pending = false; -} + if (WARN_ON(!pstate->hwpipe)) + return 0; -enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane) -{ - struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); - return mdp5_plane->pipe; + return pstate->hwpipe->pipe; } uint32_t 
mdp5_plane_get_flush(struct drm_plane *plane) { - struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); - - return mdp5_plane->flush_mask; -} + struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state); -/* called after vsync in thread context */ -void mdp5_plane_complete_commit(struct drm_plane *plane, - struct drm_plane_state *state) -{ - struct mdp5_kms *mdp5_kms = get_kms(plane); - struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); - enum mdp5_pipe pipe = mdp5_plane->pipe; + if (WARN_ON(!pstate->hwpipe)) + return 0; - if (!plane_enabled(plane->state) && mdp5_kms->smp) { - DBG("%s: free SMP", mdp5_plane->name); - mdp5_smp_release(mdp5_kms->smp, pipe); - } + return pstate->hwpipe->flush_mask; } /* initialize plane */ -struct drm_plane *mdp5_plane_init(struct drm_device *dev, - enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset, - uint32_t caps) +struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary) { struct drm_plane *plane = NULL; struct mdp5_plane *mdp5_plane; @@ -886,19 +860,10 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev, plane = &mdp5_plane->base; - mdp5_plane->pipe = pipe; - mdp5_plane->name = pipe2name(pipe); - mdp5_plane->caps = caps; - mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats, - ARRAY_SIZE(mdp5_plane->formats), - !pipe_supports_yuv(mdp5_plane->caps)); - - mdp5_plane->flush_mask = mdp_ctl_flush_mask_pipe(pipe); - mdp5_plane->reg_offset = reg_offset; - spin_lock_init(&mdp5_plane->pipe_lock); + ARRAY_SIZE(mdp5_plane->formats), false); - type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY; + type = primary ? 
DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY; ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs, mdp5_plane->formats, mdp5_plane->nformats, type, NULL); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c index 27d7b55b52c9..58f712d37e7f 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c @@ -21,72 +21,6 @@ #include "mdp5_smp.h" -/* SMP - Shared Memory Pool - * - * These are shared between all the clients, where each plane in a - * scanout buffer is a SMP client. Ie. scanout of 3 plane I420 on - * pipe VIG0 => 3 clients: VIG0_Y, VIG0_CB, VIG0_CR. - * - * Based on the size of the attached scanout buffer, a certain # of - * blocks must be allocated to that client out of the shared pool. - * - * In some hw, some blocks are statically allocated for certain pipes - * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0). - * - * For each block that can be dynamically allocated, it can be either - * free: - * The block is free. - * - * pending: - * The block is allocated to some client and not free. - * - * configured: - * The block is allocated to some client, and assigned to that - * client in MDP5_SMP_ALLOC registers. - * - * inuse: - * The block is being actively used by a client. - * - * The updates happen in the following steps: - * - * 1) mdp5_smp_request(): - * When plane scanout is setup, calculate required number of - * blocks needed per client, and request. Blocks neither inuse nor - * configured nor pending by any other client are added to client's - * pending set. - * For shrinking, blocks in pending but not in configured can be freed - * directly, but those already in configured will be freed later by - * mdp5_smp_commit. - * - * 2) mdp5_smp_configure(): - * As hw is programmed, before FLUSH, MDP5_SMP_ALLOC registers - * are configured for the union(pending, inuse) - * Current pending is copied to configured. 
- * It is assumed that mdp5_smp_request and mdp5_smp_configure not run - * concurrently for the same pipe. - * - * 3) mdp5_smp_commit(): - * After next vblank, copy configured -> inuse. Optionally update - * MDP5_SMP_ALLOC registers if there are newly unused blocks - * - * 4) mdp5_smp_release(): - * Must be called after the pipe is disabled and no longer uses any SMB - * - * On the next vblank after changes have been committed to hw, the - * client's pending blocks become it's in-use blocks (and no-longer - * in-use blocks become available to other clients). - * - * btw, hurray for confusing overloaded acronyms! :-/ - * - * NOTE: for atomic modeset/pageflip NONBLOCK operations, step #1 - * should happen at (or before)? atomic->check(). And we'd need - * an API to discard previous requests if update is aborted or - * (test-only). - * - * TODO would perhaps be nice to have debugfs to dump out kernel - * inuse and pending state of all clients.. - */ - struct mdp5_smp { struct drm_device *dev; @@ -94,16 +28,8 @@ struct mdp5_smp { int blk_cnt; int blk_size; - - spinlock_t state_lock; - mdp5_smp_state_t state; /* to track smp allocation amongst pipes: */ - - struct mdp5_client_smp_state client_state[MAX_CLIENTS]; }; -static void update_smp_state(struct mdp5_smp *smp, - u32 cid, mdp5_smp_state_t *assigned); - static inline struct mdp5_kms *get_kms(struct mdp5_smp *smp) { @@ -134,57 +60,38 @@ static inline u32 pipe2client(enum mdp5_pipe pipe, int plane) return mdp5_cfg->smp.clients[pipe] + plane; } -/* step #1: update # of blocks pending for the client: */ +/* allocate blocks for the specified request: */ static int smp_request_block(struct mdp5_smp *smp, + struct mdp5_smp_state *state, u32 cid, int nblks) { - struct mdp5_kms *mdp5_kms = get_kms(smp); - struct mdp5_client_smp_state *ps = &smp->client_state[cid]; - int i, ret, avail, cur_nblks, cnt = smp->blk_cnt; + void *cs = state->client_state[cid]; + int i, avail, cnt = smp->blk_cnt; uint8_t reserved; - unsigned long 
flags; - reserved = smp->reserved[cid]; + /* we shouldn't be requesting blocks for an in-use client: */ + WARN_ON(bitmap_weight(cs, cnt) > 0); - spin_lock_irqsave(&smp->state_lock, flags); + reserved = smp->reserved[cid]; if (reserved) { nblks = max(0, nblks - reserved); DBG("%d MMBs allocated (%d reserved)", nblks, reserved); } - avail = cnt - bitmap_weight(smp->state, cnt); + avail = cnt - bitmap_weight(state->state, cnt); if (nblks > avail) { - dev_err(mdp5_kms->dev->dev, "out of blks (req=%d > avail=%d)\n", + dev_err(smp->dev->dev, "out of blks (req=%d > avail=%d)\n", nblks, avail); - ret = -ENOSPC; - goto fail; + return -ENOSPC; } - cur_nblks = bitmap_weight(ps->pending, cnt); - if (nblks > cur_nblks) { - /* grow the existing pending reservation: */ - for (i = cur_nblks; i < nblks; i++) { - int blk = find_first_zero_bit(smp->state, cnt); - set_bit(blk, ps->pending); - set_bit(blk, smp->state); - } - } else { - /* shrink the existing pending reservation: */ - for (i = cur_nblks; i > nblks; i--) { - int blk = find_first_bit(ps->pending, cnt); - clear_bit(blk, ps->pending); - - /* clear in global smp_state if not in configured - * otherwise until _commit() - */ - if (!test_bit(blk, ps->configured)) - clear_bit(blk, smp->state); - } + for (i = 0; i < nblks; i++) { + int blk = find_first_zero_bit(state->state, cnt); + set_bit(blk, cs); + set_bit(blk, state->state); } -fail: - spin_unlock_irqrestore(&smp->state_lock, flags); return 0; } @@ -209,14 +116,15 @@ static void set_fifo_thresholds(struct mdp5_smp *smp, * decimated width. Ie. SMP buffering sits downstream of decimation (which * presumably happens during the dma from scanout buffer). 
*/ -int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, - const struct mdp_format *format, u32 width, bool hdecim) +uint32_t mdp5_smp_calculate(struct mdp5_smp *smp, + const struct mdp_format *format, + u32 width, bool hdecim) { struct mdp5_kms *mdp5_kms = get_kms(smp); - struct drm_device *dev = mdp5_kms->dev; int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg); - int i, hsub, nplanes, nlines, nblks, ret; + int i, hsub, nplanes, nlines; u32 fmt = format->base.pixel_format; + uint32_t blkcfg = 0; nplanes = drm_format_num_planes(fmt); hsub = drm_format_horz_chroma_subsampling(fmt); @@ -239,7 +147,7 @@ int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, hsub = 1; } - for (i = 0, nblks = 0; i < nplanes; i++) { + for (i = 0; i < nplanes; i++) { int n, fetch_stride, cpp; cpp = drm_format_plane_cpp(fmt, i); @@ -251,60 +159,72 @@ int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, if (rev == 0) n = roundup_pow_of_two(n); + blkcfg |= (n << (8 * i)); + } + + return blkcfg; +} + +int mdp5_smp_assign(struct mdp5_smp *smp, struct mdp5_smp_state *state, + enum mdp5_pipe pipe, uint32_t blkcfg) +{ + struct mdp5_kms *mdp5_kms = get_kms(smp); + struct drm_device *dev = mdp5_kms->dev; + int i, ret; + + for (i = 0; i < pipe2nclients(pipe); i++) { + u32 cid = pipe2client(pipe, i); + int n = blkcfg & 0xff; + + if (!n) + continue; + DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n); - ret = smp_request_block(smp, pipe2client(pipe, i), n); + ret = smp_request_block(smp, state, cid, n); if (ret) { dev_err(dev->dev, "Cannot allocate %d SMP blocks: %d\n", n, ret); return ret; } - nblks += n; + blkcfg >>= 8; } - set_fifo_thresholds(smp, pipe, nblks); + state->assigned |= (1 << pipe); return 0; } /* Release SMP blocks for all clients of the pipe */ -void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe) +void mdp5_smp_release(struct mdp5_smp *smp, struct mdp5_smp_state *state, + enum mdp5_pipe pipe) { int i; - unsigned long flags; int cnt = 
smp->blk_cnt; for (i = 0; i < pipe2nclients(pipe); i++) { - mdp5_smp_state_t assigned; u32 cid = pipe2client(pipe, i); - struct mdp5_client_smp_state *ps = &smp->client_state[cid]; - - spin_lock_irqsave(&smp->state_lock, flags); - - /* clear hw assignment */ - bitmap_or(assigned, ps->inuse, ps->configured, cnt); - update_smp_state(smp, CID_UNUSED, &assigned); - - /* free to global pool */ - bitmap_andnot(smp->state, smp->state, ps->pending, cnt); - bitmap_andnot(smp->state, smp->state, assigned, cnt); + void *cs = state->client_state[cid]; - /* clear client's infor */ - bitmap_zero(ps->pending, cnt); - bitmap_zero(ps->configured, cnt); - bitmap_zero(ps->inuse, cnt); + /* update global state: */ + bitmap_andnot(state->state, state->state, cs, cnt); - spin_unlock_irqrestore(&smp->state_lock, flags); + /* clear client's state */ + bitmap_zero(cs, cnt); } - set_fifo_thresholds(smp, pipe, 0); + state->released |= (1 << pipe); } -static void update_smp_state(struct mdp5_smp *smp, +/* NOTE: SMP_ALLOC_* regs are *not* double buffered, so release has to + * happen after scanout completes. 
+ */ +static unsigned update_smp_state(struct mdp5_smp *smp, u32 cid, mdp5_smp_state_t *assigned) { struct mdp5_kms *mdp5_kms = get_kms(smp); int cnt = smp->blk_cnt; + unsigned nblks = 0; u32 blk, val; for_each_set_bit(blk, *assigned, cnt) { @@ -330,62 +250,88 @@ static void update_smp_state(struct mdp5_smp *smp, mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx), val); mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(idx), val); + + nblks++; } + + return nblks; } -/* step #2: configure hw for union(pending, inuse): */ -void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe) +void mdp5_smp_prepare_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state) { - int cnt = smp->blk_cnt; - mdp5_smp_state_t assigned; - int i; + enum mdp5_pipe pipe; - for (i = 0; i < pipe2nclients(pipe); i++) { - u32 cid = pipe2client(pipe, i); - struct mdp5_client_smp_state *ps = &smp->client_state[cid]; + for_each_set_bit(pipe, &state->assigned, sizeof(state->assigned) * 8) { + unsigned i, nblks = 0; - /* - * if vblank has not happened since last smp_configure - * skip the configure for now - */ - if (!bitmap_equal(ps->inuse, ps->configured, cnt)) - continue; + for (i = 0; i < pipe2nclients(pipe); i++) { + u32 cid = pipe2client(pipe, i); + void *cs = state->client_state[cid]; - bitmap_copy(ps->configured, ps->pending, cnt); - bitmap_or(assigned, ps->inuse, ps->configured, cnt); - update_smp_state(smp, cid, &assigned); + nblks += update_smp_state(smp, cid, cs); + + DBG("assign %s:%u, %u blks", + pipe2name(pipe), i, nblks); + } + + set_fifo_thresholds(smp, pipe, nblks); } + + state->assigned = 0; } -/* step #3: after vblank, copy configured -> inuse: */ -void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe) +void mdp5_smp_complete_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state) { - int cnt = smp->blk_cnt; - mdp5_smp_state_t released; - int i; - - for (i = 0; i < pipe2nclients(pipe); i++) { - u32 cid = pipe2client(pipe, i); - struct mdp5_client_smp_state *ps = 
&smp->client_state[cid]; + enum mdp5_pipe pipe; - /* - * Figure out if there are any blocks we where previously - * using, which can be released and made available to other - * clients: - */ - if (bitmap_andnot(released, ps->inuse, ps->configured, cnt)) { - unsigned long flags; + for_each_set_bit(pipe, &state->released, sizeof(state->released) * 8) { + DBG("release %s", pipe2name(pipe)); + set_fifo_thresholds(smp, pipe, 0); + } - spin_lock_irqsave(&smp->state_lock, flags); - /* clear released blocks: */ - bitmap_andnot(smp->state, smp->state, released, cnt); - spin_unlock_irqrestore(&smp->state_lock, flags); + state->released = 0; +} - update_smp_state(smp, CID_UNUSED, &released); +void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p) +{ + struct mdp5_kms *mdp5_kms = get_kms(smp); + struct mdp5_hw_pipe_state *hwpstate; + struct mdp5_smp_state *state; + int total = 0, i, j; + + drm_printf(p, "name\tinuse\tplane\n"); + drm_printf(p, "----\t-----\t-----\n"); + + if (drm_can_sleep()) + drm_modeset_lock(&mdp5_kms->state_lock, NULL); + + /* grab these *after* we hold the state_lock */ + hwpstate = &mdp5_kms->state->hwpipe; + state = &mdp5_kms->state->smp; + + for (i = 0; i < mdp5_kms->num_hwpipes; i++) { + struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i]; + struct drm_plane *plane = hwpstate->hwpipe_to_plane[hwpipe->idx]; + enum mdp5_pipe pipe = hwpipe->pipe; + for (j = 0; j < pipe2nclients(pipe); j++) { + u32 cid = pipe2client(pipe, j); + void *cs = state->client_state[cid]; + int inuse = bitmap_weight(cs, smp->blk_cnt); + + drm_printf(p, "%s:%d\t%d\t%s\n", + pipe2name(pipe), j, inuse, + plane ? 
plane->name : NULL); + + total += inuse; } - - bitmap_copy(ps->inuse, ps->configured, cnt); } + + drm_printf(p, "TOTAL:\t%d\t(of %d)\n", total, smp->blk_cnt); + drm_printf(p, "AVAIL:\t%d\n", smp->blk_cnt - + bitmap_weight(state->state, smp->blk_cnt)); + + if (drm_can_sleep()) + drm_modeset_unlock(&mdp5_kms->state_lock); } void mdp5_smp_destroy(struct mdp5_smp *smp) @@ -393,8 +339,9 @@ void mdp5_smp_destroy(struct mdp5_smp *smp) kfree(smp); } -struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg) +struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms, const struct mdp5_smp_block *cfg) { + struct mdp5_smp_state *state = &mdp5_kms->state->smp; struct mdp5_smp *smp = NULL; int ret; @@ -404,14 +351,13 @@ struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_blo goto fail; } - smp->dev = dev; + smp->dev = mdp5_kms->dev; smp->blk_cnt = cfg->mmb_count; smp->blk_size = cfg->mmb_size; /* statically tied MMBs cannot be re-allocated: */ - bitmap_copy(smp->state, cfg->reserved_state, smp->blk_cnt); + bitmap_copy(state->state, cfg->reserved_state, smp->blk_cnt); memcpy(smp->reserved, cfg->reserved, sizeof(smp->reserved)); - spin_lock_init(&smp->state_lock); return smp; fail: diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h index 20b87e800ea3..b41d0448fbe8 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h @@ -19,12 +19,53 @@ #ifndef __MDP5_SMP_H__ #define __MDP5_SMP_H__ +#include <drm/drm_print.h> + #include "msm_drv.h" -struct mdp5_client_smp_state { - mdp5_smp_state_t inuse; - mdp5_smp_state_t configured; - mdp5_smp_state_t pending; +/* + * SMP - Shared Memory Pool: + * + * SMP blocks are shared between all the clients, where each plane in + * a scanout buffer is a SMP client. Ie. scanout of 3 plane I420 on + * pipe VIG0 => 3 clients: VIG0_Y, VIG0_CB, VIG0_CR. 
+ * + * Based on the size of the attached scanout buffer, a certain # of + * blocks must be allocated to that client out of the shared pool. + * + * In some hw, some blocks are statically allocated for certain pipes + * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0). + * + * + * Atomic SMP State: + * + * On atomic updates that modify SMP configuration, the state is cloned + * (copied) and modified. For test-only, or in cases where atomic + * update fails (or if we hit ww_mutex deadlock/backoff condition) the + * new state is simply thrown away. + * + * Because the SMP registers are not double buffered, updates are a + * two step process: + * + * 1) in _prepare_commit() we configure things (via read-modify-write) + * for the newly assigned pipes, so we don't take away blocks + * assigned to pipes that are still scanning out + * 2) in _complete_commit(), after vblank/etc, we clear things for the + * released clients, since at that point old pipes are no longer + * scanning out. + */ +struct mdp5_smp_state { + /* global state of what blocks are in use: */ + mdp5_smp_state_t state; + + /* per client state of what blocks they are using: */ + mdp5_smp_state_t client_state[MAX_CLIENTS]; + + /* assigned pipes (hw updated at _prepare_commit()): */ + unsigned long assigned; + + /* released pipes (hw updated at _complete_commit()): */ + unsigned long released; }; struct mdp5_kms; @@ -36,13 +77,22 @@ struct mdp5_smp; * which is then used to call the other mdp5_smp_*(handler, ...) functions. 
*/ -struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg); +struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms, + const struct mdp5_smp_block *cfg); void mdp5_smp_destroy(struct mdp5_smp *smp); -int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, - const struct mdp_format *format, u32 width, bool hdecim); -void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe); -void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe); -void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe); +void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p); + +uint32_t mdp5_smp_calculate(struct mdp5_smp *smp, + const struct mdp_format *format, + u32 width, bool hdecim); + +int mdp5_smp_assign(struct mdp5_smp *smp, struct mdp5_smp_state *state, + enum mdp5_pipe pipe, uint32_t blkcfg); +void mdp5_smp_release(struct mdp5_smp *smp, struct mdp5_smp_state *state, + enum mdp5_pipe pipe); + +void mdp5_smp_prepare_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state); +void mdp5_smp_complete_commit(struct mdp5_smp *smp, struct mdp5_smp_state *state); #endif /* __MDP5_SMP_H__ */ diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c index 73bae382eac3..bdffd04387dd 100644 --- a/drivers/gpu/drm/msm/msm_atomic.c +++ b/drivers/gpu/drm/msm/msm_atomic.c @@ -240,6 +240,10 @@ int msm_atomic_commit(struct drm_device *dev, drm_atomic_helper_swap_state(state, true); + /* swap driver private state while still holding state_lock */ + if (to_kms_state(state)->state) + priv->kms->funcs->swap_state(priv->kms, state); + /* * Everything below can be run asynchronously without the need to grab * any modeset locks at all under one conditions: It must be guaranteed @@ -269,3 +273,30 @@ error: drm_atomic_helper_cleanup_planes(dev, state); return ret; } + +struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev) +{ + struct msm_kms_state *state = kzalloc(sizeof(*state), GFP_KERNEL); + + 
if (!state || drm_atomic_state_init(dev, &state->base) < 0) { + kfree(state); + return NULL; + } + + return &state->base; +} + +void msm_atomic_state_clear(struct drm_atomic_state *s) +{ + struct msm_kms_state *state = to_kms_state(s); + drm_atomic_state_default_clear(&state->base); + kfree(state->state); + state->state = NULL; +} + +void msm_atomic_state_free(struct drm_atomic_state *state) +{ + kfree(to_kms_state(state)->state); + drm_atomic_state_default_release(state); + kfree(state); +} diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c index 663f2b6ef091..c1b40f5adb60 100644 --- a/drivers/gpu/drm/msm/msm_debugfs.c +++ b/drivers/gpu/drm/msm/msm_debugfs.c @@ -18,6 +18,8 @@ #ifdef CONFIG_DEBUG_FS #include "msm_drv.h" #include "msm_gpu.h" +#include "msm_kms.h" +#include "msm_debugfs.h" static int msm_gpu_show(struct drm_device *dev, struct seq_file *m) { @@ -141,6 +143,7 @@ int msm_debugfs_late_init(struct drm_device *dev) int msm_debugfs_init(struct drm_minor *minor) { struct drm_device *dev = minor->dev; + struct msm_drm_private *priv = dev->dev_private; int ret; ret = drm_debugfs_create_files(msm_debugfs_list, @@ -152,15 +155,25 @@ int msm_debugfs_init(struct drm_minor *minor) return ret; } - return 0; + if (priv->kms->funcs->debugfs_init) + ret = priv->kms->funcs->debugfs_init(priv->kms, minor); + + return ret; } void msm_debugfs_cleanup(struct drm_minor *minor) { + struct drm_device *dev = minor->dev; + struct msm_drm_private *priv = dev->dev_private; + drm_debugfs_remove_files(msm_debugfs_list, ARRAY_SIZE(msm_debugfs_list), minor); - if (!minor->dev->dev_private) + if (!priv) return; + + if (priv->kms->funcs->debugfs_cleanup) + priv->kms->funcs->debugfs_cleanup(priv->kms, minor); + msm_rd_debugfs_cleanup(minor); msm_perf_debugfs_cleanup(minor); } diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 6abf315fd6da..816aafbaab87 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ 
b/drivers/gpu/drm/msm/msm_drv.c @@ -15,6 +15,8 @@ * this program. If not, see <http://www.gnu.org/licenses/>. */ +#include <drm/drm_of.h> + #include "msm_drv.h" #include "msm_debugfs.h" #include "msm_fence.h" @@ -44,17 +46,21 @@ static const struct drm_mode_config_funcs mode_config_funcs = { .output_poll_changed = msm_fb_output_poll_changed, .atomic_check = msm_atomic_check, .atomic_commit = msm_atomic_commit, + .atomic_state_alloc = msm_atomic_state_alloc, + .atomic_state_clear = msm_atomic_state_clear, + .atomic_state_free = msm_atomic_state_free, }; -int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu) +int msm_register_address_space(struct drm_device *dev, + struct msm_gem_address_space *aspace) { struct msm_drm_private *priv = dev->dev_private; - int idx = priv->num_mmus++; + int idx = priv->num_aspaces++; - if (WARN_ON(idx >= ARRAY_SIZE(priv->mmus))) + if (WARN_ON(idx >= ARRAY_SIZE(priv->aspace))) return -EINVAL; - priv->mmus[idx] = mmu; + priv->aspace[idx] = aspace; return idx; } @@ -77,6 +83,10 @@ static char *vram = "16m"; MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)"); module_param(vram, charp, 0); +bool dumpstate = false; +MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors"); +module_param(dumpstate, bool, 0600); + /* * Util/helpers: */ @@ -904,10 +914,8 @@ static int add_components_mdp(struct device *mdp_dev, * remote-endpoint isn't a component that we need to add */ if (of_device_is_compatible(np, "qcom,mdp4") && - ep.port == 0) { - of_node_put(ep_node); + ep.port == 0) continue; - } /* * It's okay if some of the ports don't have a remote endpoint @@ -915,15 +923,12 @@ static int add_components_mdp(struct device *mdp_dev, * any external interface. 
*/ intf = of_graph_get_remote_port_parent(ep_node); - if (!intf) { - of_node_put(ep_node); + if (!intf) continue; - } - - component_match_add(master_dev, matchptr, compare_of, intf); + drm_of_component_match_add(master_dev, matchptr, compare_of, + intf); of_node_put(intf); - of_node_put(ep_node); } return 0; @@ -963,8 +968,8 @@ static int add_display_components(struct device *dev, put_device(mdp_dev); /* add the MDP component itself */ - component_match_add(dev, matchptr, compare_of, - mdp_dev->of_node); + drm_of_component_match_add(dev, matchptr, compare_of, + mdp_dev->of_node); } else { /* MDP4 */ mdp_dev = dev; @@ -997,7 +1002,7 @@ static int add_gpu_components(struct device *dev, if (!np) return 0; - component_match_add(dev, matchptr, compare_of, np); + drm_of_component_match_add(dev, matchptr, compare_of, np); of_node_put(np); @@ -1036,7 +1041,13 @@ static int msm_pdev_probe(struct platform_device *pdev) if (ret) return ret; - pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); + /* on all devices that I am aware of, iommu's which can map + * any address the cpu can see are used: + */ + ret = dma_set_mask_and_coherent(&pdev->dev, ~0); + if (ret) + return ret; + return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match); } diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index bc98d48c47f8..4a06d209855c 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -52,6 +52,8 @@ struct msm_perf_state; struct msm_gem_submit; struct msm_fence_context; struct msm_fence_cb; +struct msm_gem_address_space; +struct msm_gem_vma; #define NUM_DOMAINS 2 /* one for KMS, then one per gpu core (?) */ @@ -121,12 +123,16 @@ struct msm_drm_private { uint32_t pending_crtcs; wait_queue_head_t pending_crtcs_event; - /* registered MMUs: */ - unsigned int num_mmus; - struct msm_mmu *mmus[NUM_DOMAINS]; + /* Registered address spaces.. currently this is fixed per # of + * iommu's. Ie. one for display block and one for gpu block. 
+ * Eventually, to do per-process gpu pagetables, we'll want one + * of these per-process. + */ + unsigned int num_aspaces; + struct msm_gem_address_space *aspace[NUM_DOMAINS]; unsigned int num_planes; - struct drm_plane *planes[8]; + struct drm_plane *planes[16]; unsigned int num_crtcs; struct drm_crtc *crtcs[8]; @@ -173,8 +179,22 @@ int msm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state); int msm_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state, bool nonblock); +struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev); +void msm_atomic_state_clear(struct drm_atomic_state *state); +void msm_atomic_state_free(struct drm_atomic_state *state); + +int msm_register_address_space(struct drm_device *dev, + struct msm_gem_address_space *aspace); + +void msm_gem_unmap_vma(struct msm_gem_address_space *aspace, + struct msm_gem_vma *vma, struct sg_table *sgt); +int msm_gem_map_vma(struct msm_gem_address_space *aspace, + struct msm_gem_vma *vma, struct sg_table *sgt, int npages); -int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu); +void msm_gem_address_space_destroy(struct msm_gem_address_space *aspace); +struct msm_gem_address_space * +msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain, + const char *name); void msm_gem_submit_free(struct msm_gem_submit *submit); int msm_ioctl_gem_submit(struct drm_device *dev, void *data, @@ -304,8 +324,8 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name, void msm_writel(u32 data, void __iomem *addr); u32 msm_readl(const void __iomem *addr); -#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__) -#define VERB(fmt, ...) if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__) +#define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__) +#define VERB(fmt, ...) 
if (0) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__) static inline int align_pitch(int width, int bpp) { diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index b6ac27e31929..1a17f8176a93 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -296,12 +296,10 @@ put_iova(struct drm_gem_object *obj) WARN_ON(!mutex_is_locked(&dev->struct_mutex)); for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) { - struct msm_mmu *mmu = priv->mmus[id]; - if (mmu && msm_obj->domain[id].iova) { - uint32_t offset = msm_obj->domain[id].iova; - mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size); - msm_obj->domain[id].iova = 0; - } + if (!priv->aspace[id]) + continue; + msm_gem_unmap_vma(priv->aspace[id], + &msm_obj->domain[id], msm_obj->sgt); } } @@ -326,16 +324,8 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id, return PTR_ERR(pages); if (iommu_present(&platform_bus_type)) { - struct msm_mmu *mmu = priv->mmus[id]; - uint32_t offset; - - if (WARN_ON(!mmu)) - return -EINVAL; - - offset = (uint32_t)mmap_offset(obj); - ret = mmu->funcs->map(mmu, offset, msm_obj->sgt, - obj->size, IOMMU_READ | IOMMU_WRITE); - msm_obj->domain[id].iova = offset; + ret = msm_gem_map_vma(priv->aspace[id], &msm_obj->domain[id], + msm_obj->sgt, obj->size >> PAGE_SHIFT); } else { msm_obj->domain[id].iova = physaddr(obj); } @@ -632,8 +622,10 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m) struct reservation_object *robj = msm_obj->resv; struct reservation_object_list *fobj; struct fence *fence; + struct msm_drm_private *priv = obj->dev->dev_private; uint64_t off = drm_vma_node_start(&obj->vma_node); const char *madv; + unsigned id; WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); @@ -650,10 +642,15 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m) break; } - seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu%s\n", + seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t", msm_obj->flags, is_active(msm_obj) ? 
'A' : 'I', obj->name, obj->refcount.refcount.counter, - off, msm_obj->vaddr, obj->size, madv); + off, msm_obj->vaddr); + + for (id = 0; id < priv->num_aspaces; id++) + seq_printf(m, " %08llx", msm_obj->domain[id].iova); + + seq_printf(m, " %zu%s\n", obj->size, madv); rcu_read_lock(); fobj = rcu_dereference(robj->fence); @@ -761,7 +758,6 @@ static int msm_gem_new_impl(struct drm_device *dev, { struct msm_drm_private *priv = dev->dev_private; struct msm_gem_object *msm_obj; - unsigned sz; bool use_vram = false; switch (flags & MSM_BO_CACHE_MASK) { @@ -783,16 +779,12 @@ static int msm_gem_new_impl(struct drm_device *dev, if (WARN_ON(use_vram && !priv->vram.size)) return -EINVAL; - sz = sizeof(*msm_obj); - if (use_vram) - sz += sizeof(struct drm_mm_node); - - msm_obj = kzalloc(sz, GFP_KERNEL); + msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL); if (!msm_obj) return -ENOMEM; if (use_vram) - msm_obj->vram_node = (void *)&msm_obj[1]; + msm_obj->vram_node = &msm_obj->domain[0].node; msm_obj->flags = flags; msm_obj->madv = MSM_MADV_WILLNEED; diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index b2f13cfe945e..9b49ef72d5d4 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -24,6 +24,20 @@ /* Additional internal-use only BO flags: */ #define MSM_BO_STOLEN 0x10000000 /* try to use stolen/splash memory */ +struct msm_gem_address_space { + const char *name; + /* NOTE: mm managed at the page level, size is in # of pages + * and position mm_node->start is in # of pages: + */ + struct drm_mm mm; + struct msm_mmu *mmu; +}; + +struct msm_gem_vma { + struct drm_mm_node node; + uint64_t iova; +}; + struct msm_gem_object { struct drm_gem_object base; @@ -61,10 +75,7 @@ struct msm_gem_object { struct sg_table *sgt; void *vaddr; - struct { - // XXX - uint32_t iova; - } domain[NUM_DOMAINS]; + struct msm_gem_vma domain[NUM_DOMAINS]; /* normally (resv == &_resv) except for imported bo's */ struct reservation_object *resv; diff --git 
a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c index 192b2d3a79cb..949003003495 100644 --- a/drivers/gpu/drm/msm/msm_gem_shrinker.c +++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c @@ -95,7 +95,7 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) mutex_unlock(&dev->struct_mutex); if (freed > 0) - pr_info_ratelimited("Purging %lu bytes\n", freed << PAGE_SHIFT); + pr_debug_ratelimited("Purging %lu bytes\n", freed << PAGE_SHIFT); return freed; } diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c new file mode 100644 index 000000000000..a311d26ccb21 --- /dev/null +++ b/drivers/gpu/drm/msm/msm_gem_vma.c @@ -0,0 +1,90 @@ +/* + * Copyright (C) 2016 Red Hat + * Author: Rob Clark <robdclark@gmail.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include "msm_drv.h" +#include "msm_gem.h" +#include "msm_mmu.h" + +void +msm_gem_unmap_vma(struct msm_gem_address_space *aspace, + struct msm_gem_vma *vma, struct sg_table *sgt) +{ + if (!vma->iova) + return; + + if (aspace->mmu) { + unsigned size = vma->node.size << PAGE_SHIFT; + aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt, size); + } + + drm_mm_remove_node(&vma->node); + + vma->iova = 0; +} + +int +msm_gem_map_vma(struct msm_gem_address_space *aspace, + struct msm_gem_vma *vma, struct sg_table *sgt, int npages) +{ + int ret; + + if (WARN_ON(drm_mm_node_allocated(&vma->node))) + return 0; + + ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages, + 0, DRM_MM_SEARCH_DEFAULT); + if (ret) + return ret; + + vma->iova = vma->node.start << PAGE_SHIFT; + + if (aspace->mmu) { + unsigned size = npages << PAGE_SHIFT; + ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt, + size, IOMMU_READ | IOMMU_WRITE); + } + + return ret; +} + +void +msm_gem_address_space_destroy(struct msm_gem_address_space *aspace) +{ + drm_mm_takedown(&aspace->mm); + if (aspace->mmu) + aspace->mmu->funcs->destroy(aspace->mmu); + kfree(aspace); +} + +struct msm_gem_address_space * +msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain, + const char *name) +{ + struct msm_gem_address_space *aspace; + + aspace = kzalloc(sizeof(*aspace), GFP_KERNEL); + if (!aspace) + return ERR_PTR(-ENOMEM); + + aspace->name = name; + aspace->mmu = msm_iommu_new(dev, domain); + + drm_mm_init(&aspace->mm, (domain->geometry.aperture_start >> PAGE_SHIFT), + (domain->geometry.aperture_end >> PAGE_SHIFT) - 1); + + return aspace; +} diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 5bb09838b5ae..5814e968f62b 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -634,6 +634,14 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, gpu->grp_clks[i] = NULL; } + + /* HACK: Boost the GPU clock on msm8916 
for better performance. + * This should be removed when we have proper support for msm_bus + * and QoS. + */ + if (gpu->grp_clks[4] && gpu->grp_clks[5]) + clk_set_rate(gpu->grp_clks[4], INT_MAX); + gpu->ebi1_clk = devm_clk_get(&pdev->dev, "bus_clk"); DBG("ebi1_clk: %p", gpu->ebi1_clk); if (IS_ERR(gpu->ebi1_clk)) @@ -656,12 +664,17 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, */ iommu = iommu_domain_alloc(&platform_bus_type); if (iommu) { + /* TODO 32b vs 64b address space.. */ + iommu->geometry.aperture_start = 0x1000; + iommu->geometry.aperture_end = 0xffffffff; + dev_info(drm->dev, "%s: using IOMMU\n", name); - gpu->mmu = msm_iommu_new(&pdev->dev, iommu); - if (IS_ERR(gpu->mmu)) { - ret = PTR_ERR(gpu->mmu); + gpu->aspace = msm_gem_address_space_create(&pdev->dev, + iommu, "gpu"); + if (IS_ERR(gpu->aspace)) { + ret = PTR_ERR(gpu->aspace); dev_err(drm->dev, "failed to init iommu: %d\n", ret); - gpu->mmu = NULL; + gpu->aspace = NULL; iommu_domain_free(iommu); goto fail; } @@ -669,7 +682,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, } else { dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name); } - gpu->id = msm_register_mmu(drm, gpu->mmu); + gpu->id = msm_register_address_space(drm, gpu->aspace); /* Create ringbuffer: */ @@ -705,8 +718,8 @@ void msm_gpu_cleanup(struct msm_gpu *gpu) msm_ringbuffer_destroy(gpu->rb); } - if (gpu->mmu) - gpu->mmu->funcs->destroy(gpu->mmu); + if (gpu->aspace) + msm_gem_address_space_destroy(gpu->aspace); if (gpu->fctx) msm_fence_context_free(gpu->fctx); diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index d61d98a6e047..c6bf5d6ebc20 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -98,7 +98,7 @@ struct msm_gpu { void __iomem *mmio; int irq; - struct msm_mmu *mmu; + struct msm_gem_address_space *aspace; int id; /* Power Control: */ diff --git a/drivers/gpu/drm/msm/msm_iommu.c 
b/drivers/gpu/drm/msm/msm_iommu.c index 3a294d0da3a0..b733469d7a03 100644 --- a/drivers/gpu/drm/msm/msm_iommu.c +++ b/drivers/gpu/drm/msm/msm_iommu.c @@ -21,6 +21,7 @@ struct msm_iommu { struct msm_mmu base; struct iommu_domain *domain; + bool has_ctx; }; #define to_msm_iommu(x) container_of(x, struct msm_iommu, base) @@ -35,14 +36,46 @@ static int msm_iommu_attach(struct msm_mmu *mmu, const char * const *names, int cnt) { struct msm_iommu *iommu = to_msm_iommu(mmu); - return iommu_attach_device(iommu->domain, mmu->dev); + int i, ret; + + if (!iommu->has_ctx) + return iommu_attach_device(iommu->domain, mmu->dev); + + for (i = 0; i < cnt; i++) { + struct device *ctx = msm_iommu_get_ctx(names[i]); + + if (IS_ERR_OR_NULL(ctx)) { + dev_warn(mmu->dev, "couldn't get %s context", names[i]); + continue; + } + + ret = iommu_attach_device(iommu->domain, ctx); + if (ret) { + dev_warn(mmu->dev, "could not attach iommu to %s", names[i]); + return ret; + } + } + + return 0; } static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names, int cnt) { struct msm_iommu *iommu = to_msm_iommu(mmu); - iommu_detach_device(iommu->domain, mmu->dev); + int i; + + if (!iommu->has_ctx) + iommu_detach_device(iommu->domain, mmu->dev); + + for (i = 0; i < cnt; i++) { + struct device *ctx = msm_iommu_get_ctx(names[i]); + + if (IS_ERR_OR_NULL(ctx)) + continue; + + iommu_detach_device(iommu->domain, ctx); + } } static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova, @@ -138,5 +171,9 @@ struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain) msm_mmu_init(&iommu->base, dev, &funcs); iommu_set_fault_handler(domain, msm_fault_handler, dev); + if (of_find_compatible_node(NULL, NULL, "qcom,msm-smmu-v2") || + of_find_compatible_node(NULL, NULL, "qcom,msm-mmu-500")) + iommu->has_ctx = true; + return &iommu->base; } diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h index 40e41e5cdbc6..e470f4cf8f76 100644 --- a/drivers/gpu/drm/msm/msm_kms.h 
+++ b/drivers/gpu/drm/msm/msm_kms.h @@ -40,6 +40,8 @@ struct msm_kms_funcs { irqreturn_t (*irq)(struct msm_kms *kms); int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc); void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc); + /* swap global atomic state: */ + void (*swap_state)(struct msm_kms *kms, struct drm_atomic_state *state); /* modeset, bracketing atomic_commit(): */ void (*prepare_commit)(struct msm_kms *kms, struct drm_atomic_state *state); void (*complete_commit)(struct msm_kms *kms, struct drm_atomic_state *state); @@ -56,6 +58,11 @@ struct msm_kms_funcs { bool is_cmd_mode); /* cleanup: */ void (*destroy)(struct msm_kms *kms); +#ifdef CONFIG_DEBUG_FS + /* debugfs: */ + int (*debugfs_init)(struct msm_kms *kms, struct drm_minor *minor); + void (*debugfs_cleanup)(struct msm_kms *kms, struct drm_minor *minor); +#endif }; struct msm_kms { @@ -65,6 +72,18 @@ struct msm_kms { int irq; }; +/** + * Subclass of drm_atomic_state, to allow kms backend to have driver + * private global state. The kms backend can do whatever it wants + * with the ->state ptr. On ->atomic_state_clear() the ->state ptr + * is kfree'd and set back to NULL. 
+ */ +struct msm_kms_state { + struct drm_atomic_state base; + void *state; +}; +#define to_kms_state(x) container_of(x, struct msm_kms_state, base) + static inline void msm_kms_init(struct msm_kms *kms, const struct msm_kms_funcs *funcs) { diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h index b8ca9a0e9170..73445c70cde7 100644 --- a/drivers/gpu/drm/msm/msm_mmu.h +++ b/drivers/gpu/drm/msm/msm_mmu.h @@ -45,4 +45,12 @@ static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev, struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain); struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu); +#ifdef CONFIG_QCOM_IOMMU_V1 +struct device *msm_iommu_get_ctx(const char *ctx_name); +#else +static inline struct device *msm_iommu_get_ctx(const char *ctx_name) +{ + return NULL; +} +#endif #endif /* __MSM_MMU_H__ */ diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c index 66ac8c40db26..1549e8393056 100644 --- a/drivers/gpu/drm/omapdrm/omap_plane.c +++ b/drivers/gpu/drm/omapdrm/omap_plane.c @@ -108,16 +108,12 @@ static void omap_plane_atomic_update(struct drm_plane *plane, win.src_x = state->src_x >> 16; win.src_y = state->src_y >> 16; - switch (state->rotation & DRM_ROTATE_MASK) { - case DRM_ROTATE_90: - case DRM_ROTATE_270: + if (drm_rotation_90_or_270(state->rotation)) { win.src_w = state->src_h >> 16; win.src_h = state->src_w >> 16; - break; - default: + } else { win.src_w = state->src_w >> 16; win.src_h = state->src_h >> 16; - break; } /* update scanout: */ diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig index 3c58669a06ce..6f7f9c59f05b 100644 --- a/drivers/gpu/drm/rockchip/Kconfig +++ b/drivers/gpu/drm/rockchip/Kconfig @@ -1,7 +1,6 @@ config DRM_ROCKCHIP tristate "DRM Support for Rockchip" depends on DRM && ROCKCHIP_IOMMU - depends on RESET_CONTROLLER select DRM_GEM_CMA_HELPER select DRM_KMS_HELPER select DRM_PANEL diff --git 
a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index 8c8cbe837e61..6fe161192bb4 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -20,6 +20,7 @@ #include <drm/drm_crtc_helper.h> #include <drm/drm_fb_helper.h> #include <drm/drm_gem_cma_helper.h> +#include <drm/drm_of.h> #include <linux/dma-mapping.h> #include <linux/pm_runtime.h> #include <linux/module.h> @@ -388,7 +389,7 @@ static void rockchip_add_endpoints(struct device *dev, continue; } - component_match_add(dev, match, compare_of, remote); + drm_of_component_match_add(dev, match, compare_of, remote); of_node_put(remote); } } @@ -437,7 +438,8 @@ static int rockchip_drm_platform_probe(struct platform_device *pdev) } of_node_put(iommu); - component_match_add(dev, &match, compare_of, port->parent); + drm_of_component_match_add(dev, &match, compare_of, + port->parent); of_node_put(port); } diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c index 9df308565f6c..f3ca4b63e6c8 100644 --- a/drivers/gpu/drm/sti/sti_drv.c +++ b/drivers/gpu/drm/sti/sti_drv.c @@ -17,6 +17,7 @@ #include <drm/drm_crtc_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_fb_cma_helper.h> +#include <drm/drm_of.h> #include "sti_crtc.h" #include "sti_drv.h" @@ -443,8 +444,8 @@ static int sti_platform_probe(struct platform_device *pdev) child_np = of_get_next_available_child(node, NULL); while (child_np) { - component_match_add(dev, &match, compare_of, child_np); - of_node_put(child_np); + drm_of_component_match_add(dev, &match, compare_of, + child_np); child_np = of_get_next_available_child(node, child_np); } diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index 1feec34ca9dd..cc3a1e4ca530 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c @@ -18,6 +18,7 @@ #include <drm/drm_fb_cma_helper.h> #include <drm/drm_gem_cma_helper.h> #include 
<drm/drm_fb_helper.h> +#include <drm/drm_of.h> #include "sun4i_crtc.h" #include "sun4i_drv.h" @@ -250,7 +251,7 @@ static int sun4i_drv_add_endpoints(struct device *dev, /* Add current component */ DRM_DEBUG_DRIVER("Adding component %s\n", of_node_full_name(node)); - component_match_add(dev, match, compare_of, node); + drm_of_component_match_add(dev, match, compare_of, node); count++; } diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig index 63ebb154b9b5..bbf5a4b7e0b6 100644 --- a/drivers/gpu/drm/tegra/Kconfig +++ b/drivers/gpu/drm/tegra/Kconfig @@ -3,7 +3,6 @@ config DRM_TEGRA depends on ARCH_TEGRA || (ARM && COMPILE_TEST) depends on COMMON_CLK depends on DRM - depends on RESET_CONTROLLER select DRM_KMS_HELPER select DRM_MIPI_DSI select DRM_PANEL diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c index 68e895021005..06a4c584f3cb 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_external.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c @@ -10,6 +10,7 @@ #include <linux/component.h> #include <linux/of_graph.h> +#include <drm/drm_of.h> #include "tilcdc_drv.h" #include "tilcdc_external.h" @@ -160,7 +161,8 @@ int tilcdc_get_external_components(struct device *dev, dev_dbg(dev, "Subdevice node '%s' found\n", node->name); if (match) - component_match_add(dev, match, dev_match_of, node); + drm_of_component_match_add(dev, match, dev_match_of, + node); of_node_put(node); count++; } diff --git a/drivers/gpu/ipu-v3/Kconfig b/drivers/gpu/ipu-v3/Kconfig index aefdff95356d..08766c6e7856 100644 --- a/drivers/gpu/ipu-v3/Kconfig +++ b/drivers/gpu/ipu-v3/Kconfig @@ -1,7 +1,6 @@ config IMX_IPUV3_CORE tristate "IPUv3 core support" depends on SOC_IMX5 || SOC_IMX6Q || ARCH_MULTIPLATFORM - depends on RESET_CONTROLLER select GENERIC_IRQ_CHIP help Choose this if you have a i.MX5/6 system and want to use the Image diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index d252276feadf..bf350d192f40 100644 --- 
a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -780,6 +780,16 @@ config I2C_PXA_SLAVE is necessary for systems where the PXA may be a target on the I2C bus. +config I2C_QCOM_CCI + tristate "Qualcomm Camera Control Interface" + depends on ARCH_QCOM + help + If you say yes to this option, support will be included for the + built-in camera control interface on the Qualcomm SoCs. + + This driver can also be built as a module. If so, the module + will be called i2c-qcom-cci. + config I2C_QUP tristate "Qualcomm QUP based I2C controller" depends on ARCH_QCOM diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index 29764cc20a44..aed9bac6ec3b 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -75,6 +75,7 @@ obj-$(CONFIG_I2C_PNX) += i2c-pnx.o obj-$(CONFIG_I2C_PUV3) += i2c-puv3.o obj-$(CONFIG_I2C_PXA) += i2c-pxa.o obj-$(CONFIG_I2C_PXA_PCI) += i2c-pxa-pci.o +obj-$(CONFIG_I2C_QCOM_CCI) += i2c-qcom-cci.o obj-$(CONFIG_I2C_QUP) += i2c-qup.o obj-$(CONFIG_I2C_RIIC) += i2c-riic.o obj-$(CONFIG_I2C_RK3X) += i2c-rk3x.o diff --git a/drivers/i2c/busses/i2c-qcom-cci.c b/drivers/i2c/busses/i2c-qcom-cci.c new file mode 100644 index 000000000000..81987a66c839 --- /dev/null +++ b/drivers/i2c/busses/i2c-qcom-cci.c @@ -0,0 +1,791 @@ +/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * Copyright (C) 2017 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/clk.h> +#include <linux/completion.h> +#include <linux/i2c.h> +#include <linux/io.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/of.h> + +#define CCI_HW_VERSION 0x0 +#define CCI_RESET_CMD 0x004 +#define CCI_RESET_CMD_MASK 0x0f73f3f7 +#define CCI_RESET_CMD_M0_MASK 0x000003f1 +#define CCI_RESET_CMD_M1_MASK 0x0003f001 +#define CCI_QUEUE_START 0x008 +#define CCI_HALT_REQ 0x034 +#define CCI_HALT_REQ_I2C_M0_Q0Q1 (1 << 0) +#define CCI_HALT_REQ_I2C_M1_Q0Q1 (1 << 1) + +#define CCI_I2C_Mm_SCL_CTL(m) (0x100 + 0x100 * (m)) +#define CCI_I2C_Mm_SDA_CTL_0(m) (0x104 + 0x100 * (m)) +#define CCI_I2C_Mm_SDA_CTL_1(m) (0x108 + 0x100 * (m)) +#define CCI_I2C_Mm_SDA_CTL_2(m) (0x10c + 0x100 * (m)) +#define CCI_I2C_Mm_MISC_CTL(m) (0x110 + 0x100 * (m)) + +#define CCI_I2C_Mm_READ_DATA(m) (0x118 + 0x100 * (m)) +#define CCI_I2C_Mm_READ_BUF_LEVEL(m) (0x11c + 0x100 * (m)) +#define CCI_I2C_Mm_Qn_EXEC_WORD_CNT(m, n) (0x300 + 0x200 * (m) + 0x100 * (n)) +#define CCI_I2C_Mm_Qn_CUR_WORD_CNT(m, n) (0x304 + 0x200 * (m) + 0x100 * (n)) +#define CCI_I2C_Mm_Qn_CUR_CMD(m, n) (0x308 + 0x200 * (m) + 0x100 * (n)) +#define CCI_I2C_Mm_Qn_REPORT_STATUS(m, n) (0x30c + 0x200 * (m) + 0x100 * (n)) +#define CCI_I2C_Mm_Qn_LOAD_DATA(m, n) (0x310 + 0x200 * (m) + 0x100 * (n)) + +#define CCI_IRQ_GLOBAL_CLEAR_CMD 0xc00 +#define CCI_IRQ_MASK_0 0xc04 +#define CCI_IRQ_MASK_0_I2C_M0_RD_DONE (1 << 0) +#define CCI_IRQ_MASK_0_I2C_M0_Q0_REPORT (1 << 4) +#define CCI_IRQ_MASK_0_I2C_M0_Q1_REPORT (1 << 8) +#define CCI_IRQ_MASK_0_I2C_M1_RD_DONE (1 << 12) +#define CCI_IRQ_MASK_0_I2C_M1_Q0_REPORT (1 << 16) +#define CCI_IRQ_MASK_0_I2C_M1_Q1_REPORT (1 << 20) +#define CCI_IRQ_MASK_0_RST_DONE_ACK (1 << 24) +#define CCI_IRQ_MASK_0_I2C_M0_Q0Q1_HALT_ACK (1 << 25) +#define CCI_IRQ_MASK_0_I2C_M1_Q0Q1_HALT_ACK (1 << 26) +#define CCI_IRQ_MASK_0_I2C_M0_ERROR 0x18000ee6 +#define CCI_IRQ_MASK_0_I2C_M1_ERROR 0x60ee6000 +#define CCI_IRQ_CLEAR_0 0xc08 +#define 
CCI_IRQ_STATUS_0 0xc0c +#define CCI_IRQ_STATUS_0_I2C_M0_RD_DONE (1 << 0) +#define CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT (1 << 4) +#define CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT (1 << 8) +#define CCI_IRQ_STATUS_0_I2C_M1_RD_DONE (1 << 12) +#define CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT (1 << 16) +#define CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT (1 << 20) +#define CCI_IRQ_STATUS_0_RST_DONE_ACK (1 << 24) +#define CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK (1 << 25) +#define CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK (1 << 26) +#define CCI_IRQ_STATUS_0_I2C_M0_ERROR 0x18000ee6 +#define CCI_IRQ_STATUS_0_I2C_M1_ERROR 0x60ee6000 + +#define CCI_TIMEOUT_MS 100 +#define NUM_MASTERS 1 +#define NUM_QUEUES 2 + +#define CCI_RES_MAX 6 + +enum cci_i2c_cmd { + CCI_I2C_SET_PARAM = 1, + CCI_I2C_WAIT, + CCI_I2C_WAIT_SYNC, + CCI_I2C_WAIT_GPIO_EVENT, + CCI_I2C_TRIG_I2C_EVENT, + CCI_I2C_LOCK, + CCI_I2C_UNLOCK, + CCI_I2C_REPORT, + CCI_I2C_WRITE, + CCI_I2C_READ, + CCI_I2C_WRITE_DISABLE_P, + CCI_I2C_READ_DISABLE_P, +}; + +enum { + I2C_MODE_STANDARD, + I2C_MODE_FAST, + I2C_MODE_FAST_PLUS, +}; + +enum cci_i2c_queue_t { + QUEUE_0, + QUEUE_1 +}; + +enum cci_i2c_master_t { + MASTER_0, + MASTER_1 +}; + +struct resources { + char *clock[CCI_RES_MAX]; + u32 clock_rate[CCI_RES_MAX]; + char *reg[CCI_RES_MAX]; + char *interrupt[CCI_RES_MAX]; +}; + +struct hw_params { + u16 thigh; + u16 tlow; + u16 tsu_sto; + u16 tsu_sta; + u16 thd_dat; + u16 thd_sta; + u16 tbuf; + u8 scl_stretch_en; + u16 trdhld; + u16 tsp; +}; + +struct cci_clock { + struct clk *clk; + const char *name; + u32 freq; +}; + +struct cci_master { + u32 status; + u8 complete_pending; + struct completion irq_complete; +}; + +struct cci { + struct device *dev; + struct i2c_adapter adap; + void __iomem *base; + u32 irq; + char irq_name[30]; + struct cci_clock *clock; + int nclocks; + u8 mode; + u16 queue_size[NUM_QUEUES]; + struct cci_master master[NUM_MASTERS]; +}; + +static const struct resources res_v1_0_8 = { + .clock = { "camss_top_ahb", + "cci_ahb", + "camss_ahb", + 
"cci" }, + .clock_rate = { 0, + 80000000, + 0, + 19200000 }, + .reg = { "cci" }, + .interrupt = { "cci" } +}; + +static const struct resources res_v1_4_0 = { + .clock = { "mmss_mmagic_ahb", + "camss_top_ahb", + "cci_ahb", + "camss_ahb", + "cci" }, + .clock_rate = { 0, + 0, + 0, + 0, + 37500000 }, + .reg = { "cci" }, + .interrupt = { "cci" } +}; + +static const struct hw_params hw_params_v1_0_8[3] = { + { /* I2C_MODE_STANDARD */ + .thigh = 78, + .tlow = 114, + .tsu_sto = 28, + .tsu_sta = 28, + .thd_dat = 10, + .thd_sta = 77, + .tbuf = 118, + .scl_stretch_en = 0, + .trdhld = 6, + .tsp = 1 + }, + { /* I2C_MODE_FAST */ + .thigh = 20, + .tlow = 28, + .tsu_sto = 21, + .tsu_sta = 21, + .thd_dat = 13, + .thd_sta = 18, + .tbuf = 32, + .scl_stretch_en = 0, + .trdhld = 6, + .tsp = 3 + } +}; + +static const struct hw_params hw_params_v1_4_0[3] = { + { /* I2C_MODE_STANDARD */ + .thigh = 201, + .tlow = 174, + .tsu_sto = 204, + .tsu_sta = 231, + .thd_dat = 22, + .thd_sta = 162, + .tbuf = 227, + .scl_stretch_en = 0, + .trdhld = 6, + .tsp = 3 + }, + { /* I2C_MODE_FAST */ + .thigh = 38, + .tlow = 56, + .tsu_sto = 40, + .tsu_sta = 40, + .thd_dat = 22, + .thd_sta = 35, + .tbuf = 62, + .scl_stretch_en = 0, + .trdhld = 6, + .tsp = 3 + }, + { /* I2C_MODE_FAST_PLUS */ + .thigh = 16, + .tlow = 22, + .tsu_sto = 17, + .tsu_sta = 18, + .thd_dat = 16, + .thd_sta = 15, + .tbuf = 24, + .scl_stretch_en = 0, + .trdhld = 3, + .tsp = 3 + } +}; + +static const u16 queue_0_size_v1_0_8 = 64; +static const u16 queue_1_size_v1_0_8 = 16; + +static const u16 queue_0_size_v1_4_0 = 64; +static const u16 queue_1_size_v1_4_0 = 16; + +/* + * cci_enable_clocks - Enable multiple clocks + * @nclocks: Number of clocks in clock array + * @clock: Clock array + * @dev: Device + * + * Return 0 on success or a negative error code otherwise + */ +int cci_enable_clocks(int nclocks, struct cci_clock *clock, struct device *dev) +{ + int ret; + int i; + + for (i = 0; i < nclocks; i++) { + if (clock[i].freq) { + long rate; + 
+ rate = clk_round_rate(clock[i].clk, clock[i].freq); + if (rate < 0) { + dev_err(dev, "clk round rate failed: %ld\n", + rate); + goto error; + } + + ret = clk_set_rate(clock[i].clk, clock[i].freq); + if (ret < 0) { + dev_err(dev, "clk set rate failed: %d\n", ret); + goto error; + } + } + + ret = clk_prepare_enable(clock[i].clk); + if (ret) { + dev_err(dev, "clock enable failed, ret: %d\n", ret); + goto error; + } + } + + return 0; + +error: + for (i--; i >= 0; i--) + clk_disable_unprepare(clock[i].clk); + + return ret; +} + +/* + * cci_disable_clocks - Disable multiple clocks + * @nclocks: Number of clocks in clock array + * @clock: Clock array + */ +void cci_disable_clocks(int nclocks, struct cci_clock *clock) +{ + int i; + + for (i = nclocks - 1; i >= 0; i--) + clk_disable_unprepare(clock[i].clk); +} + +static irqreturn_t cci_isr(int irq, void *dev) +{ + struct cci *cci = dev; + u32 val; + + val = readl(cci->base + CCI_IRQ_STATUS_0); + writel(val, cci->base + CCI_IRQ_CLEAR_0); + writel(0x1, cci->base + CCI_IRQ_GLOBAL_CLEAR_CMD); + + if (val & CCI_IRQ_STATUS_0_RST_DONE_ACK) { + if (cci->master[MASTER_0].complete_pending) { + cci->master[MASTER_0].complete_pending = 0; + complete(&cci->master[MASTER_0].irq_complete); + } + + if (cci->master[MASTER_1].complete_pending) { + cci->master[MASTER_1].complete_pending = 0; + complete(&cci->master[MASTER_1].irq_complete); + } + } + + if (val & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE || + val & CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT || + val & CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT) { + cci->master[MASTER_0].status = 0; + complete(&cci->master[MASTER_0].irq_complete); + } + + if (val & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE || + val & CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT || + val & CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT) { + cci->master[MASTER_1].status = 0; + complete(&cci->master[MASTER_1].irq_complete); + } + + if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK)) { + cci->master[MASTER_0].complete_pending = 1; + writel(CCI_RESET_CMD_M0_MASK, 
cci->base + CCI_RESET_CMD); + } + + if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK)) { + cci->master[MASTER_1].complete_pending = 1; + writel(CCI_RESET_CMD_M1_MASK, cci->base + CCI_RESET_CMD); + } + + if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M0_ERROR)) { + dev_err_ratelimited(cci->dev, "MASTER_0 error 0x%08x\n", val); + cci->master[MASTER_0].status = -EIO; + writel(CCI_HALT_REQ_I2C_M0_Q0Q1, cci->base + CCI_HALT_REQ); + } + + if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M1_ERROR)) { + dev_err_ratelimited(cci->dev, "MASTER_1 error 0x%08x\n", val); + cci->master[MASTER_1].status = -EIO; + writel(CCI_HALT_REQ_I2C_M1_Q0Q1, cci->base + CCI_HALT_REQ); + } + + return IRQ_HANDLED; +} + +static int cci_reset(struct cci *cci) +{ + unsigned long time; + + cci->master[MASTER_0].complete_pending = 1; + writel(CCI_RESET_CMD_MASK, cci->base + CCI_RESET_CMD); + time = wait_for_completion_timeout( + &cci->master[MASTER_0].irq_complete, + msecs_to_jiffies(CCI_TIMEOUT_MS)); + if (!time) { + dev_err(cci->dev, "CCI reset timeout\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static int cci_init(struct cci *cci, const struct hw_params *hw) +{ + u32 val = CCI_IRQ_MASK_0_I2C_M0_RD_DONE | + CCI_IRQ_MASK_0_I2C_M0_Q0_REPORT | + CCI_IRQ_MASK_0_I2C_M0_Q1_REPORT | + CCI_IRQ_MASK_0_I2C_M1_RD_DONE | + CCI_IRQ_MASK_0_I2C_M1_Q0_REPORT | + CCI_IRQ_MASK_0_I2C_M1_Q1_REPORT | + CCI_IRQ_MASK_0_RST_DONE_ACK | + CCI_IRQ_MASK_0_I2C_M0_Q0Q1_HALT_ACK | + CCI_IRQ_MASK_0_I2C_M1_Q0Q1_HALT_ACK | + CCI_IRQ_MASK_0_I2C_M0_ERROR | + CCI_IRQ_MASK_0_I2C_M1_ERROR; + int i; + + writel(val, cci->base + CCI_IRQ_MASK_0); + + for (i = 0; i < NUM_MASTERS; i++) { + val = hw->thigh << 16 | hw->tlow; + writel(val, cci->base + CCI_I2C_Mm_SCL_CTL(i)); + + val = hw->tsu_sto << 16 | hw->tsu_sta; + writel(val, cci->base + CCI_I2C_Mm_SDA_CTL_0(i)); + + val = hw->thd_dat << 16 | hw->thd_sta; + writel(val, cci->base + CCI_I2C_Mm_SDA_CTL_1(i)); + + val = hw->tbuf; + writel(val, cci->base + CCI_I2C_Mm_SDA_CTL_2(i)); + + val = 
hw->scl_stretch_en << 8 | hw->trdhld << 4 | hw->tsp; + writel(val, cci->base + CCI_I2C_Mm_MISC_CTL(i)); + + cci->master[i].status = 0; + } + + return 0; +} + +static int cci_run_queue(struct cci *cci, u8 master, u8 queue) +{ + unsigned long time; + u32 val; + int ret; + + val = readl(cci->base + CCI_I2C_Mm_Qn_CUR_WORD_CNT(master, queue)); + writel(val, cci->base + CCI_I2C_Mm_Qn_EXEC_WORD_CNT(master, queue)); + + val = 1 << ((master * 2) + queue); + writel(val, cci->base + CCI_QUEUE_START); + + time = wait_for_completion_timeout(&cci->master[master].irq_complete, + CCI_TIMEOUT_MS); + if (!time) { + dev_err(cci->dev, "master %d queue %d timeout\n", + master, queue); + return -ETIMEDOUT; + } + + ret = cci->master[master].status; + if (ret < 0) + dev_err(cci->dev, "master %d queue %d error %d\n", + master, queue, ret); + + return ret; +} + +static int cci_validate_queue(struct cci *cci, u32 len, u8 master, u8 queue) +{ + int ret = 0; + u32 val; + + val = readl(cci->base + CCI_I2C_Mm_Qn_CUR_WORD_CNT(master, queue)); + + if (val + len + 1 > cci->queue_size[queue]) { + val = CCI_I2C_REPORT | (1 << 8); + writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue)); + + ret = cci_run_queue(cci, master, queue); + } + + return ret; +} + +static int cci_i2c_read(struct cci *cci, u16 addr, u8 *buf, u16 len) { + u8 master = MASTER_0; + u8 queue = QUEUE_1; + u32 val; + u32 words_read, words_exp; + int i, index, first; + int ret; + + if (len > cci->adap.quirks->max_read_len) + return -EOPNOTSUPP; + + /* + * Call validate queue to make sure queue is empty before starting. + * This is to avoid overflow / underflow of queue. 
+ */ + ret = cci_validate_queue(cci, cci->queue_size[queue], master, queue); + if (ret < 0) + return ret; + + val = CCI_I2C_SET_PARAM | ((addr >> 1) & 0x7f) << 4; + writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue)); + + val = CCI_I2C_READ | len << 4; + writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue)); + + ret = cci_run_queue(cci, master, queue); + if (ret < 0) + return ret; + + words_read = readl(cci->base + CCI_I2C_Mm_READ_BUF_LEVEL(master)); + words_exp = len / 4 + 1; + if (words_read != words_exp) { + dev_err(cci->dev, "words read = %d, words expected = %d\n", + words_read, words_exp); + return -EIO; + } + + index = 0; + first = 1; + do { + val = readl(cci->base + CCI_I2C_Mm_READ_DATA(master)); + + for (i = 0; i < 4 && index < len; i++) { + if (first) { + first = 0; + continue; + } + buf[index++] = (val >> (i * 8)) & 0xff; + } + } while (--words_read); + + return 0; +} + +static int cci_i2c_write(struct cci *cci, u16 addr, u8 *buf, u16 len) { + u8 master = MASTER_0; + u8 queue = QUEUE_0; + u8 load[12] = { 0 }; + u8 i, j; + u32 val; + int ret; + + if (len > cci->adap.quirks->max_write_len) + return -EOPNOTSUPP; + + /* + * Call validate queue to make sure queue is empty before starting. + * This is to avoid overflow / underflow of queue. 
+ */ + ret = cci_validate_queue(cci, cci->queue_size[queue], master, queue); + if (ret < 0) + return ret; + + val = CCI_I2C_SET_PARAM | ((addr >> 1) & 0x7f) << 4; + writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue)); + + i = 0; + load[i++] = CCI_I2C_WRITE | len << 4; + + for (j = 0; j < len; j++) + load[i++] = buf[j]; + + for (j = 0; j < i; j += 4) { + val = load[j]; + val |= load[j + 1] << 8; + val |= load[j + 2] << 16; + val |= load[j + 3] << 24; + writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue)); + } + + val = CCI_I2C_REPORT | 1 << 8; + writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue)); + + return cci_run_queue(cci, master, queue); +} + +static int cci_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) +{ + struct cci *cci = i2c_get_adapdata(adap); + int i; + int ret = 0; + + if (!num) + return -EOPNOTSUPP; + + for (i = 0; i < num; i++) { + if (msgs[i].flags & I2C_M_RD) + ret = cci_i2c_read(cci, msgs[i].addr, msgs[i].buf, + msgs[i].len); + else + ret = cci_i2c_write(cci, msgs[i].addr, msgs[i].buf, + msgs[i].len); + + if (ret < 0) { + dev_err(cci->dev, "cci i2c xfer error %d", ret); + break; + } + } + + return ret; +} + +static u32 cci_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C; +} + +static const struct i2c_algorithm cci_algo = { + .master_xfer = cci_xfer, + .functionality = cci_func, +}; + +static const struct i2c_adapter_quirks cci_quirks_v1_0_8 = { + .max_write_len = 10, + .max_read_len = 12, +}; + +static const struct i2c_adapter_quirks cci_quirks_v1_4_0 = { + .max_write_len = 11, + .max_read_len = 12, +}; + +/* + * cci_probe - Probe CCI platform device + * @pdev: Pointer to CCI platform device + * + * Return 0 on success or a negative error code on failure + */ +static int cci_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + const struct resources *res; + const struct hw_params *hw; + struct cci *cci; + struct resource *r; + int ret = 0; + u32 val; + int i; + + cci 
= devm_kzalloc(&pdev->dev, sizeof(*cci), GFP_KERNEL); + if (!cci) + return -ENOMEM; + + cci->dev = dev; + platform_set_drvdata(pdev, cci); + + if (of_device_is_compatible(dev->of_node, "qcom,cci-v1.0.8")) { + res = &res_v1_0_8; + hw = hw_params_v1_0_8; + cci->queue_size[0] = queue_0_size_v1_0_8; + cci->queue_size[1] = queue_1_size_v1_0_8; + cci->adap.quirks = &cci_quirks_v1_0_8; + } else if (of_device_is_compatible(dev->of_node, "qcom,cci-v1.4.0")) { + res = &res_v1_4_0; + hw = hw_params_v1_4_0; + cci->queue_size[0] = queue_0_size_v1_4_0; + cci->queue_size[1] = queue_1_size_v1_4_0; + cci->adap.quirks = &cci_quirks_v1_4_0; + } else { + return -EINVAL; + } + + cci->adap.algo = &cci_algo; + cci->adap.dev.parent = cci->dev; + cci->adap.dev.of_node = dev->of_node; + i2c_set_adapdata(&cci->adap, cci); + + strlcpy(cci->adap.name, "Qualcomm Camera Control Interface", + sizeof(cci->adap.name)); + + cci->mode = I2C_MODE_STANDARD; + ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency", &val); + if (!ret) { + if (val == 400000) + cci->mode = I2C_MODE_FAST; + else if (val == 1000000) + cci->mode = I2C_MODE_FAST_PLUS; + } + + /* Memory */ + + r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res->reg[0]); + cci->base = devm_ioremap_resource(dev, r); + if (IS_ERR(cci->base)) { + dev_err(dev, "could not map memory\n"); + return PTR_ERR(cci->base); + } + + /* Interrupt */ + + r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, + res->interrupt[0]); + if (!r) { + dev_err(dev, "missing IRQ\n"); + return -EINVAL; + } + + cci->irq = r->start; + snprintf(cci->irq_name, sizeof(cci->irq_name), "%s", dev_name(dev)); + ret = devm_request_irq(dev, cci->irq, cci_isr, + IRQF_TRIGGER_RISING, cci->irq_name, cci); + if (ret < 0) { + dev_err(dev, "request_irq failed, ret: %d\n", ret); + return ret; + } + + disable_irq(cci->irq); + + /* Clocks */ + + cci->nclocks = 0; + while (res->clock[cci->nclocks]) + cci->nclocks++; + + cci->clock = devm_kzalloc(dev, cci->nclocks * + 
sizeof(*cci->clock), GFP_KERNEL); + if (!cci->clock) + return -ENOMEM; + + for (i = 0; i < cci->nclocks; i++) { + struct cci_clock *clock = &cci->clock[i]; + + clock->clk = devm_clk_get(dev, res->clock[i]); + if (IS_ERR(clock->clk)) + return PTR_ERR(clock->clk); + + clock->name = res->clock[i]; + clock->freq = res->clock_rate[i]; + } + + ret = cci_enable_clocks(cci->nclocks, cci->clock, dev); + if (ret < 0) + return ret; + + val = readl_relaxed(cci->base + CCI_HW_VERSION); + dev_info(dev, "%s: CCI HW version = 0x%08x", __func__, val); + + init_completion(&cci->master[0].irq_complete); + init_completion(&cci->master[1].irq_complete); + + enable_irq(cci->irq); + + ret = cci_reset(cci); + if (ret < 0) + return ret; + + ret = cci_init(cci, &hw[cci->mode]); + if (ret < 0) + return ret; + + ret = i2c_add_adapter(&cci->adap); + + return ret; +} + +/* + * cci_remove - Remove CCI platform device + * @pdev: Pointer to CCI platform device + * + * Always returns 0. + */ +static int cci_remove(struct platform_device *pdev) +{ + struct cci *cci = platform_get_drvdata(pdev); + + disable_irq(cci->irq); + cci_disable_clocks(cci->nclocks, cci->clock); + + i2c_del_adapter(&cci->adap); + + return 0; +} + +static const struct of_device_id cci_dt_match[] = { + { .compatible = "qcom,cci-v1.0.8" }, + { .compatible = "qcom,cci-v1.4.0" }, + {} +}; +MODULE_DEVICE_TABLE(of, cci_dt_match); + +static struct platform_driver qcom_cci_driver = { + .probe = cci_probe, + .remove = cci_remove, + .driver = { + .name = "i2c-qcom-cci", + .of_match_table = cci_dt_match, + }, +}; + +module_platform_driver(qcom_cci_driver); + +MODULE_ALIAS("platform:i2c-qcom-cci"); +MODULE_DESCRIPTION("Qualcomm Camera Control Interface driver"); +MODULE_AUTHOR("Todor Tomov <todor.tomov@linaro.org>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/input/misc/pm8941-pwrkey.c b/drivers/input/misc/pm8941-pwrkey.c index e317b75357a0..7ba4fe1bfd3e 100644 --- a/drivers/input/misc/pm8941-pwrkey.c +++ 
b/drivers/input/misc/pm8941-pwrkey.c @@ -28,6 +28,7 @@ #define PON_RT_STS 0x10 #define PON_KPDPWR_N_SET BIT(0) +#define PON_RESIN_N_SET BIT(1) #define PON_PS_HOLD_RST_CTL 0x5a #define PON_PS_HOLD_RST_CTL2 0x5b @@ -37,6 +38,7 @@ #define PON_PS_HOLD_TYPE_HARD_RESET 7 #define PON_PULL_CTL 0x70 +#define PON_RESIN_PULL_UP BIT(0) #define PON_KPDPWR_PULL_UP BIT(1) #define PON_DBC_CTL 0x71 @@ -52,6 +54,7 @@ struct pm8941_pwrkey { unsigned int revision; struct notifier_block reboot_notifier; + unsigned int resin_code; }; static int pm8941_reboot_notify(struct notifier_block *nb, @@ -130,6 +133,25 @@ static irqreturn_t pm8941_pwrkey_irq(int irq, void *_data) return IRQ_HANDLED; } +static irqreturn_t pm8941_resin_irq(int irq, void *_data) +{ + struct pm8941_pwrkey *pwrkey = _data; + unsigned int sts; + int error; + + error = regmap_read(pwrkey->regmap, + pwrkey->baseaddr + PON_RT_STS, &sts); + if (error) + return IRQ_HANDLED; + + input_report_key(pwrkey->input, pwrkey->resin_code, + !!(sts & PON_RESIN_N_SET)); + + input_sync(pwrkey->input); + + return IRQ_HANDLED; +} + static int __maybe_unused pm8941_pwrkey_suspend(struct device *dev) { struct pm8941_pwrkey *pwrkey = dev_get_drvdata(dev); @@ -153,6 +175,46 @@ static int __maybe_unused pm8941_pwrkey_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(pm8941_pwr_key_pm_ops, pm8941_pwrkey_suspend, pm8941_pwrkey_resume); +static void pm8941_resin_setup(struct platform_device *pdev, + struct pm8941_pwrkey *pwrkey) +{ + int irq, error; + bool pull_up; + u32 code; + + irq = platform_get_irq(pdev, 1); + if (irq < 0) + return; + + pull_up = of_property_read_bool(pdev->dev.of_node, "resin-pull-up"); + + error = regmap_update_bits(pwrkey->regmap, + pwrkey->baseaddr + PON_PULL_CTL, + PON_RESIN_PULL_UP, + pull_up ? 
PON_RESIN_PULL_UP : 0); + if (error) { + dev_err(&pdev->dev, "failed to set pull: %d\n", error); + return; + } + + error = of_property_read_u32(pdev->dev.of_node, "linux,code", &code); + if (error) { + dev_err(&pdev->dev, "resin no linux,code %d\n", error); + return; + } + + pwrkey->resin_code = code; + + input_set_capability(pwrkey->input, EV_KEY, code); + + error = devm_request_threaded_irq(&pdev->dev, irq, + NULL, pm8941_resin_irq, + IRQF_ONESHOT, + "pm8941_resin", pwrkey); + if (error) + dev_err(&pdev->dev, "failed requesting IRQ: %d\n", error); +} + static int pm8941_pwrkey_probe(struct platform_device *pdev) { struct pm8941_pwrkey *pwrkey; @@ -241,6 +303,8 @@ static int pm8941_pwrkey_probe(struct platform_device *pdev) return error; } + pm8941_resin_setup(pdev, pwrkey); + error = input_register_device(pwrkey->input); if (error) { dev_err(&pdev->dev, "failed to register input device: %d\n", diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 8ee54d71c7eb..ebcfd7d02bf1 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -102,6 +102,8 @@ config IOMMU_PGTABLES_L2 def_bool y depends on MSM_IOMMU && MMU && SMP && CPU_DCACHE_DISABLE=n +source "drivers/iommu/qcom/Kconfig" + # AMD IOMMU support config AMD_IOMMU bool "AMD IOMMU support" diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index 195f7b997d8e..c74d5f45d409 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -8,6 +8,7 @@ obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o obj-$(CONFIG_IOMMU_IOVA) += iova.o obj-$(CONFIG_OF_IOMMU) += of_iommu.o obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o +obj-$(CONFIG_QCOM_IOMMU_V1) += qcom/ obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o obj-$(CONFIG_ARM_SMMU) += arm-smmu.o diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index e6f9b2d745ca..8ddbf513ec93 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -2680,40 +2680,9 @@ 
static struct platform_driver arm_smmu_driver = { .probe = arm_smmu_device_dt_probe, .remove = arm_smmu_device_remove, }; +module_platform_driver(arm_smmu_driver); -static int __init arm_smmu_init(void) -{ - static bool registered; - int ret = 0; - - if (!registered) { - ret = platform_driver_register(&arm_smmu_driver); - registered = !ret; - } - return ret; -} - -static void __exit arm_smmu_exit(void) -{ - return platform_driver_unregister(&arm_smmu_driver); -} - -subsys_initcall(arm_smmu_init); -module_exit(arm_smmu_exit); - -static int __init arm_smmu_of_init(struct device_node *np) -{ - int ret = arm_smmu_init(); - - if (ret) - return ret; - - if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root)) - return -ENODEV; - - return 0; -} -IOMMU_OF_DECLARE(arm_smmuv3, "arm,smmu-v3", arm_smmu_of_init); +IOMMU_OF_DECLARE(arm_smmuv3, "arm,smmu-v3", NULL); MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations"); MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>"); diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 5a9a4416f467..7b53838a083d 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -2050,45 +2050,14 @@ static struct platform_driver arm_smmu_driver = { .probe = arm_smmu_device_dt_probe, .remove = arm_smmu_device_remove, }; - -static int __init arm_smmu_init(void) -{ - static bool registered; - int ret = 0; - - if (!registered) { - ret = platform_driver_register(&arm_smmu_driver); - registered = !ret; - } - return ret; -} - -static void __exit arm_smmu_exit(void) -{ - return platform_driver_unregister(&arm_smmu_driver); -} - -subsys_initcall(arm_smmu_init); -module_exit(arm_smmu_exit); - -static int __init arm_smmu_of_init(struct device_node *np) -{ - int ret = arm_smmu_init(); - - if (ret) - return ret; - - if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root)) - return -ENODEV; - - return 0; -} -IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", arm_smmu_of_init); 
-IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", arm_smmu_of_init); -IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", arm_smmu_of_init); -IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init); -IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init); -IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init); +module_platform_driver(arm_smmu_driver); + +IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", NULL); +IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", NULL); +IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", NULL); +IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", NULL); +IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", NULL); +IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", NULL); MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations"); MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>"); diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 1520e7f02c2f..2cb99d2a2953 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -138,6 +138,7 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, { struct iova_domain *iovad = cookie_iovad(domain); unsigned long order, base_pfn, end_pfn; + bool pci = dev && dev_is_pci(dev); if (!iovad) return -ENODEV; @@ -160,19 +161,31 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, end_pfn = min_t(unsigned long, end_pfn, domain->geometry.aperture_end >> order); } + /* + * PCI devices may have larger DMA masks, but still prefer allocating + * within a 32-bit mask to avoid DAC addressing. Such limitations don't + * apply to the typical platform device, so for those we may as well + * leave the cache limit at the top of the range they're likely to use. 
+ */ + if (pci) + end_pfn = min_t(unsigned long, end_pfn, + DMA_BIT_MASK(32) >> order); - /* All we can safely do with an existing domain is enlarge it */ + /* start_pfn is always nonzero for an already-initialised domain */ if (iovad->start_pfn) { if (1UL << order != iovad->granule || - base_pfn != iovad->start_pfn || - end_pfn < iovad->dma_32bit_pfn) { + base_pfn != iovad->start_pfn) { pr_warn("Incompatible range for DMA domain\n"); return -EFAULT; } - iovad->dma_32bit_pfn = end_pfn; + /* + * If we have devices with different DMA masks, move the free + * area cache limit down for the benefit of the smaller one. + */ + iovad->dma_32bit_pfn = min(end_pfn, iovad->dma_32bit_pfn); } else { init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn); - if (dev && dev_is_pci(dev)) + if (pci) iova_reserve_pci_windows(to_pci_dev(dev), iovad); } return 0; diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 87d3060f8609..507524acf829 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -1626,6 +1626,46 @@ out: return ret; } +struct iommu_instance { + struct list_head list; + struct fwnode_handle *fwnode; + const struct iommu_ops *ops; +}; +static LIST_HEAD(iommu_instance_list); +static DEFINE_SPINLOCK(iommu_instance_lock); + +void iommu_register_instance(struct fwnode_handle *fwnode, + const struct iommu_ops *ops) +{ + struct iommu_instance *iommu = kzalloc(sizeof(*iommu), GFP_KERNEL); + + if (WARN_ON(!iommu)) + return; + + of_node_get(to_of_node(fwnode)); + INIT_LIST_HEAD(&iommu->list); + iommu->fwnode = fwnode; + iommu->ops = ops; + spin_lock(&iommu_instance_lock); + list_add_tail(&iommu->list, &iommu_instance_list); + spin_unlock(&iommu_instance_lock); +} + +const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode) +{ + struct iommu_instance *instance; + const struct iommu_ops *ops = NULL; + + spin_lock(&iommu_instance_lock); + list_for_each_entry(instance, &iommu_instance_list, list) + if (instance->fwnode == fwnode) { + ops = 
instance->ops; + break; + } + spin_unlock(&iommu_instance_lock); + return ops; +} + int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, const struct iommu_ops *ops) { diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index b09692bb5b0a..1110b72f5df5 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c @@ -411,6 +411,7 @@ static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) } __disable_clocks(iommu); list_add(&iommu->dom_node, &priv->list_attached); + iommu->domain = domain; } } @@ -614,8 +615,8 @@ irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id) goto fail; } - pr_err("Unexpected IOMMU page fault!\n"); - pr_err("base = %08x\n", (unsigned int)iommu->base); + pr_debug("Unexpected IOMMU page fault!\n"); + pr_debug("base = %08x\n", (unsigned int)iommu->base); ret = __enable_clocks(iommu); if (ret) @@ -624,10 +625,16 @@ irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id) for (i = 0; i < iommu->ncb; i++) { fsr = GET_FSR(iommu->base, i); if (fsr) { - pr_err("Fault occurred in context %d.\n", i); - pr_err("Interesting registers:\n"); - print_ctx_regs(iommu->base, i); + int ret = report_iommu_fault(iommu->domain, + to_msm_priv(iommu->domain)->dev, + GET_FAR(iommu->base, i), 0); + if (ret == -ENOSYS) { + pr_err("Fault occurred in context %d.\n", i); + pr_err("Interesting registers:\n"); + print_ctx_regs(iommu->base, i); + } SET_FSR(iommu->base, i, 0x4000000F); + SET_RESUME(iommu->base, i, 1); } } __disable_clocks(iommu); diff --git a/drivers/iommu/msm_iommu.h b/drivers/iommu/msm_iommu.h index 4ca25d50d679..c53016c83037 100644 --- a/drivers/iommu/msm_iommu.h +++ b/drivers/iommu/msm_iommu.h @@ -56,6 +56,8 @@ * dom_node: list head for domain * ctx_list: list of 'struct msm_iommu_ctx_dev' * context_map: Bitmap to track allocated context banks + * domain: iommu domain that this iommu dev is a member of, + * ie. whose msm_priv::list_attached are we on? 
*/ struct msm_iommu_dev { void __iomem *base; @@ -68,6 +70,7 @@ struct msm_iommu_dev { struct list_head dom_node; struct list_head ctx_list; DECLARE_BITMAP(context_map, IOMMU_MAX_CBS); + struct iommu_domain *domain; }; /** diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c index 5b82862f571f..9529d6ccdea3 100644 --- a/drivers/iommu/of_iommu.c +++ b/drivers/iommu/of_iommu.c @@ -23,6 +23,7 @@ #include <linux/of.h> #include <linux/of_iommu.h> #include <linux/of_pci.h> +#include <linux/pci.h> #include <linux/slab.h> static const struct of_device_id __iommu_of_table_sentinel @@ -96,42 +97,33 @@ int of_get_dma_window(struct device_node *dn, const char *prefix, int index, } EXPORT_SYMBOL_GPL(of_get_dma_window); -struct of_iommu_node { - struct list_head list; - struct device_node *np; +static const struct iommu_ops +*of_iommu_xlate(struct device *dev, struct of_phandle_args *iommu_spec) +{ const struct iommu_ops *ops; -}; -static LIST_HEAD(of_iommu_list); -static DEFINE_SPINLOCK(of_iommu_lock); + struct fwnode_handle *fwnode = &iommu_spec->np->fwnode; + int err; -void of_iommu_set_ops(struct device_node *np, const struct iommu_ops *ops) -{ - struct of_iommu_node *iommu = kzalloc(sizeof(*iommu), GFP_KERNEL); - - if (WARN_ON(!iommu)) - return; - - of_node_get(np); - INIT_LIST_HEAD(&iommu->list); - iommu->np = np; - iommu->ops = ops; - spin_lock(&of_iommu_lock); - list_add_tail(&iommu->list, &of_iommu_list); - spin_unlock(&of_iommu_lock); -} + ops = iommu_get_instance(fwnode); + if ((ops && !ops->of_xlate) || + (!ops && !of_match_node(&__iommu_of_table, iommu_spec->np))) + return NULL; -const struct iommu_ops *of_iommu_get_ops(struct device_node *np) -{ - struct of_iommu_node *node; - const struct iommu_ops *ops = NULL; + err = iommu_fwspec_init(dev, &iommu_spec->np->fwnode, ops); + if (err) + return ERR_PTR(err); + /* + * The otherwise-empty fwspec handily serves to indicate the specific + * IOMMU device we're waiting for, which will be useful if we ever get + 
* a proper probe-ordering dependency mechanism in future. + */ + if (!ops) + return ERR_PTR(-EPROBE_DEFER); + + err = ops->of_xlate(dev, iommu_spec); + if (err) + return ERR_PTR(err); - spin_lock(&of_iommu_lock); - list_for_each_entry(node, &of_iommu_list, list) - if (node->np == np) { - ops = node->ops; - break; - } - spin_unlock(&of_iommu_lock); return ops; } @@ -144,10 +136,11 @@ static int __get_pci_rid(struct pci_dev *pdev, u16 alias, void *data) } static const struct iommu_ops -*of_pci_iommu_configure(struct pci_dev *pdev, struct device_node *bridge_np) +*of_pci_iommu_init(struct pci_dev *pdev, struct device_node *bridge_np) { const struct iommu_ops *ops; struct of_phandle_args iommu_spec; + int err; /* * Start by tracing the RID alias down the PCI topology as @@ -162,56 +155,76 @@ static const struct iommu_ops * bus into the system beyond, and which IOMMU it ends up at. */ iommu_spec.np = NULL; - if (of_pci_map_rid(bridge_np, iommu_spec.args[0], "iommu-map", - "iommu-map-mask", &iommu_spec.np, iommu_spec.args)) - return NULL; + err = of_pci_map_rid(bridge_np, iommu_spec.args[0], "iommu-map", + "iommu-map-mask", &iommu_spec.np, + iommu_spec.args); + if (err) + return ERR_PTR(err); - ops = of_iommu_get_ops(iommu_spec.np); - if (!ops || !ops->of_xlate || - iommu_fwspec_init(&pdev->dev, &iommu_spec.np->fwnode, ops) || - ops->of_xlate(&pdev->dev, &iommu_spec)) - ops = NULL; + ops = of_iommu_xlate(&pdev->dev, &iommu_spec); of_node_put(iommu_spec.np); return ops; } -const struct iommu_ops *of_iommu_configure(struct device *dev, - struct device_node *master_np) +static const struct iommu_ops +*of_platform_iommu_init(struct device *dev, struct device_node *np) { struct of_phandle_args iommu_spec; - struct device_node *np; const struct iommu_ops *ops = NULL; int idx = 0; - if (dev_is_pci(dev)) - return of_pci_iommu_configure(to_pci_dev(dev), master_np); - /* * We don't currently walk up the tree looking for a parent IOMMU. 
* See the `Notes:' section of * Documentation/devicetree/bindings/iommu/iommu.txt */ - while (!of_parse_phandle_with_args(master_np, "iommus", - "#iommu-cells", idx, - &iommu_spec)) { - np = iommu_spec.np; - ops = of_iommu_get_ops(np); - - if (!ops || !ops->of_xlate || - iommu_fwspec_init(dev, &np->fwnode, ops) || - ops->of_xlate(dev, &iommu_spec)) - goto err_put_node; - - of_node_put(np); + while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", + idx, &iommu_spec)) { + ops = of_iommu_xlate(dev, &iommu_spec); + of_node_put(iommu_spec.np); idx++; + if (IS_ERR_OR_NULL(ops)) + break; } return ops; +} + +const struct iommu_ops *of_iommu_configure(struct device *dev, + struct device_node *master_np) +{ + const struct iommu_ops *ops; + struct iommu_fwspec *fwspec = dev->iommu_fwspec; + + if (!master_np) + return NULL; + + if (fwspec) { + if (fwspec->ops) + return fwspec->ops; -err_put_node: - of_node_put(np); - return NULL; + /* In the deferred case, start again from scratch */ + iommu_fwspec_free(dev); + } + + if (dev_is_pci(dev)) + ops = of_pci_iommu_init(to_pci_dev(dev), master_np); + else + ops = of_platform_iommu_init(dev, master_np); + /* + * If we have reason to believe the IOMMU driver missed the initial + * add_device callback for dev, replay it to get things in order. 
+ */ + if (!IS_ERR_OR_NULL(ops) && ops->add_device && + dev->bus && !dev->iommu_group) { + int err = ops->add_device(dev); + + if (err) + ops = ERR_PTR(err); + } + + return ops; } static int __init of_iommu_init(void) @@ -222,7 +235,7 @@ static int __init of_iommu_init(void) for_each_matching_node_and_match(np, matches, &match) { const of_iommu_init_fn init_fn = match->data; - if (init_fn(np)) + if (init_fn && init_fn(np)) pr_err("Failed to initialise IOMMU %s\n", of_node_full_name(np)); } diff --git a/drivers/iommu/qcom/Kconfig b/drivers/iommu/qcom/Kconfig new file mode 100644 index 000000000000..27bb0a9e063a --- /dev/null +++ b/drivers/iommu/qcom/Kconfig @@ -0,0 +1,42 @@ +# Qualcomm IOMMU support + +# QCOM IOMMUv1 support +config QCOM_IOMMU_V1 + bool "Qualcomm IOMMUv1 Support" + depends on ARCH_QCOM + select IOMMU_API + select ARM_DMA_USE_IOMMU if ARM + help + Support for the IOMMUs (v1) found on certain Qualcomm SOCs. + These IOMMUs allow virtualization of the address space used by most + cores within the multimedia subsystem. + + If unsure, say N here. + +config MMU500_ACTIVE_PREFETCH_BUG_WITH_SECTION_MAPPING + bool "Don't align virtual address at 1MB boundary" + depends on QCOM_IOMMU_V1 + help + Say Y here if the MMU500 revision has a bug in active prefetch + which can cause TLB corruptions due to 1MB alignment of a buffer. + Here is the sequence which will surface this BUG. + 1) Create a 2-level mapping in v7S format for 1MB buffer. Start of + the buffer should be at even MB boundary. + 2) Create a section mapping for 1MB buffer adjacent to previous + mapping in step 1. + 3) Access last page from 2 level mapping followed by an access into + section mapped area. + 4) Step 3 will result into TLB corruption and this corruption can + lead to any misbehavior (like Permission fault) for sub-sequent + transactions. + + If unsure, say Y here if IOMMU mapping will not exhaust the VA space. 
+ +config IOMMU_PGTABLES_L2 + bool "Allow SMMU page tables in the L2 cache (Experimental)" + depends on QCOM_IOMMU_V1 && MMU && SMP && CPU_DCACHE_DISABLE=n + help + Improves TLB miss latency at the expense of potential L2 pollution. + However, with large multimedia buffers, the TLB should mostly contain + section mappings and TLB misses should be quite infrequent. + Most people can probably say Y here. diff --git a/drivers/iommu/qcom/Makefile b/drivers/iommu/qcom/Makefile new file mode 100644 index 000000000000..e0b1159227be --- /dev/null +++ b/drivers/iommu/qcom/Makefile @@ -0,0 +1,7 @@ +obj-$(CONFIG_QCOM_IOMMU_V1) += qcom-iommu.o + +qcom-iommu-y += msm_iommu.o +qcom-iommu-y += msm_iommu-v1.o +qcom-iommu-y += msm_iommu_dev-v1.o +qcom-iommu-y += msm_iommu_sec.o +qcom-iommu-y += msm_iommu_pagetable.o diff --git a/drivers/iommu/qcom/msm_iommu-v1.c b/drivers/iommu/qcom/msm_iommu-v1.c new file mode 100644 index 000000000000..b4260dabbcd0 --- /dev/null +++ b/drivers/iommu/qcom/msm_iommu-v1.c @@ -0,0 +1,1664 @@ +/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/errno.h> +#include <linux/io.h> +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/slab.h> +#include <linux/iommu.h> +#include <linux/clk.h> +#include <linux/scatterlist.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_iommu.h> +#include <linux/regulator/consumer.h> +#include <linux/notifier.h> +#include <linux/iopoll.h> +#include <asm/sizes.h> +#include <linux/dma-iommu.h> + +#include "qcom_iommu.h" +#include "msm_iommu_hw-v1.h" +#include "msm_iommu_priv.h" +#include "msm_iommu_perfmon.h" +#include "msm_iommu_pagetable.h" + +#ifdef CONFIG_IOMMU_LPAE +/* bitmap of the page sizes currently supported */ +#define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_2M | SZ_32M | SZ_1G) +#else +/* bitmap of the page sizes currently supported */ +#define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M) +#endif + +#define IOMMU_MSEC_STEP 10 +#define IOMMU_MSEC_TIMEOUT 5000 + +struct msm_iommu_master { + struct list_head list; + unsigned int ctx_num; + struct device *dev; + struct msm_iommu_drvdata *iommu_drvdata; + struct msm_iommu_ctx_drvdata *ctx_drvdata; +}; + +static LIST_HEAD(iommu_masters); + +static DEFINE_MUTEX(msm_iommu_lock); +struct dump_regs_tbl_entry dump_regs_tbl[MAX_DUMP_REGS]; + +static int __enable_regulators(struct msm_iommu_drvdata *drvdata) +{ + ++drvdata->powered_on; + + return 0; +} + +static void __disable_regulators(struct msm_iommu_drvdata *drvdata) +{ + --drvdata->powered_on; +} + +static int apply_bus_vote(struct msm_iommu_drvdata *drvdata, unsigned int vote) +{ + return 0; +} + +int __enable_clocks(struct msm_iommu_drvdata *drvdata) +{ + int ret; + + ret = clk_prepare_enable(drvdata->iface); + if (ret) + return ret; + + ret = clk_prepare_enable(drvdata->core); + if (ret) + goto err; + + return 0; + 
+err: + clk_disable_unprepare(drvdata->iface); + return ret; +} + +void __disable_clocks(struct msm_iommu_drvdata *drvdata) +{ + clk_disable_unprepare(drvdata->core); + clk_disable_unprepare(drvdata->iface); +} + +static void _iommu_lock_acquire(unsigned int need_extra_lock) +{ + mutex_lock(&msm_iommu_lock); +} + +static void _iommu_lock_release(unsigned int need_extra_lock) +{ + mutex_unlock(&msm_iommu_lock); +} + +struct iommu_access_ops iommu_access_ops_v1 = { + .iommu_power_on = __enable_regulators, + .iommu_power_off = __disable_regulators, + .iommu_bus_vote = apply_bus_vote, + .iommu_lock_acquire = _iommu_lock_acquire, + .iommu_lock_release = _iommu_lock_release, +}; + +static BLOCKING_NOTIFIER_HEAD(msm_iommu_notifier_list); + +void msm_iommu_register_notify(struct notifier_block *nb) +{ + blocking_notifier_chain_register(&msm_iommu_notifier_list, nb); +} +EXPORT_SYMBOL(msm_iommu_register_notify); + +#ifdef CONFIG_MSM_IOMMU_VBIF_CHECK + +#define VBIF_XIN_HALT_CTRL0 0x200 +#define VBIF_XIN_HALT_CTRL1 0x204 +#define VBIF_AXI_HALT_CTRL0 0x208 +#define VBIF_AXI_HALT_CTRL1 0x20C + +static void __halt_vbif_xin(void __iomem *vbif_base) +{ + pr_err("Halting VBIF_XIN\n"); + writel_relaxed(0xFFFFFFFF, vbif_base + VBIF_XIN_HALT_CTRL0); +} + +static void __dump_vbif_state(void __iomem *base, void __iomem *vbif_base) +{ + unsigned int reg_val; + + reg_val = readl_relaxed(base + MICRO_MMU_CTRL); + pr_err("Value of SMMU_IMPLDEF_MICRO_MMU_CTRL = 0x%x\n", reg_val); + + reg_val = readl_relaxed(vbif_base + VBIF_XIN_HALT_CTRL0); + pr_err("Value of VBIF_XIN_HALT_CTRL0 = 0x%x\n", reg_val); + reg_val = readl_relaxed(vbif_base + VBIF_XIN_HALT_CTRL1); + pr_err("Value of VBIF_XIN_HALT_CTRL1 = 0x%x\n", reg_val); + reg_val = readl_relaxed(vbif_base + VBIF_AXI_HALT_CTRL0); + pr_err("Value of VBIF_AXI_HALT_CTRL0 = 0x%x\n", reg_val); + reg_val = readl_relaxed(vbif_base + VBIF_AXI_HALT_CTRL1); + pr_err("Value of VBIF_AXI_HALT_CTRL1 = 0x%x\n", reg_val); +} + +static int 
__check_vbif_state(struct msm_iommu_drvdata const *drvdata) +{ + phys_addr_t addr = (phys_addr_t) (drvdata->phys_base + - (phys_addr_t) 0x4000); + void __iomem *base = ioremap(addr, 0x1000); + int ret = 0; + + if (base) { + __dump_vbif_state(drvdata->base, base); + __halt_vbif_xin(base); + __dump_vbif_state(drvdata->base, base); + iounmap(base); + } else { + pr_err("%s: Unable to ioremap\n", __func__); + ret = -ENOMEM; + } + return ret; +} + +static void check_halt_state(struct msm_iommu_drvdata const *drvdata) +{ + int res; + unsigned int val; + void __iomem *base = drvdata->base; + char const *name = drvdata->name; + + pr_err("Timed out waiting for IOMMU halt to complete for %s\n", name); + res = __check_vbif_state(drvdata); + if (res) + BUG(); + + pr_err("Checking if IOMMU halt completed for %s\n", name); + + res = readl_poll_timeout(GLB_REG(MICRO_MMU_CTRL, base), val, + (val & MMU_CTRL_IDLE) == MMU_CTRL_IDLE, 0, 5000000); + + if (res) { + pr_err("Timed out (again) waiting for IOMMU halt to complete for %s\n", + name); + } else { + pr_err("IOMMU halt completed. VBIF FIFO most likely not getting drained by master\n"); + } + BUG(); +} + +static void check_tlb_sync_state(struct msm_iommu_drvdata const *drvdata, + int ctx, struct msm_iommu_priv *priv) +{ + int res; + unsigned int val; + void __iomem *base = drvdata->cb_base; + char const *name = drvdata->name; + + pr_err("Timed out waiting for TLB SYNC to complete for %s (client: %s)\n", + name, priv->client_name); + blocking_notifier_call_chain(&msm_iommu_notifier_list, TLB_SYNC_TIMEOUT, + (void *) priv->client_name); + res = __check_vbif_state(drvdata); + if (res) + BUG(); + + pr_err("Checking if TLB sync completed for %s\n", name); + + res = readl_poll_timeout(CTX_REG(CB_TLBSTATUS, base, ctx), val, + (val & CB_TLBSTATUS_SACTIVE) == 0, 0, 5000000); + if (res) { + pr_err("Timed out (again) waiting for TLB SYNC to complete for %s\n", + name); + } else { + pr_err("TLB Sync completed. 
VBIF FIFO most likely not getting drained by master\n"); + } + BUG(); +} + +#else + +/* + * For targets without VBIF or for targets with the VBIF check disabled + * we directly just crash to capture the issue + */ +static void check_halt_state(struct msm_iommu_drvdata const *drvdata) +{ + BUG(); +} + +static void check_tlb_sync_state(struct msm_iommu_drvdata const *drvdata, + int ctx, struct msm_iommu_priv *priv) +{ + BUG(); +} + +#endif + +void iommu_halt(struct msm_iommu_drvdata const *iommu_drvdata) +{ + void __iomem *base = iommu_drvdata->base; + unsigned int val; + int res; + + if (!iommu_drvdata->halt_enabled) + return; + + SET_MICRO_MMU_CTRL_HALT_REQ(base, 1); + res = readl_poll_timeout(GLB_REG(MICRO_MMU_CTRL, base), val, + (val & MMU_CTRL_IDLE) == MMU_CTRL_IDLE, 0, 5000000); + if (res) + check_halt_state(iommu_drvdata); + + /* Ensure device is idle before continuing */ + mb(); +} + +void iommu_resume(const struct msm_iommu_drvdata *iommu_drvdata) +{ + if (!iommu_drvdata->halt_enabled) + return; + + /* Ensure transactions have completed before releasing the halt */ + mb(); + + SET_MICRO_MMU_CTRL_HALT_REQ(iommu_drvdata->base, 0); + + /* + * Ensure write is complete before continuing to ensure + * we don't turn off clocks while transaction is still + * pending. 
+ */ + mb(); +} + +static void __sync_tlb(struct msm_iommu_drvdata *iommu_drvdata, int ctx, + struct msm_iommu_priv *priv) +{ + void __iomem *base = iommu_drvdata->cb_base; + unsigned int val; + unsigned int res; + + SET_TLBSYNC(base, ctx, 0); + /* No barrier needed due to read dependency */ + + res = readl_poll_timeout(CTX_REG(CB_TLBSTATUS, base, ctx), val, + (val & CB_TLBSTATUS_SACTIVE) == 0, 0, 5000000); + if (res) + check_tlb_sync_state(iommu_drvdata, ctx, priv); +} + +#ifdef CONFIG_MSM_IOMMU_TLBINVAL_ON_MAP +static int __flush_iotlb_va(struct iommu_domain *domain, unsigned int va) +{ + struct msm_iommu_priv *priv = to_msm_priv(domain); + struct msm_iommu_drvdata *iommu_drvdata; + struct msm_iommu_ctx_drvdata *ctx_drvdata; + int ret = 0; + + list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) { + BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent); + + iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent); + BUG_ON(!iommu_drvdata); + + SET_TLBIVA(iommu_drvdata->cb_base, ctx_drvdata->num, + ctx_drvdata->asid | (va & CB_TLBIVA_VA)); + mb(); + __sync_tlb(iommu_drvdata, ctx_drvdata->num, priv); + } + + return ret; +} +#endif + +static int __flush_iotlb(struct iommu_domain *domain) +{ + struct msm_iommu_priv *priv = to_msm_priv(domain); + struct msm_iommu_drvdata *iommu_drvdata; + struct msm_iommu_ctx_drvdata *ctx_drvdata; + int ret = 0; + + list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) { + BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent); + + iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent); + BUG_ON(!iommu_drvdata); + + SET_TLBIASID(iommu_drvdata->cb_base, ctx_drvdata->num, + ctx_drvdata->asid); + __sync_tlb(iommu_drvdata, ctx_drvdata->num, priv); + } + + return ret; +} + +/* + * May only be called for non-secure iommus + */ +static void __reset_iommu(struct msm_iommu_drvdata *iommu_drvdata) +{ + void __iomem *base = iommu_drvdata->base; + int i, smt_size, res; + unsigned long val; 
+ + /* SMMU_ACR is an implementation defined register. + * Resetting is not required for some implementation. + */ + if (iommu_drvdata->model != MMU_500) + SET_ACR(base, 0); + SET_CR2(base, 0); + SET_GFAR(base, 0); + SET_GFSRRESTORE(base, 0); + + /* Invalidate the entire non-secure TLB */ + SET_TLBIALLNSNH(base, 0); + SET_TLBGSYNC(base, 0); + res = readl_poll_timeout(GLB_REG(TLBGSTATUS, base), val, + (val & TLBGSTATUS_GSACTIVE) == 0, 0, 5000000); + if (res) + BUG(); + + smt_size = GET_IDR0_NUMSMRG(base); + + for (i = 0; i < smt_size; i++) + SET_SMR_VALID(base, i, 0); + + mb(); +} + +static void __reset_iommu_secure(struct msm_iommu_drvdata *iommu_drvdata) +{ + void __iomem *base = iommu_drvdata->base; + + if (iommu_drvdata->model != MMU_500) + SET_NSACR(base, 0); + SET_NSCR2(base, 0); + SET_NSGFAR(base, 0); + SET_NSGFSRRESTORE(base, 0); + mb(); +} + +static void __program_iommu_secure(struct msm_iommu_drvdata *iommu_drvdata) +{ + void __iomem *base = iommu_drvdata->base; + + if (iommu_drvdata->model == MMU_500) { + SET_NSACR_SMTNMC_BPTLBEN(base, 1); + SET_NSACR_MMUDIS_BPTLBEN(base, 1); + SET_NSACR_S2CR_BPTLBEN(base, 1); + } + SET_NSCR0_SMCFCFG(base, 1); + SET_NSCR0_USFCFG(base, 1); + SET_NSCR0_STALLD(base, 1); + SET_NSCR0_GCFGFIE(base, 1); + SET_NSCR0_GCFGFRE(base, 1); + SET_NSCR0_GFIE(base, 1); + SET_NSCR0_GFRE(base, 1); + SET_NSCR0_CLIENTPD(base, 0); +} + +/* + * May only be called for non-secure iommus + */ +static void __program_iommu(struct msm_iommu_drvdata *drvdata) +{ + __reset_iommu(drvdata); + + if (!msm_iommu_get_scm_call_avail()) + __reset_iommu_secure(drvdata); + + if (drvdata->model == MMU_500) { + SET_ACR_SMTNMC_BPTLBEN(drvdata->base, 1); + SET_ACR_MMUDIS_BPTLBEN(drvdata->base, 1); + SET_ACR_S2CR_BPTLBEN(drvdata->base, 1); + } + SET_CR0_SMCFCFG(drvdata->base, 1); + SET_CR0_USFCFG(drvdata->base, 1); + SET_CR0_STALLD(drvdata->base, 1); + SET_CR0_GCFGFIE(drvdata->base, 1); + SET_CR0_GCFGFRE(drvdata->base, 1); + SET_CR0_GFIE(drvdata->base, 1); + 
SET_CR0_GFRE(drvdata->base, 1); + SET_CR0_CLIENTPD(drvdata->base, 0); + + if (!msm_iommu_get_scm_call_avail()) + __program_iommu_secure(drvdata); + + if (drvdata->smmu_local_base) + writel_relaxed(0xFFFFFFFF, + drvdata->smmu_local_base + SMMU_INTR_SEL_NS); + + mb(); /* Make sure writes complete before returning */ +} + +void program_iommu_bfb_settings(void __iomem *base, + const struct msm_iommu_bfb_settings *bfb_settings) +{ + unsigned int i; + + if (bfb_settings) + for (i = 0; i < bfb_settings->length; i++) + SET_GLOBAL_REG(base, bfb_settings->regs[i], + bfb_settings->data[i]); + + /* Make sure writes complete before returning */ + mb(); +} + +static void __reset_context(struct msm_iommu_drvdata *iommu_drvdata, int ctx) +{ + void __iomem *base = iommu_drvdata->cb_base; + + /* Don't set ACTLR to zero because if context bank is in + * bypass mode (say after iommu_detach), still this ACTLR + * value matters for micro-TLB caching. + */ + if (iommu_drvdata->model != MMU_500) + SET_ACTLR(base, ctx, 0); + SET_FAR(base, ctx, 0); + SET_FSRRESTORE(base, ctx, 0); + SET_NMRR(base, ctx, 0); + SET_PAR(base, ctx, 0); + SET_PRRR(base, ctx, 0); + SET_SCTLR(base, ctx, 0); + SET_TTBCR(base, ctx, 0); + SET_TTBR0(base, ctx, 0); + SET_TTBR1(base, ctx, 0); + mb(); +} + +static void __release_smg(void __iomem *base) +{ + int i, smt_size; + smt_size = GET_IDR0_NUMSMRG(base); + + /* Invalidate all SMGs */ + for (i = 0; i < smt_size; i++) + if (GET_SMR_VALID(base, i)) + SET_SMR_VALID(base, i, 0); +} + +#ifdef CONFIG_IOMMU_LPAE +static void msm_iommu_set_ASID(void __iomem *base, unsigned int ctx_num, + unsigned int asid) +{ + SET_CB_TTBR0_ASID(base, ctx_num, asid); +} +#else +static void msm_iommu_set_ASID(void __iomem *base, unsigned int ctx_num, + unsigned int asid) +{ + SET_CB_CONTEXTIDR_ASID(base, ctx_num, asid); +} +#endif + +static void msm_iommu_assign_ASID(const struct msm_iommu_drvdata *iommu_drvdata, + struct msm_iommu_ctx_drvdata *curr_ctx, + struct msm_iommu_priv *priv) +{ + 
void __iomem *cb_base = iommu_drvdata->cb_base; + + curr_ctx->asid = curr_ctx->num; + msm_iommu_set_ASID(cb_base, curr_ctx->num, curr_ctx->asid); +} + +#ifdef CONFIG_IOMMU_LPAE +static void msm_iommu_setup_ctx(void __iomem *base, unsigned int ctx) +{ + SET_CB_TTBCR_EAE(base, ctx, 1); /* Extended Address Enable (EAE) */ +} + +static void msm_iommu_setup_memory_remap(void __iomem *base, unsigned int ctx) +{ + SET_CB_MAIR0(base, ctx, msm_iommu_get_mair0()); + SET_CB_MAIR1(base, ctx, msm_iommu_get_mair1()); +} + +static void msm_iommu_setup_pg_l2_redirect(void __iomem *base, unsigned int ctx) +{ + /* + * Configure page tables as inner-cacheable and shareable to reduce + * the TLB miss penalty. + */ + SET_CB_TTBCR_SH0(base, ctx, 3); /* Inner shareable */ + SET_CB_TTBCR_ORGN0(base, ctx, 1); /* outer cachable*/ + SET_CB_TTBCR_IRGN0(base, ctx, 1); /* inner cachable*/ + SET_CB_TTBCR_T0SZ(base, ctx, 0); /* 0GB-4GB */ + + + SET_CB_TTBCR_SH1(base, ctx, 3); /* Inner shareable */ + SET_CB_TTBCR_ORGN1(base, ctx, 1); /* outer cachable*/ + SET_CB_TTBCR_IRGN1(base, ctx, 1); /* inner cachable*/ + SET_CB_TTBCR_T1SZ(base, ctx, 0); /* TTBR1 not used */ +} + +#else + +static void msm_iommu_setup_ctx(void __iomem *base, unsigned int ctx) +{ + /* Turn on TEX Remap */ + SET_CB_SCTLR_TRE(base, ctx, 1); +} + +static void msm_iommu_setup_memory_remap(void __iomem *base, unsigned int ctx) +{ + SET_PRRR(base, ctx, msm_iommu_get_prrr()); + SET_NMRR(base, ctx, msm_iommu_get_nmrr()); +} + +static void msm_iommu_setup_pg_l2_redirect(void __iomem *base, unsigned int ctx) +{ + /* Configure page tables as inner-cacheable and shareable to reduce + * the TLB miss penalty. 
+ */ + SET_CB_TTBR0_S(base, ctx, 1); + SET_CB_TTBR0_NOS(base, ctx, 1); + SET_CB_TTBR0_IRGN1(base, ctx, 0); /* WB, WA */ + SET_CB_TTBR0_IRGN0(base, ctx, 1); + SET_CB_TTBR0_RGN(base, ctx, 1); /* WB, WA */ +} + +#endif + +static int program_m2v_table(struct device *dev, void __iomem *base) +{ + struct msm_iommu_ctx_drvdata *ctx_drvdata = dev_get_drvdata(dev); + u32 *sids = ctx_drvdata->sids; + u32 *sid_mask = ctx_drvdata->sid_mask; + unsigned int ctx = ctx_drvdata->num; + int num = 0, i, smt_size; + int len = ctx_drvdata->nsid; + + smt_size = GET_IDR0_NUMSMRG(base); + + /* Program the M2V tables for this context */ + for (i = 0; i < len / sizeof(*sids); i++) { + for (; num < smt_size; num++) + if (GET_SMR_VALID(base, num) == 0) + break; + BUG_ON(num >= smt_size); + + SET_SMR_VALID(base, num, 1); + SET_SMR_MASK(base, num, sid_mask[i]); + SET_SMR_ID(base, num, sids[i]); + + SET_S2CR_N(base, num, 0); + SET_S2CR_CBNDX(base, num, ctx); + SET_S2CR_MEMATTR(base, num, 0x0A); + /* Set security bit override to be Non-secure */ + SET_S2CR_NSCFG(base, num, 3); + } + + return 0; +} + +static void program_all_m2v_tables(struct msm_iommu_drvdata *iommu_drvdata) +{ + device_for_each_child(iommu_drvdata->dev, iommu_drvdata->base, + program_m2v_table); +} + +static void __program_context(struct msm_iommu_drvdata *iommu_drvdata, + struct msm_iommu_ctx_drvdata *ctx_drvdata, + struct msm_iommu_priv *priv, bool is_secure, + bool program_m2v) +{ + phys_addr_t pn; + void __iomem *base = iommu_drvdata->base; + void __iomem *cb_base = iommu_drvdata->cb_base; + unsigned int ctx = ctx_drvdata->num; + phys_addr_t pgtable = __pa(priv->pt.fl_table); + + __reset_context(iommu_drvdata, ctx); + msm_iommu_setup_ctx(cb_base, ctx); + + if (priv->pt.redirect) + msm_iommu_setup_pg_l2_redirect(cb_base, ctx); + + msm_iommu_setup_memory_remap(cb_base, ctx); + + pn = pgtable >> CB_TTBR0_ADDR_SHIFT; + SET_CB_TTBR0_ADDR(cb_base, ctx, pn); + + /* Enable context fault interrupt */ + SET_CB_SCTLR_CFIE(cb_base, ctx, 
1); + + if (iommu_drvdata->model != MMU_500) { + /* Redirect all cacheable requests to L2 slave port. */ + SET_CB_ACTLR_BPRCISH(cb_base, ctx, 1); + SET_CB_ACTLR_BPRCOSH(cb_base, ctx, 1); + SET_CB_ACTLR_BPRCNSH(cb_base, ctx, 1); + } + + /* Enable private ASID namespace */ + SET_CB_SCTLR_ASIDPNE(cb_base, ctx, 1); + + if (!is_secure) { + if (program_m2v) + program_all_m2v_tables(iommu_drvdata); + + SET_CBAR_N(base, ctx, 0); + + /* Stage 1 Context with Stage 2 bypass */ + SET_CBAR_TYPE(base, ctx, 1); + + /* Route page faults to the non-secure interrupt */ + SET_CBAR_IRPTNDX(base, ctx, 1); + + /* Set VMID to non-secure HLOS */ + SET_CBAR_VMID(base, ctx, 3); + + /* Bypass is treated as inner-shareable */ + SET_CBAR_BPSHCFG(base, ctx, 2); + + /* Do not downgrade memory attributes */ + SET_CBAR_MEMATTR(base, ctx, 0x0A); + + } + + msm_iommu_assign_ASID(iommu_drvdata, ctx_drvdata, priv); + + /* Ensure that ASID assignment has completed before we use + * ASID for TLB invalidation. Here, mb() is required because + * both these registers are separated by more than 1KB. 
*/ + mb(); + + SET_TLBIASID(iommu_drvdata->cb_base, ctx_drvdata->num, + ctx_drvdata->asid); + __sync_tlb(iommu_drvdata, ctx_drvdata->num, priv); + + /* Enable the MMU */ + SET_CB_SCTLR_M(cb_base, ctx, 1); + mb(); +} + +#ifdef CONFIG_IOMMU_PGTABLES_L2 +#define INITIAL_REDIRECT_VAL 1 +#else +#define INITIAL_REDIRECT_VAL 0 +#endif + +static struct msm_iommu_master *msm_iommu_find_master(struct device *dev) +{ + struct msm_iommu_master *master; + bool found = false; + + list_for_each_entry(master, &iommu_masters, list) { + if (master && master->dev == dev) { + found = true; + break; + } + } + + if (found) { + dev_dbg(dev, "found master %s with ctx:%d\n", + dev_name(master->dev), + master->ctx_num); + return master; + } + + return ERR_PTR(-ENODEV); +} + +static struct iommu_domain *msm_iommu_domain_alloc(unsigned type) +{ + struct msm_iommu_priv *priv; + struct iommu_domain *domain; + int ret; + + if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA) + return ERR_PTR(-EINVAL); + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return ERR_PTR(-ENOMEM); + + priv->pt.redirect = INITIAL_REDIRECT_VAL; + + INIT_LIST_HEAD(&priv->list_attached); + + ret = msm_iommu_pagetable_alloc(&priv->pt); + if (ret) { + kfree(priv); + return ERR_PTR(ret); + } + + domain = &priv->domain; + + if (type == IOMMU_DOMAIN_DMA) { + ret = iommu_get_dma_cookie(domain); + if (ret) + goto err; + } + + return domain; + +err: + msm_iommu_pagetable_free(&priv->pt); + kfree(priv); + return ERR_PTR(ret); +} + +static void msm_iommu_domain_free(struct iommu_domain *domain) +{ + struct msm_iommu_priv *priv; + + mutex_lock(&msm_iommu_lock); + priv = to_msm_priv(domain); + if (priv) + msm_iommu_pagetable_free(&priv->pt); + + if (domain->type == IOMMU_DOMAIN_DMA) + iommu_put_dma_cookie(domain); + + kfree(priv); + mutex_unlock(&msm_iommu_lock); +} + +static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) +{ + struct msm_iommu_priv *priv; + struct msm_iommu_drvdata 
*iommu_drvdata; + struct msm_iommu_ctx_drvdata *ctx_drvdata; + struct msm_iommu_ctx_drvdata *tmp_drvdata; + struct msm_iommu_master *master; + int ret = 0; + int is_secure; + bool set_m2v = false; + + mutex_lock(&msm_iommu_lock); + + priv = to_msm_priv(domain); + if (!priv || !dev) { + ret = -EINVAL; + goto unlock; + } + + master = msm_iommu_find_master(dev); + if (IS_ERR(master)) { + /* if error use legacy api */ + iommu_drvdata = dev_get_drvdata(dev->parent); + ctx_drvdata = dev_get_drvdata(dev); + } else { + iommu_drvdata = master->iommu_drvdata; + ctx_drvdata = master->ctx_drvdata; + } + + if (!iommu_drvdata || !ctx_drvdata) { + ret = -EINVAL; + goto unlock; + } + + ++ctx_drvdata->attach_count; + + if (ctx_drvdata->attach_count > 1) + goto already_attached; + + if (!list_empty(&ctx_drvdata->attached_elm)) { + ret = -EBUSY; + goto unlock; + } + + list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm) + if (tmp_drvdata == ctx_drvdata) { + ret = -EBUSY; + goto unlock; + } + + is_secure = iommu_drvdata->sec_id != -1; + + ret = __enable_regulators(iommu_drvdata); + if (ret) + goto unlock; + + ret = apply_bus_vote(iommu_drvdata, 1); + if (ret) + goto unlock; + + /* We can only do this once */ + if (!iommu_drvdata->ctx_attach_count) { + if (!is_secure) { + iommu_halt(iommu_drvdata); + __program_iommu(iommu_drvdata); + iommu_resume(iommu_drvdata); + } else { + ret = msm_iommu_sec_program_iommu(iommu_drvdata, + ctx_drvdata); + if (ret) { + __disable_regulators(iommu_drvdata); + goto unlock; + } + } + program_iommu_bfb_settings(iommu_drvdata->base, + iommu_drvdata->bfb_settings); + set_m2v = true; + } + + iommu_halt(iommu_drvdata); + + __program_context(iommu_drvdata, ctx_drvdata, priv, is_secure, set_m2v); + + iommu_resume(iommu_drvdata); + + /* Ensure TLB is clear */ + if (iommu_drvdata->model != MMU_500) { + SET_TLBIASID(iommu_drvdata->cb_base, ctx_drvdata->num, + ctx_drvdata->asid); + __sync_tlb(iommu_drvdata, ctx_drvdata->num, priv); + } + + 
list_add(&(ctx_drvdata->attached_elm), &priv->list_attached); + ctx_drvdata->attached_domain = domain; + ++iommu_drvdata->ctx_attach_count; + +already_attached: + mutex_unlock(&msm_iommu_lock); + + msm_iommu_attached(dev->parent); + return ret; +unlock: + mutex_unlock(&msm_iommu_lock); + return ret; +} + +static void msm_iommu_detach_dev(struct iommu_domain *domain, + struct device *dev) +{ + struct msm_iommu_priv *priv; + struct msm_iommu_drvdata *iommu_drvdata; + struct msm_iommu_ctx_drvdata *ctx_drvdata; + struct msm_iommu_master *master; + int is_secure; + int ret; + + if (!dev) + return; + + msm_iommu_detached(dev->parent); + + mutex_lock(&msm_iommu_lock); + priv = to_msm_priv(domain); + if (!priv) + goto unlock; + + master = msm_iommu_find_master(dev); + if (IS_ERR(master)) { + ret = PTR_ERR(master); + goto unlock; + } + + iommu_drvdata = master->iommu_drvdata; + ctx_drvdata = master->ctx_drvdata; + + if (!iommu_drvdata || !ctx_drvdata || !ctx_drvdata->attached_domain) + goto unlock; + + --ctx_drvdata->attach_count; + BUG_ON(ctx_drvdata->attach_count < 0); + + if (ctx_drvdata->attach_count > 0) + goto unlock; + + is_secure = iommu_drvdata->sec_id != -1; + + if (iommu_drvdata->model == MMU_500) { + SET_TLBIASID(iommu_drvdata->cb_base, ctx_drvdata->num, + ctx_drvdata->asid); + __sync_tlb(iommu_drvdata, ctx_drvdata->num, priv); + } + + ctx_drvdata->asid = -1; + + __reset_context(iommu_drvdata, ctx_drvdata->num); + + /* + * Only reset the M2V tables on the very last detach */ + if (!is_secure && iommu_drvdata->ctx_attach_count == 1) { + iommu_halt(iommu_drvdata); + __release_smg(iommu_drvdata->base); + iommu_resume(iommu_drvdata); + } + + apply_bus_vote(iommu_drvdata, 0); + + __disable_regulators(iommu_drvdata); + + list_del_init(&ctx_drvdata->attached_elm); + ctx_drvdata->attached_domain = NULL; + BUG_ON(iommu_drvdata->ctx_attach_count == 0); + --iommu_drvdata->ctx_attach_count; +unlock: + mutex_unlock(&msm_iommu_lock); +} + +static int msm_iommu_map(struct 
iommu_domain *domain, unsigned long va, + phys_addr_t pa, size_t len, int prot) +{ + struct msm_iommu_priv *priv; + int ret = 0; + + mutex_lock(&msm_iommu_lock); + + priv = to_msm_priv(domain); + if (!priv) { + ret = -EINVAL; + goto fail; + } + + ret = msm_iommu_pagetable_map(&priv->pt, va, pa, len, prot); + if (ret) + goto fail; + +#ifdef CONFIG_MSM_IOMMU_TLBINVAL_ON_MAP + ret = __flush_iotlb_va(domain, va); +#endif + +fail: + mutex_unlock(&msm_iommu_lock); + return ret; +} + +static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va, + size_t len) +{ + struct msm_iommu_priv *priv; + int ret = -ENODEV; + + mutex_lock(&msm_iommu_lock); + + priv = to_msm_priv(domain); + if (!priv) + goto fail; + + ret = msm_iommu_pagetable_unmap(&priv->pt, va, len); + if (ret < 0) + goto fail; + + ret = __flush_iotlb(domain); + + msm_iommu_pagetable_free_tables(&priv->pt, va, len); +fail: + mutex_unlock(&msm_iommu_lock); + + /* the IOMMU API requires us to return how many bytes were unmapped */ + len = ret ? 
0 : len; + return len; +} + +static size_t msm_iommu_map_sg(struct iommu_domain *domain, unsigned long iova, + struct scatterlist *sg, unsigned int nents, + int prot) +{ + struct msm_iommu_priv *priv; + struct scatterlist *tmp; + unsigned int len = 0; + int ret, i; + + mutex_lock(&msm_iommu_lock); + + priv = to_msm_priv(domain); + if (!priv) { + ret = -EINVAL; + goto fail; + } + + for_each_sg(sg, tmp, nents, i) + len += tmp->length; + + ret = msm_iommu_pagetable_map_range(&priv->pt, iova, sg, len, prot); + if (ret < 0) + goto fail; + + ret = len; + +#ifdef CONFIG_MSM_IOMMU_TLBINVAL_ON_MAP + __flush_iotlb(domain); +#endif + +fail: + mutex_unlock(&msm_iommu_lock); + return ret; +} + +static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va, + unsigned int len) +{ + struct msm_iommu_priv *priv; + + mutex_lock(&msm_iommu_lock); + + priv = to_msm_priv(domain); + msm_iommu_pagetable_unmap_range(&priv->pt, va, len); + + __flush_iotlb(domain); + + msm_iommu_pagetable_free_tables(&priv->pt, va, len); + mutex_unlock(&msm_iommu_lock); + + return 0; +} + +#ifdef CONFIG_IOMMU_LPAE +static phys_addr_t msm_iommu_get_phy_from_PAR(unsigned long va, u64 par) +{ + phys_addr_t phy; + + /* Upper 28 bits from PAR, lower 12 from VA */ + phy = (par & 0xFFFFFFF000ULL) | (va & 0x00000FFF); + + return phy; +} +#else +static phys_addr_t msm_iommu_get_phy_from_PAR(unsigned long va, u64 par) +{ + phys_addr_t phy; + + /* We are dealing with a supersection */ + if (par & CB_PAR_SS) + phy = (par & 0xFF000000) | (va & 0x00FFFFFF); + else /* Upper 20 bits from PAR, lower 12 from VA */ + phy = (par & 0xFFFFF000) | (va & 0x00000FFF); + + return phy; +} +#endif + +static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain, + phys_addr_t va) +{ + struct msm_iommu_priv *priv; + struct msm_iommu_drvdata *iommu_drvdata; + struct msm_iommu_ctx_drvdata *ctx_drvdata; + u64 par; + void __iomem *base; + phys_addr_t ret = 0; + int ctx; + int i; + + mutex_lock(&msm_iommu_lock); + + 
priv = to_msm_priv(domain); + if (list_empty(&priv->list_attached)) + goto fail; + + ctx_drvdata = list_entry(priv->list_attached.next, + struct msm_iommu_ctx_drvdata, attached_elm); + iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent); + + if (iommu_drvdata->model == MMU_500) { + ret = msm_iommu_iova_to_phys_soft(domain, va); + mutex_unlock(&msm_iommu_lock); + return ret; + } + + base = iommu_drvdata->cb_base; + ctx = ctx_drvdata->num; + + SET_ATS1PR(base, ctx, va & CB_ATS1PR_ADDR); + mb(); + for (i = 0; i < IOMMU_MSEC_TIMEOUT; i += IOMMU_MSEC_STEP) + if (GET_CB_ATSR_ACTIVE(base, ctx) == 0) + break; + else + msleep(IOMMU_MSEC_STEP); + + if (i >= IOMMU_MSEC_TIMEOUT) { + pr_err("%s: iova to phys timed out on %pa for %s (%s)\n", + __func__, &va, iommu_drvdata->name, ctx_drvdata->name); + ret = 0; + goto fail; + } + + par = GET_PAR(base, ctx); + + if (par & CB_PAR_F) { + unsigned int level = (par & CB_PAR_PLVL) >> CB_PAR_PLVL_SHIFT; + pr_err("IOMMU translation fault!\n"); + pr_err("name = %s\n", iommu_drvdata->name); + pr_err("context = %s (%d)\n", ctx_drvdata->name, + ctx_drvdata->num); + pr_err("Interesting registers:\n"); + pr_err("PAR = %16llx [%s%s%s%s%s%s%s%sPLVL%u %s]\n", par, + (par & CB_PAR_F) ? "F " : "", + (par & CB_PAR_TF) ? "TF " : "", + (par & CB_PAR_AFF) ? "AFF " : "", + (par & CB_PAR_PF) ? "PF " : "", + (par & CB_PAR_EF) ? "EF " : "", + (par & CB_PAR_TLBMCF) ? "TLBMCF " : "", + (par & CB_PAR_TLBLKF) ? "TLBLKF " : "", + (par & CB_PAR_ATOT) ? "ATOT " : "", + level, + (par & CB_PAR_STAGE) ? 
"S2 " : "S1 "); + ret = 0; + } else { + ret = msm_iommu_get_phy_from_PAR(va, par); + } + +fail: + mutex_unlock(&msm_iommu_lock); + + return ret; +} + +static int msm_iommu_add_device(struct device *dev) +{ + struct iommu_group *group; + + group = iommu_group_get_for_dev(dev); + if (IS_ERR(group)) + return PTR_ERR(group); + + return 0; +} + +static void msm_iommu_remove_device(struct device *dev) +{ + iommu_group_remove_device(dev); +} + +static struct iommu_group *msm_iommu_device_group(struct device *dev) +{ + struct msm_iommu_master *master; + struct iommu_group *group; + + group = generic_device_group(dev); + if (IS_ERR(group)) + return group; + + master = msm_iommu_find_master(dev); + if (IS_ERR(master)) { + iommu_group_put(group); + return ERR_CAST(master); + } + + return group; +} + +#ifdef CONFIG_IOMMU_LPAE +static inline void print_ctx_mem_attr_regs(struct msm_iommu_context_reg regs[]) +{ + pr_err("MAIR0 = %08x MAIR1 = %08x\n", + regs[DUMP_REG_MAIR0].val, regs[DUMP_REG_MAIR1].val); +} +#else +static inline void print_ctx_mem_attr_regs(struct msm_iommu_context_reg regs[]) +{ + pr_err("PRRR = %08x NMRR = %08x\n", + regs[DUMP_REG_PRRR].val, regs[DUMP_REG_NMRR].val); +} +#endif + +void print_ctx_regs(struct msm_iommu_context_reg regs[]) +{ + uint32_t fsr = regs[DUMP_REG_FSR].val; + u64 ttbr; + enum dump_reg iter; + + pr_err("FAR = %016llx\n", + COMBINE_DUMP_REG( + regs[DUMP_REG_FAR1].val, + regs[DUMP_REG_FAR0].val)); + pr_err("PAR = %016llx\n", + COMBINE_DUMP_REG( + regs[DUMP_REG_PAR1].val, + regs[DUMP_REG_PAR0].val)); + pr_err("FSR = %08x [%s%s%s%s%s%s%s%s%s]\n", fsr, + (fsr & 0x02) ? "TF " : "", + (fsr & 0x04) ? "AFF " : "", + (fsr & 0x08) ? "PF " : "", + (fsr & 0x10) ? "EF " : "", + (fsr & 0x20) ? "TLBMCF " : "", + (fsr & 0x40) ? "TLBLKF " : "", + (fsr & 0x80) ? "MHF " : "", + (fsr & 0x40000000) ? "SS " : "", + (fsr & 0x80000000) ? 
"MULTI " : ""); + + pr_err("FSYNR0 = %08x FSYNR1 = %08x\n", + regs[DUMP_REG_FSYNR0].val, regs[DUMP_REG_FSYNR1].val); + + ttbr = COMBINE_DUMP_REG(regs[DUMP_REG_TTBR0_1].val, + regs[DUMP_REG_TTBR0_0].val); + if (regs[DUMP_REG_TTBR0_1].valid) + pr_err("TTBR0 = %016llx\n", ttbr); + else + pr_err("TTBR0 = %016llx (32b)\n", ttbr); + + ttbr = COMBINE_DUMP_REG(regs[DUMP_REG_TTBR1_1].val, + regs[DUMP_REG_TTBR1_0].val); + + if (regs[DUMP_REG_TTBR1_1].valid) + pr_err("TTBR1 = %016llx\n", ttbr); + else + pr_err("TTBR1 = %016llx (32b)\n", ttbr); + + pr_err("SCTLR = %08x ACTLR = %08x\n", + regs[DUMP_REG_SCTLR].val, regs[DUMP_REG_ACTLR].val); + pr_err("CBAR = %08x CBFRSYNRA = %08x\n", + regs[DUMP_REG_CBAR_N].val, regs[DUMP_REG_CBFRSYNRA_N].val); + print_ctx_mem_attr_regs(regs); + + for (iter = DUMP_REG_FIRST; iter < MAX_DUMP_REGS; ++iter) + if (!regs[iter].valid) + pr_err("NOTE: Value actually unknown for %s\n", + dump_regs_tbl[iter].name); +} + +static void __print_ctx_regs(struct msm_iommu_drvdata *drvdata, int ctx, + unsigned int fsr) +{ + void __iomem *base = drvdata->base; + void __iomem *cb_base = drvdata->cb_base; + bool is_secure = drvdata->sec_id != -1; + + struct msm_iommu_context_reg regs[MAX_DUMP_REGS]; + unsigned int i; + memset(regs, 0, sizeof(regs)); + + for (i = DUMP_REG_FIRST; i < MAX_DUMP_REGS; ++i) { + struct msm_iommu_context_reg *r = ®s[i]; + unsigned long regaddr = dump_regs_tbl[i].reg_offset; + if (is_secure && + dump_regs_tbl[i].dump_reg_type != DRT_CTX_REG) { + r->valid = 0; + continue; + } + r->valid = 1; + switch (dump_regs_tbl[i].dump_reg_type) { + case DRT_CTX_REG: + r->val = GET_CTX_REG(regaddr, cb_base, ctx); + break; + case DRT_GLOBAL_REG: + r->val = GET_GLOBAL_REG(regaddr, base); + break; + case DRT_GLOBAL_REG_N: + r->val = GET_GLOBAL_REG_N(regaddr, ctx, base); + break; + default: + pr_info("Unknown dump_reg_type...\n"); + r->valid = 0; + break; + } + } + print_ctx_regs(regs); +} + +static void print_global_regs(void __iomem *base, unsigned int 
gfsr) +{ + pr_err("GFAR = %016llx\n", GET_GFAR(base)); + + pr_err("GFSR = %08x [%s%s%s%s%s%s%s%s%s%s]\n", gfsr, + (gfsr & 0x01) ? "ICF " : "", + (gfsr & 0x02) ? "USF " : "", + (gfsr & 0x04) ? "SMCF " : "", + (gfsr & 0x08) ? "UCBF " : "", + (gfsr & 0x10) ? "UCIF " : "", + (gfsr & 0x20) ? "CAF " : "", + (gfsr & 0x40) ? "EF " : "", + (gfsr & 0x80) ? "PF " : "", + (gfsr & 0x40000000) ? "SS " : "", + (gfsr & 0x80000000) ? "MULTI " : ""); + + pr_err("GFSYNR0 = %08x\n", GET_GFSYNR0(base)); + pr_err("GFSYNR1 = %08x\n", GET_GFSYNR1(base)); + pr_err("GFSYNR2 = %08x\n", GET_GFSYNR2(base)); +} + +irqreturn_t msm_iommu_global_fault_handler(int irq, void *dev_id) +{ + struct platform_device *pdev = dev_id; + struct msm_iommu_drvdata *drvdata; + unsigned int gfsr; + int ret; + + mutex_lock(&msm_iommu_lock); + BUG_ON(!pdev); + + drvdata = dev_get_drvdata(&pdev->dev); + BUG_ON(!drvdata); + + if (!drvdata->powered_on) { + pr_err("Unexpected IOMMU global fault !!\n"); + pr_err("name = %s\n", drvdata->name); + pr_err("Power is OFF. 
Can't read global fault information\n"); + ret = IRQ_HANDLED; + goto fail; + } + + if (drvdata->sec_id != -1) { + pr_err("NON-secure interrupt from secure %s\n", drvdata->name); + ret = IRQ_HANDLED; + goto fail; + } + + gfsr = GET_GFSR(drvdata->base); + if (gfsr) { + pr_err("Unexpected %s global fault !!\n", drvdata->name); + print_global_regs(drvdata->base, gfsr); + SET_GFSR(drvdata->base, gfsr); + ret = IRQ_HANDLED; + } else + ret = IRQ_NONE; + +fail: + mutex_unlock(&msm_iommu_lock); + + return ret; +} + +irqreturn_t msm_iommu_fault_handler_v2(int irq, void *dev_id) +{ + struct platform_device *pdev = dev_id; + struct msm_iommu_drvdata *drvdata; + struct msm_iommu_ctx_drvdata *ctx_drvdata; + unsigned int fsr; + int ret; + + phys_addr_t pagetable_phys; + u64 faulty_iova = 0; + + mutex_lock(&msm_iommu_lock); + + BUG_ON(!pdev); + + drvdata = dev_get_drvdata(pdev->dev.parent); + BUG_ON(!drvdata); + + ctx_drvdata = dev_get_drvdata(&pdev->dev); + BUG_ON(!ctx_drvdata); + + if (!drvdata->powered_on) { + pr_err("Unexpected IOMMU page fault!\n"); + pr_err("name = %s\n", drvdata->name); + pr_err("Power is OFF. 
Unable to read page fault information\n"); + /* + * We cannot determine which context bank caused the issue so + * we just return handled here to ensure IRQ handler code is + * happy + */ + ret = IRQ_HANDLED; + goto fail; + } + + ret = __enable_clocks(drvdata); + if (ret) { + ret = IRQ_NONE; + goto fail; + } + + fsr = GET_FSR(drvdata->cb_base, ctx_drvdata->num); + if (fsr) { + if (!ctx_drvdata->attached_domain) { + pr_err("Bad domain in interrupt handler\n"); + ret = -ENOSYS; + } else { + faulty_iova = + GET_FAR(drvdata->cb_base, ctx_drvdata->num); + ret = report_iommu_fault(ctx_drvdata->attached_domain, + &ctx_drvdata->pdev->dev, + faulty_iova, 0); + + } + if (ret == -ENOSYS) { + pr_err("Unexpected IOMMU page fault!\n"); + pr_err("name = %s\n", drvdata->name); + pr_err("context = %s (%d)\n", ctx_drvdata->name, + ctx_drvdata->num); + pr_err("Interesting registers:\n"); + __print_ctx_regs(drvdata, + ctx_drvdata->num, fsr); + + if (ctx_drvdata->attached_domain) { + pagetable_phys = msm_iommu_iova_to_phys_soft( + ctx_drvdata->attached_domain, + faulty_iova); + pr_err("Page table in DDR shows PA = %x\n", + (unsigned int) pagetable_phys); + } + } + + if (ret != -EBUSY) + SET_FSR(drvdata->cb_base, ctx_drvdata->num, fsr); + ret = IRQ_HANDLED; + } else + ret = IRQ_NONE; + + __disable_clocks(drvdata); +fail: + mutex_unlock(&msm_iommu_lock); + + return ret; +} + +static phys_addr_t msm_iommu_get_pt_base_addr(struct iommu_domain *domain) +{ + struct msm_iommu_priv *priv = to_msm_priv(domain); + + return __pa(priv->pt.fl_table); +} + +#define DUMP_REG_INIT(dump_reg, cb_reg, mbp, drt) \ + do { \ + dump_regs_tbl[dump_reg].reg_offset = cb_reg; \ + dump_regs_tbl[dump_reg].name = #cb_reg; \ + dump_regs_tbl[dump_reg].must_be_present = mbp; \ + dump_regs_tbl[dump_reg].dump_reg_type = drt; \ + } while (0) + +static void msm_iommu_build_dump_regs_table(void) +{ + DUMP_REG_INIT(DUMP_REG_FAR0, CB_FAR, 1, DRT_CTX_REG); + DUMP_REG_INIT(DUMP_REG_FAR1, CB_FAR + 4, 1, DRT_CTX_REG); + 
DUMP_REG_INIT(DUMP_REG_PAR0, CB_PAR, 1, DRT_CTX_REG); + DUMP_REG_INIT(DUMP_REG_PAR1, CB_PAR + 4, 1, DRT_CTX_REG); + DUMP_REG_INIT(DUMP_REG_FSR, CB_FSR, 1, DRT_CTX_REG); + DUMP_REG_INIT(DUMP_REG_FSYNR0, CB_FSYNR0, 1, DRT_CTX_REG); + DUMP_REG_INIT(DUMP_REG_FSYNR1, CB_FSYNR1, 1, DRT_CTX_REG); + DUMP_REG_INIT(DUMP_REG_TTBR0_0, CB_TTBR0, 1, DRT_CTX_REG); + DUMP_REG_INIT(DUMP_REG_TTBR0_1, CB_TTBR0 + 4, 0, DRT_CTX_REG); + DUMP_REG_INIT(DUMP_REG_TTBR1_0, CB_TTBR1, 1, DRT_CTX_REG); + DUMP_REG_INIT(DUMP_REG_TTBR1_1, CB_TTBR1 + 4, 0, DRT_CTX_REG); + DUMP_REG_INIT(DUMP_REG_SCTLR, CB_SCTLR, 1, DRT_CTX_REG); + DUMP_REG_INIT(DUMP_REG_ACTLR, CB_ACTLR, 1, DRT_CTX_REG); + DUMP_REG_INIT(DUMP_REG_PRRR, CB_PRRR, 1, DRT_CTX_REG); + DUMP_REG_INIT(DUMP_REG_NMRR, CB_NMRR, 1, DRT_CTX_REG); + DUMP_REG_INIT(DUMP_REG_CBAR_N, CBAR, 1, DRT_GLOBAL_REG_N); + DUMP_REG_INIT(DUMP_REG_CBFRSYNRA_N, CBFRSYNRA, 1, DRT_GLOBAL_REG_N); +} + +#ifdef CONFIG_IOMMU_PGTABLES_L2 +static void __do_set_redirect(struct iommu_domain *domain, void *data) +{ + struct msm_iommu_priv *priv; + int *no_redirect = data; + + mutex_lock(&msm_iommu_lock); + priv = to_msm_priv(domain); + priv->pt.redirect = !(*no_redirect); + mutex_unlock(&msm_iommu_lock); +} + +static void __do_get_redirect(struct iommu_domain *domain, void *data) +{ + struct msm_iommu_priv *priv; + int *no_redirect = data; + + mutex_lock(&msm_iommu_lock); + priv = to_msm_priv(domain); + *no_redirect = !priv->pt.redirect; + mutex_unlock(&msm_iommu_lock); +} + +#else + +static void __do_set_redirect(struct iommu_domain *domain, void *data) +{ +} + +static void __do_get_redirect(struct iommu_domain *domain, void *data) +{ +} +#endif + +static int msm_iommu_domain_set_attr(struct iommu_domain *domain, + enum iommu_attr attr, void *data) +{ + switch (attr) { + case DOMAIN_ATTR_QCOM_COHERENT_HTW_DISABLE: + __do_set_redirect(domain, data); + break; + default: + return -EINVAL; + } + return 0; +} + +static int msm_iommu_domain_get_attr(struct iommu_domain *domain, + 
enum iommu_attr attr, void *data) +{ + switch (attr) { + case DOMAIN_ATTR_QCOM_COHERENT_HTW_DISABLE: + __do_get_redirect(domain, data); + break; + default: + return -EINVAL; + } + return 0; +} + +static int msm_iommu_of_xlate(struct device *dev, struct of_phandle_args *args) +{ + struct msm_iommu_drvdata *iommu_drvdata; + struct msm_iommu_ctx_drvdata *ctx_drvdata; + struct platform_device *pdev, *ctx_pdev; + struct msm_iommu_master *master; + struct device_node *child; + bool found = false; + u32 val; + int ret; + + if (args->args_count > 2) + return -EINVAL; + + dev_dbg(dev, "getting pdev for %s\n", args->np->name); + + pdev = of_find_device_by_node(args->np); + if (!pdev) { + dev_dbg(dev, "iommu pdev not found\n"); + return -ENODEV; + } + + iommu_drvdata = platform_get_drvdata(pdev); + if (!iommu_drvdata) + return -ENODEV; + + for_each_child_of_node(args->np, child) { + ctx_pdev = of_find_device_by_node(child); + if (!ctx_pdev) + return -ENODEV; + + ctx_drvdata = platform_get_drvdata(ctx_pdev); + + ret = of_property_read_u32(child, "qcom,ctx-num", &val); + if (ret) + return ret; + + if (val == args->args[0]) { + found = true; + break; + } + } + + if (!found) + return -ENODEV; + + dev_dbg(dev, "found ctx data for %s (num:%d)\n", + ctx_drvdata->name, ctx_drvdata->num); + + master = kzalloc(sizeof(*master), GFP_KERNEL); + if (!master) + return -ENOMEM; + + INIT_LIST_HEAD(&master->list); + master->ctx_num = args->args[0]; + master->dev = dev; + master->iommu_drvdata = iommu_drvdata; + master->ctx_drvdata = ctx_drvdata; + + dev_dbg(dev, "adding master for device %s\n", dev_name(dev)); + + list_add_tail(&master->list, &iommu_masters); +#if 0 + if (dev->bus && dev->bus->iommu_ops) { + ret = dev->bus->iommu_ops->add_device(dev); + if (ret) { + dev_err(dev, "iommu add_device failed (%d)\n", ret); + return ret; + } + } else { + dev_err(dev, "of_xlate missing iommu_ops for bus\n"); + return -ENODEV; + } +#endif + return 0; +} + +static struct iommu_ops msm_iommu_ops = { + 
.domain_alloc = msm_iommu_domain_alloc, + .domain_free = msm_iommu_domain_free, + .attach_dev = msm_iommu_attach_dev, + .detach_dev = msm_iommu_detach_dev, + .map = msm_iommu_map, + .unmap = msm_iommu_unmap, + .map_sg = default_iommu_map_sg, /*msm_iommu_map_sg,*/ + .iova_to_phys = msm_iommu_iova_to_phys, + .add_device = msm_iommu_add_device, + .remove_device = msm_iommu_remove_device, + .device_group = msm_iommu_device_group, + .pgsize_bitmap = MSM_IOMMU_PGSIZES, + .domain_set_attr = msm_iommu_domain_set_attr, + .domain_get_attr = msm_iommu_domain_get_attr, + .of_xlate = msm_iommu_of_xlate, +}; + +int msm_iommu_init(struct device *dev) +{ + static bool done = false; + int ret; + + of_iommu_set_ops(dev->of_node, &msm_iommu_ops); + + if (done) + return 0; + + msm_iommu_pagetable_init(); + + ret = bus_set_iommu(&platform_bus_type, &msm_iommu_ops); + if (ret) + return ret; + + msm_iommu_build_dump_regs_table(); + + done = true; + + return 0; +} diff --git a/drivers/iommu/qcom/msm_iommu.c b/drivers/iommu/qcom/msm_iommu.c new file mode 100644 index 000000000000..7dc03740d779 --- /dev/null +++ b/drivers/iommu/qcom/msm_iommu.c @@ -0,0 +1,207 @@ +/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/export.h> +#include <linux/iommu.h> + +#include "qcom_iommu.h" + +static DEFINE_MUTEX(iommu_list_lock); +static LIST_HEAD(iommu_list); + +#define MRC(reg, processor, op1, crn, crm, op2) \ +__asm__ __volatile__ ( \ +" mrc " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \ +: "=r" (reg)) + +#define RCP15_PRRR(reg) MRC(reg, p15, 0, c10, c2, 0) +#define RCP15_NMRR(reg) MRC(reg, p15, 0, c10, c2, 1) + +#define RCP15_MAIR0(reg) MRC(reg, p15, 0, c10, c2, 0) +#define RCP15_MAIR1(reg) MRC(reg, p15, 0, c10, c2, 1) + +/* These values come from proc-v7-2level.S */ +#define PRRR_VALUE 0xff0a81a8 +#define NMRR_VALUE 0x40e040e0 + +/* These values come from proc-v7-3level.S */ +#define MAIR0_VALUE 0xeeaa4400 +#define MAIR1_VALUE 0xff000004 + +static struct iommu_access_ops *iommu_access_ops; + +struct bus_type msm_iommu_sec_bus_type = { + .name = "msm_iommu_sec_bus", +}; + +void msm_set_iommu_access_ops(struct iommu_access_ops *ops) +{ + iommu_access_ops = ops; +} + +struct iommu_access_ops *msm_get_iommu_access_ops() +{ + BUG_ON(iommu_access_ops == NULL); + return iommu_access_ops; +} +EXPORT_SYMBOL(msm_get_iommu_access_ops); + +void msm_iommu_add_drv(struct msm_iommu_drvdata *drv) +{ + mutex_lock(&iommu_list_lock); + list_add(&drv->list, &iommu_list); + mutex_unlock(&iommu_list_lock); +} + +void msm_iommu_remove_drv(struct msm_iommu_drvdata *drv) +{ + mutex_lock(&iommu_list_lock); + list_del(&drv->list); + mutex_unlock(&iommu_list_lock); +} + +static int find_iommu_ctx(struct device *dev, void *data) +{ + struct msm_iommu_ctx_drvdata *c; + + c = dev_get_drvdata(dev); + if (!c || !c->name) + return 0; + + return !strcmp(data, c->name); +} + +static struct device *find_context(struct device *dev, const char *name) +{ + return device_find_child(dev, (void *)name, 
find_iommu_ctx); +} + +struct device *msm_iommu_get_ctx(const char *ctx_name) +{ + struct msm_iommu_drvdata *drv; + struct device *dev = NULL; + + mutex_lock(&iommu_list_lock); + list_for_each_entry(drv, &iommu_list, list) { + dev = find_context(drv->dev, ctx_name); + if (dev) + break; + } + mutex_unlock(&iommu_list_lock); + + put_device(dev); + + if (!dev || !dev_get_drvdata(dev)) { + pr_debug("Could not find context <%s>\n", ctx_name); + dev = ERR_PTR(-EPROBE_DEFER); + } + + return dev; +} +EXPORT_SYMBOL(msm_iommu_get_ctx); + +#ifdef CONFIG_ARM +#ifdef CONFIG_IOMMU_LPAE +#ifdef CONFIG_ARM_LPAE +/* + * If CONFIG_ARM_LPAE AND CONFIG_IOMMU_LPAE are enabled we can use the MAIR + * register directly + */ +u32 msm_iommu_get_mair0(void) +{ + unsigned int mair0; + + RCP15_MAIR0(mair0); + return mair0; +} + +u32 msm_iommu_get_mair1(void) +{ + unsigned int mair1; + + RCP15_MAIR1(mair1); + return mair1; +} +#else +/* + * However, If CONFIG_ARM_LPAE is not enabled but CONFIG_IOMMU_LPAE is enabled + * we'll just use the hard coded values directly.. + */ +u32 msm_iommu_get_mair0(void) +{ + return MAIR0_VALUE; +} + +u32 msm_iommu_get_mair1(void) +{ + return MAIR1_VALUE; +} +#endif + +#else +#ifdef CONFIG_ARM_LPAE +/* + * If CONFIG_ARM_LPAE is enabled AND CONFIG_IOMMU_LPAE is disabled + * we must use the hardcoded values. + */ +u32 msm_iommu_get_prrr(void) +{ + return PRRR_VALUE; +} + +u32 msm_iommu_get_nmrr(void) +{ + return NMRR_VALUE; +} +#else +/* + * If both CONFIG_ARM_LPAE AND CONFIG_IOMMU_LPAE are disabled + * we can use the registers directly. 
+ */ +#define RCP15_PRRR(reg) MRC(reg, p15, 0, c10, c2, 0) +#define RCP15_NMRR(reg) MRC(reg, p15, 0, c10, c2, 1) + +u32 msm_iommu_get_prrr(void) +{ + u32 prrr; + + RCP15_PRRR(prrr); + return prrr; +} + +u32 msm_iommu_get_nmrr(void) +{ + u32 nmrr; + + RCP15_NMRR(nmrr); + return nmrr; +} +#endif +#endif +#endif +#ifdef CONFIG_ARM64 +u32 msm_iommu_get_prrr(void) +{ + return PRRR_VALUE; +} + +u32 msm_iommu_get_nmrr(void) +{ + return NMRR_VALUE; +} +#endif diff --git a/drivers/iommu/qcom/msm_iommu_dev-v1.c b/drivers/iommu/qcom/msm_iommu_dev-v1.c new file mode 100644 index 000000000000..039a68643deb --- /dev/null +++ b/drivers/iommu/qcom/msm_iommu_dev-v1.c @@ -0,0 +1,694 @@ +/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/io.h> +#include <linux/clk.h> +#include <linux/iommu.h> +#include <linux/interrupt.h> +#include <linux/err.h> +#include <linux/slab.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/of_iommu.h> + +#include "msm_iommu_hw-v1.h" +#include "qcom_iommu.h" +#include <linux/qcom_scm.h> +#include "msm_iommu_perfmon.h" +#include "msm_iommu_priv.h" + +static const struct of_device_id msm_iommu_ctx_match_table[]; + +#ifdef CONFIG_IOMMU_LPAE +static const char *BFB_REG_NODE_NAME = "qcom,iommu-lpae-bfb-regs"; +static const char *BFB_DATA_NODE_NAME = "qcom,iommu-lpae-bfb-data"; +#else +static const char *BFB_REG_NODE_NAME = "qcom,iommu-bfb-regs"; +static const char *BFB_DATA_NODE_NAME = "qcom,iommu-bfb-data"; +#endif + +static int msm_iommu_parse_bfb_settings(struct platform_device *pdev, + struct msm_iommu_drvdata *drvdata) +{ + struct msm_iommu_bfb_settings *bfb_settings; + u32 nreg, nval; + int ret; + + /* + * It is not valid for a device to have the BFB_REG_NODE_NAME + * property but not the BFB_DATA_NODE_NAME property, and vice versa. 
+ */ + if (!of_get_property(pdev->dev.of_node, BFB_REG_NODE_NAME, &nreg)) { + if (of_get_property(pdev->dev.of_node, BFB_DATA_NODE_NAME, + &nval)) + return -EINVAL; + return 0; + } + + if (!of_get_property(pdev->dev.of_node, BFB_DATA_NODE_NAME, &nval)) + return -EINVAL; + + if (nreg >= sizeof(bfb_settings->regs)) + return -EINVAL; + + if (nval >= sizeof(bfb_settings->data)) + return -EINVAL; + + if (nval != nreg) + return -EINVAL; + + bfb_settings = devm_kzalloc(&pdev->dev, sizeof(*bfb_settings), + GFP_KERNEL); + if (!bfb_settings) + return -ENOMEM; + + ret = of_property_read_u32_array(pdev->dev.of_node, + BFB_REG_NODE_NAME, + bfb_settings->regs, + nreg / sizeof(*bfb_settings->regs)); + if (ret) + return ret; + + ret = of_property_read_u32_array(pdev->dev.of_node, + BFB_DATA_NODE_NAME, + bfb_settings->data, + nval / sizeof(*bfb_settings->data)); + if (ret) + return ret; + + bfb_settings->length = nreg / sizeof(*bfb_settings->regs); + + drvdata->bfb_settings = bfb_settings; + + return 0; +} + +static int __get_bus_vote_client(struct platform_device *pdev, + struct msm_iommu_drvdata *drvdata) +{ + return 0; +} + +static void __put_bus_vote_client(struct msm_iommu_drvdata *drvdata) +{ + drvdata->bus_client = 0; +} + +/* + * CONFIG_IOMMU_NON_SECURE allows us to override the secure + * designation of SMMUs in device tree. With this config enabled + * all SMMUs will be programmed by this driver. 
+ */ +#ifdef CONFIG_IOMMU_NON_SECURE +static inline void get_secure_id(struct device_node *node, + struct msm_iommu_drvdata *drvdata) +{ +} + +static inline void get_secure_ctx(struct device_node *node, + struct msm_iommu_drvdata *iommu_drvdata, + struct msm_iommu_ctx_drvdata *ctx_drvdata) +{ + ctx_drvdata->secure_context = 0; +} +#else +static void get_secure_id(struct device_node *node, + struct msm_iommu_drvdata *drvdata) +{ + if (msm_iommu_get_scm_call_avail()) + of_property_read_u32(node, "qcom,iommu-secure-id", + &drvdata->sec_id); +} + +static void get_secure_ctx(struct device_node *node, + struct msm_iommu_drvdata *iommu_drvdata, + struct msm_iommu_ctx_drvdata *ctx_drvdata) +{ + u32 secure_ctx = 0; + + if (msm_iommu_get_scm_call_avail()) + secure_ctx = of_property_read_bool(node, "qcom,secure-context"); + + ctx_drvdata->secure_context = secure_ctx; +} +#endif + +static int msm_iommu_parse_dt(struct platform_device *pdev, + struct msm_iommu_drvdata *drvdata) +{ + struct device_node *child; + int ret; + + drvdata->dev = &pdev->dev; + + ret = __get_bus_vote_client(pdev, drvdata); + if (ret) + goto fail; + + ret = msm_iommu_parse_bfb_settings(pdev, drvdata); + if (ret) + goto fail; + + for_each_available_child_of_node(pdev->dev.of_node, child) + drvdata->ncb++; + + ret = of_property_read_string(pdev->dev.of_node, "label", + &drvdata->name); + if (ret) + goto fail; + + drvdata->sec_id = -1; + get_secure_id(pdev->dev.of_node, drvdata); + + drvdata->halt_enabled = of_property_read_bool(pdev->dev.of_node, + "qcom,iommu-enable-halt"); + + msm_iommu_add_drv(drvdata); + + return 0; + +fail: + __put_bus_vote_client(drvdata); + return ret; +} + +static int msm_iommu_pmon_parse_dt(struct platform_device *pdev, + struct iommu_pmon *pmon_info) +{ + struct device *dev = &pdev->dev; + struct device_node *np = pdev->dev.of_node; + unsigned int cls_prop_size; + int ret, irq; + + irq = platform_get_irq(pdev, 0); + if (irq < 0 && irq == -EPROBE_DEFER) + return -EPROBE_DEFER; + 
if (irq <= 0) { + pmon_info->iommu.evt_irq = -1; + return irq; + } + + pmon_info->iommu.evt_irq = irq; + + ret = of_property_read_u32(np, "qcom,iommu-pmu-ngroups", + &pmon_info->num_groups); + if (ret) { + dev_err(dev, "Error reading qcom,iommu-pmu-ngroups\n"); + return ret; + } + + ret = of_property_read_u32(np, "qcom,iommu-pmu-ncounters", + &pmon_info->num_counters); + if (ret) { + dev_err(dev, "Error reading qcom,iommu-pmu-ncounters\n"); + return ret; + } + + if (!of_get_property(np, "qcom,iommu-pmu-event-classes", + &cls_prop_size)) { + dev_err(dev, "Error reading qcom,iommu-pmu-event-classes\n"); + return -EINVAL; + } + + pmon_info->event_cls_supported = devm_kzalloc(dev, cls_prop_size, + GFP_KERNEL); + if (!pmon_info->event_cls_supported) { + dev_err(dev, "Unable to get memory for event class array\n"); + return -ENOMEM; + } + + pmon_info->nevent_cls_supported = cls_prop_size / sizeof(u32); + + ret = of_property_read_u32_array(np, "qcom,iommu-pmu-event-classes", + pmon_info->event_cls_supported, + pmon_info->nevent_cls_supported); + if (ret) { + dev_err(dev, "Error reading qcom,iommu-pmu-event-classes\n"); + return ret; + } + + return 0; +} + +#define SCM_SVC_MP 0xc +#define MAXIMUM_VIRT_SIZE (300 * SZ_1M) +#define MAKE_VERSION(major, minor, patch) \ + (((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF)) + +static int msm_iommu_sec_ptbl_init(struct device *dev) +{ + size_t psize = 0; + unsigned int spare = 0; + int ret; + int version; + void *cpu_addr; + dma_addr_t paddr; + unsigned long attrs; + static bool allocated = false; + + if (allocated) + return 0; + +#if 0 + version = qcom_scm_get_feat_version(SCM_SVC_MP); + + if (version >= MAKE_VERSION(1, 1, 1)) { + ret = qcom_scm_iommu_set_cp_pool_size(MAXIMUM_VIRT_SIZE, 0); + if (ret) { + dev_err(dev, "failed setting max virtual size (%d)\n", + ret); + return ret; + } + } +#endif + ret = qcom_scm_iommu_secure_ptbl_size(spare, &psize); + if (ret) { + dev_err(dev, "failed to get iommu secure 
pgtable size (%d)\n", + ret); + return ret; + } + + dev_info(dev, "iommu sec: pgtable size: %zu\n", psize); + + attrs = DMA_ATTR_NO_KERNEL_MAPPING; + + cpu_addr = dma_alloc_attrs(dev, psize, &paddr, GFP_KERNEL, attrs); + if (!cpu_addr) { + dev_err(dev, "failed to allocate %zu bytes for pgtable\n", + psize); + return -ENOMEM; + } + + ret = qcom_scm_iommu_secure_ptbl_init(paddr, psize, spare); + if (ret) { + dev_err(dev, "failed to init iommu pgtable (%d)\n", ret); + goto free_mem; + } + + allocated = true; + + return 0; + +free_mem: + dma_free_attrs(dev, psize, cpu_addr, paddr, attrs); + return ret; +} + +static int msm_iommu_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = pdev->dev.of_node; + struct iommu_pmon *pmon_info; + struct msm_iommu_drvdata *drvdata; + struct resource *res; + int ret; + int global_cfg_irq, global_client_irq; + u32 temp; + unsigned long rate; + + if (!qcom_scm_is_available()) + return -EPROBE_DEFER; + + msm_iommu_check_scm_call_avail(); + msm_set_iommu_access_ops(&iommu_access_ops_v1); + msm_iommu_sec_set_access_ops(&iommu_access_ops_v1); + + drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL); + if (!drvdata) + return -ENOMEM; + + drvdata->dev = dev; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iommu_base"); + drvdata->base = devm_ioremap_resource(dev, res); + if (IS_ERR(drvdata->base)) + return PTR_ERR(drvdata->base); + + drvdata->glb_base = drvdata->base; + drvdata->phys_base = res->start; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "smmu_local_base"); + drvdata->smmu_local_base = devm_ioremap_resource(dev, res); + if (IS_ERR(drvdata->smmu_local_base) && + PTR_ERR(drvdata->smmu_local_base) != -EPROBE_DEFER) + drvdata->smmu_local_base = NULL; + + if (of_device_is_compatible(np, "qcom,msm-mmu-500")) + drvdata->model = MMU_500; + + drvdata->iface = devm_clk_get(dev, "iface_clk"); + if (IS_ERR(drvdata->iface)) + return PTR_ERR(drvdata->iface); + + 
drvdata->core = devm_clk_get(dev, "core_clk"); + if (IS_ERR(drvdata->core)) + return PTR_ERR(drvdata->core); + + if (!of_property_read_u32(np, "qcom,cb-base-offset", &temp)) + drvdata->cb_base = drvdata->base + temp; + else + drvdata->cb_base = drvdata->base + 0x8000; + + rate = clk_get_rate(drvdata->core); + if (!rate) { + rate = clk_round_rate(drvdata->core, 1000); + clk_set_rate(drvdata->core, rate); + } + + ret = msm_iommu_parse_dt(pdev, drvdata); + if (ret) + return ret; + + dev_info(dev, "device %s (model: %d) mapped at %p, with %d ctx banks\n", + drvdata->name, drvdata->model, drvdata->base, drvdata->ncb); + + if (drvdata->sec_id != -1) { + ret = msm_iommu_sec_ptbl_init(dev); + if (ret) + return ret; + } + + platform_set_drvdata(pdev, drvdata); + + pmon_info = msm_iommu_pm_alloc(dev); + if (pmon_info) { + ret = msm_iommu_pmon_parse_dt(pdev, pmon_info); + if (ret) { + msm_iommu_pm_free(dev); + dev_info(dev, "%s: pmon not available\n", + drvdata->name); + } else { + pmon_info->iommu.base = drvdata->base; + pmon_info->iommu.ops = msm_get_iommu_access_ops(); + pmon_info->iommu.hw_ops = iommu_pm_get_hw_ops_v1(); + pmon_info->iommu.iommu_name = drvdata->name; + ret = msm_iommu_pm_iommu_register(pmon_info); + if (ret) { + dev_err(dev, "%s iommu register fail\n", + drvdata->name); + msm_iommu_pm_free(dev); + } else { + dev_dbg(dev, "%s iommu registered for pmon\n", + pmon_info->iommu.iommu_name); + } + } + } + + global_cfg_irq = platform_get_irq_byname(pdev, "global_cfg_NS_irq"); + if (global_cfg_irq < 0 && global_cfg_irq == -EPROBE_DEFER) + return -EPROBE_DEFER; + if (global_cfg_irq > 0) { + ret = devm_request_threaded_irq(dev, global_cfg_irq, + NULL, + msm_iommu_global_fault_handler, + IRQF_ONESHOT | IRQF_SHARED /*| + IRQF_TRIGGER_RISING*/, + "msm_iommu_global_cfg_irq", + pdev); + if (ret < 0) + dev_err(dev, "Request Global CFG IRQ %d failed with ret=%d\n", + global_cfg_irq, ret); + } + + global_client_irq = + platform_get_irq_byname(pdev, "global_client_NS_irq"); 
+ if (global_client_irq < 0 && global_client_irq == -EPROBE_DEFER) + return -EPROBE_DEFER; + + if (global_client_irq > 0) { + ret = devm_request_threaded_irq(dev, global_client_irq, + NULL, + msm_iommu_global_fault_handler, + IRQF_ONESHOT | IRQF_SHARED /*| + IRQF_TRIGGER_RISING*/, + "msm_iommu_global_client_irq", + pdev); + if (ret < 0) + dev_err(dev, "Request Global Client IRQ %d failed with ret=%d\n", + global_client_irq, ret); + } + + INIT_LIST_HEAD(&drvdata->masters); + + ret = of_platform_populate(np, msm_iommu_ctx_match_table, NULL, dev); + if (ret) { + dev_err(dev, "Failed to create iommu context device\n"); + return ret; + } + + ret = __enable_clocks(drvdata); + if (ret) { + dev_err(dev, "Failed to enable clocks\n"); + return ret; + } + + return msm_iommu_init(&pdev->dev); +} + +static int msm_iommu_remove(struct platform_device *pdev) +{ + struct msm_iommu_drvdata *drv; + + msm_iommu_pm_iommu_unregister(&pdev->dev); + msm_iommu_pm_free(&pdev->dev); + + drv = platform_get_drvdata(pdev); + if (drv) { + __disable_clocks(drv); + __put_bus_vote_client(drv); + msm_iommu_remove_drv(drv); + platform_set_drvdata(pdev, NULL); + } + + return 0; +} + +static int msm_iommu_ctx_parse_dt(struct platform_device *pdev, + struct msm_iommu_ctx_drvdata *ctx_drvdata) +{ + struct resource *r, rp; + int irq = 0, ret = 0; + struct msm_iommu_drvdata *drvdata; + u32 nsid; + u32 n_sid_mask; + unsigned long cb_offset; + + drvdata = dev_get_drvdata(pdev->dev.parent); + if (!drvdata) + return -EPROBE_DEFER; + + get_secure_ctx(pdev->dev.of_node, drvdata, ctx_drvdata); + + if (ctx_drvdata->secure_context) { + irq = platform_get_irq(pdev, 1); + if (irq < 0 && irq == -EPROBE_DEFER) + return -EPROBE_DEFER; + + if (irq > 0) { + ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, + msm_iommu_secure_fault_handler_v2, + IRQF_ONESHOT | IRQF_SHARED, + "msm_iommu_secure_irq", pdev); + if (ret) { + pr_err("Request IRQ %d failed with ret=%d\n", + irq, ret); + return ret; + } + } + } else { + irq 
= platform_get_irq(pdev, 0); + if (irq < 0 && irq == -EPROBE_DEFER) + return -EPROBE_DEFER; + + if (irq > 0) { + ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, + msm_iommu_fault_handler_v2, + IRQF_ONESHOT | IRQF_SHARED, + "msm_iommu_nonsecure_irq", pdev); + if (ret) { + pr_err("Request IRQ %d failed with ret=%d\n", + irq, ret); + goto out; + } + } + } + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!r) { + ret = -EINVAL; + goto out; + } + + ret = of_address_to_resource(pdev->dev.parent->of_node, 0, &rp); + if (ret) + goto out; + + /* Calculate the context bank number using the base addresses. + * Typically CB0 base address is 0x8000 pages away if the number + * of CBs are <=8. So, assume the offset 0x8000 until mentioned + * explicitely. + */ + cb_offset = drvdata->cb_base - drvdata->base; + ctx_drvdata->num = (r->start - rp.start - cb_offset) >> CTX_SHIFT; + + if (of_property_read_string(pdev->dev.of_node, "label", + &ctx_drvdata->name)) + ctx_drvdata->name = dev_name(&pdev->dev); + + if (!of_get_property(pdev->dev.of_node, "qcom,iommu-ctx-sids", &nsid)) { + ret = -EINVAL; + goto out; + } + + if (nsid >= sizeof(ctx_drvdata->sids)) { + ret = -EINVAL; + goto out; + } + + if (of_property_read_u32_array(pdev->dev.of_node, "qcom,iommu-ctx-sids", + ctx_drvdata->sids, + nsid / sizeof(*ctx_drvdata->sids))) { + ret = -EINVAL; + goto out; + } + + ctx_drvdata->nsid = nsid; + ctx_drvdata->asid = -1; + + if (!of_get_property(pdev->dev.of_node, "qcom,iommu-sid-mask", + &n_sid_mask)) { + memset(ctx_drvdata->sid_mask, 0, MAX_NUM_SMR); + goto out; + } + + if (n_sid_mask != nsid) { + ret = -EINVAL; + goto out; + } + + if (of_property_read_u32_array(pdev->dev.of_node, "qcom,iommu-sid-mask", + ctx_drvdata->sid_mask, + n_sid_mask / sizeof(*ctx_drvdata->sid_mask))) { + ret = -EINVAL; + goto out; + } + + ctx_drvdata->n_sid_mask = n_sid_mask; + +out: + return ret; +} + +static int msm_iommu_ctx_probe(struct platform_device *pdev) +{ + struct msm_iommu_ctx_drvdata 
*ctx_drvdata; + int ret; + + if (!qcom_scm_is_available()) + return -EPROBE_DEFER; + + if (!pdev->dev.parent) + return -EINVAL; + + ctx_drvdata = devm_kzalloc(&pdev->dev, sizeof(*ctx_drvdata), + GFP_KERNEL); + if (!ctx_drvdata) + return -ENOMEM; + + ctx_drvdata->pdev = pdev; + INIT_LIST_HEAD(&ctx_drvdata->attached_elm); + + ret = msm_iommu_ctx_parse_dt(pdev, ctx_drvdata); + if (ret) + return ret; + + platform_set_drvdata(pdev, ctx_drvdata); + + dev_info(&pdev->dev, "context %s using bank %d\n", + ctx_drvdata->name, ctx_drvdata->num); + + return 0; +} + +static int msm_iommu_ctx_remove(struct platform_device *pdev) +{ + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static const struct of_device_id msm_iommu_match_table[] = { + { .compatible = "qcom,msm-smmu-v1", }, + { .compatible = "qcom,msm-smmu-v2", }, + {} +}; + +static struct platform_driver msm_iommu_driver = { + .driver = { + .name = "msm_iommu", + .of_match_table = msm_iommu_match_table, + }, + .probe = msm_iommu_probe, + .remove = msm_iommu_remove, +}; + +static const struct of_device_id msm_iommu_ctx_match_table[] = { + { .compatible = "qcom,msm-smmu-v1-ctx", }, + { .compatible = "qcom,msm-smmu-v2-ctx", }, + {} +}; + +static struct platform_driver msm_iommu_ctx_driver = { + .driver = { + .name = "msm_iommu_ctx", + .of_match_table = msm_iommu_ctx_match_table, + }, + .probe = msm_iommu_ctx_probe, + .remove = msm_iommu_ctx_remove, +}; + +static int __init msm_iommu_driver_init(struct device_node *np) +{ + int ret; + + ret = platform_driver_register(&msm_iommu_driver); + if (ret) { + pr_err("Failed to register IOMMU driver\n"); + return ret; + } + + ret = platform_driver_register(&msm_iommu_ctx_driver); + if (ret) { + pr_err("Failed to register IOMMU context driver\n"); + platform_driver_unregister(&msm_iommu_driver); + return ret; + } + + return 0; +} +IOMMU_OF_DECLARE(msm_mmuv1, "qcom,msm-mmu-500", msm_iommu_driver_init); + +static void __exit msm_iommu_driver_exit(void) +{ + 
platform_driver_unregister(&msm_iommu_ctx_driver); + platform_driver_unregister(&msm_iommu_driver); +} +module_exit(msm_iommu_driver_exit); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/iommu/qcom/msm_iommu_hw-v1.h b/drivers/iommu/qcom/msm_iommu_hw-v1.h new file mode 100644 index 000000000000..53e2f4874adb --- /dev/null +++ b/drivers/iommu/qcom/msm_iommu_hw-v1.h @@ -0,0 +1,2320 @@ +/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __ARCH_ARM_MACH_MSM_IOMMU_HW_V2_H +#define __ARCH_ARM_MACH_MSM_IOMMU_HW_V2_H + +#define CTX_SHIFT 12 + +#define CTX_REG(reg, base, ctx) \ + ((base) + (reg) + ((ctx) << CTX_SHIFT)) +#define GLB_REG(reg, base) \ + ((base) + (reg)) +#define GLB_REG_N(b, n, r) GLB_REG(b, ((r) + ((n) << 2))) +#define GLB_FIELD(b, r) ((b) + (r)) +#define GLB_CTX_FIELD(b, c, r) (GLB_FIELD(b, r) + ((c) << CTX_SHIFT)) +#define GLB_FIELD_N(b, n, r) (GLB_FIELD(b, r) + ((n) << 2)) + + +#define GET_GLOBAL_REG(reg, base) (readl_relaxed(GLB_REG(reg, base))) +#define GET_GLOBAL_REG_Q(reg, base) (readq_relaxed(GLB_REG(reg, base))) +#define GET_CTX_REG(reg, base, ctx) (readl_relaxed(CTX_REG(reg, base, ctx))) +#define GET_CTX_REG_Q(reg, base, ctx) (readq_relaxed(CTX_REG(reg, base, ctx))) + +#define SET_GLOBAL_REG(reg, base, val) writel_relaxed((val), GLB_REG(reg, base)) +#define SET_GLOBAL_REG_Q(reg, base, val) \ + (writeq_relaxed((val), GLB_REG(reg, base))) + +#define SET_CTX_REG(reg, base, ctx, val) \ + writel_relaxed((val), (CTX_REG(reg, base, ctx))) +#define 
SET_CTX_REG_Q(reg, base, ctx, val) \ + writeq_relaxed((val), CTX_REG(reg, base, ctx)) + +/* Wrappers for numbered registers */ +#define SET_GLOBAL_REG_N(b, n, r, v) writel_relaxed(((v)), GLB_REG_N(b, n, r)) +#define GET_GLOBAL_REG_N(b, n, r) (readl_relaxed(GLB_REG_N(b, n, r))) + +/* Field wrappers */ +#define GET_GLOBAL_FIELD(b, r, F) \ + GET_FIELD(GLB_FIELD(b, r), r##_##F##_MASK, r##_##F##_SHIFT) +#define GET_CONTEXT_FIELD(b, c, r, F) \ + GET_FIELD(GLB_CTX_FIELD(b, c, r), \ + r##_##F##_MASK, r##_##F##_SHIFT) +#define GET_CONTEXT_FIELD_Q(b, c, r, F) \ + GET_FIELD_Q(GLB_CTX_FIELD(b, c, r), \ + r##_##F##_MASK, r##_##F##_SHIFT) + +#define SET_GLOBAL_FIELD(b, r, F, v) \ + SET_FIELD(GLB_FIELD(b, r), r##_##F##_MASK, r##_##F##_SHIFT, (v)) +#define SET_CONTEXT_FIELD(b, c, r, F, v) \ + SET_FIELD(GLB_CTX_FIELD(b, c, r), \ + r##_##F##_MASK, r##_##F##_SHIFT, (v)) +#define SET_CONTEXT_FIELD_Q(b, c, r, F, v) \ + SET_FIELD_Q(GLB_CTX_FIELD(b, c, r), \ + r##_##F##_MASK, r##_##F##_SHIFT, (v)) + +/* Wrappers for numbered field registers */ +#define SET_GLOBAL_FIELD_N(b, n, r, F, v) \ + SET_FIELD(GLB_FIELD_N(b, n, r), r##_##F##_MASK, r##_##F##_SHIFT, v) +#define GET_GLOBAL_FIELD_N(b, n, r, F) \ + GET_FIELD(GLB_FIELD_N(b, n, r), r##_##F##_MASK, r##_##F##_SHIFT) + +#define GET_FIELD(addr, mask, shift) ((readl_relaxed(addr) >> (shift)) & (mask)) +#define GET_FIELD_Q(addr, mask, shift) \ + ((readq_relaxed(addr) >> (shift)) & (mask)) + +#define SET_FIELD(addr, mask, shift, v) \ +do { \ + int t = readl_relaxed(addr); \ + writel_relaxed((t & ~((mask) << (shift))) + (((v) & \ + (mask)) << (shift)), addr); \ +} while (0) + +#define SET_FIELD_Q(addr, mask, shift, v) \ +do { \ + u64 t = readq_relaxed(addr); \ + writeq_relaxed((t & ~(((u64) mask) << (shift))) + (((v) & \ + ((u64) mask)) << (shift)), addr); \ +} while (0) + + +/* Global register space 0 setters / getters */ +#define SET_CR0(b, v) SET_GLOBAL_REG(CR0, (b), (v)) +#define SET_SCR1(b, v) SET_GLOBAL_REG(SCR1, (b), (v)) +#define 
SET_CR2(b, v) SET_GLOBAL_REG(CR2, (b), (v)) +#define SET_ACR(b, v) SET_GLOBAL_REG(ACR, (b), (v)) +#define SET_IDR0(b, N, v) SET_GLOBAL_REG(IDR0, (b), (v)) +#define SET_IDR1(b, N, v) SET_GLOBAL_REG(IDR1, (b), (v)) +#define SET_IDR2(b, N, v) SET_GLOBAL_REG(IDR2, (b), (v)) +#define SET_IDR7(b, N, v) SET_GLOBAL_REG(IDR7, (b), (v)) +#define SET_GFAR(b, v) SET_GLOBAL_REG_Q(GFAR, (b), (v)) +#define SET_GFSR(b, v) SET_GLOBAL_REG(GFSR, (b), (v)) +#define SET_GFSRRESTORE(b, v) SET_GLOBAL_REG(GFSRRESTORE, (b), (v)) +#define SET_GFSYNR0(b, v) SET_GLOBAL_REG(GFSYNR0, (b), (v)) +#define SET_GFSYNR1(b, v) SET_GLOBAL_REG(GFSYNR1, (b), (v)) +#define SET_GFSYNR2(b, v) SET_GLOBAL_REG(GFSYNR2, (b), (v)) +#define SET_TLBIVMID(b, v) SET_GLOBAL_REG(TLBIVMID, (b), (v)) +#define SET_TLBIALLNSNH(b, v) SET_GLOBAL_REG(TLBIALLNSNH, (b), (v)) +#define SET_TLBIALLH(b, v) SET_GLOBAL_REG(TLBIALLH, (b), (v)) +#define SET_TLBGSYNC(b, v) SET_GLOBAL_REG(TLBGSYNC, (b), (v)) +#define SET_TLBGSTATUS(b, v) SET_GLOBAL_REG(TLBGSTATUS, (b), (v)) +#define SET_TLBIVAH(b, v) SET_GLOBAL_REG(TLBIVAH, (b), (v)) +#define SET_GATS1UR(b, v) SET_GLOBAL_REG(GATS1UR, (b), (v)) +#define SET_GATS1UW(b, v) SET_GLOBAL_REG(GATS1UW, (b), (v)) +#define SET_GATS1PR(b, v) SET_GLOBAL_REG(GATS1PR, (b), (v)) +#define SET_GATS1PW(b, v) SET_GLOBAL_REG(GATS1PW, (b), (v)) +#define SET_GATS12UR(b, v) SET_GLOBAL_REG(GATS12UR, (b), (v)) +#define SET_GATS12UW(b, v) SET_GLOBAL_REG(GATS12UW, (b), (v)) +#define SET_GATS12PR(b, v) SET_GLOBAL_REG(GATS12PR, (b), (v)) +#define SET_GATS12PW(b, v) SET_GLOBAL_REG(GATS12PW, (b), (v)) +#define SET_GPAR(b, v) SET_GLOBAL_REG(GPAR, (b), (v)) +#define SET_GATSR(b, v) SET_GLOBAL_REG(GATSR, (b), (v)) +#define SET_NSCR0(b, v) SET_GLOBAL_REG(NSCR0, (b), (v)) +#define SET_NSCR2(b, v) SET_GLOBAL_REG(NSCR2, (b), (v)) +#define SET_NSACR(b, v) SET_GLOBAL_REG(NSACR, (b), (v)) +#define SET_NSGFAR(b, v) SET_GLOBAL_REG(NSGFAR, (b), (v)) +#define SET_NSGFSRRESTORE(b, v) SET_GLOBAL_REG(NSGFSRRESTORE, (b), (v)) +#define 
SET_PMCR(b, v) SET_GLOBAL_REG(PMCR, (b), (v)) +#define SET_SMR_N(b, N, v) SET_GLOBAL_REG_N(SMR, N, (b), (v)) +#define SET_S2CR_N(b, N, v) SET_GLOBAL_REG_N(S2CR, N, (b), (v)) + +#define GET_CR0(b) GET_GLOBAL_REG(CR0, (b)) +#define GET_SCR1(b) GET_GLOBAL_REG(SCR1, (b)) +#define GET_CR2(b) GET_GLOBAL_REG(CR2, (b)) +#define GET_ACR(b) GET_GLOBAL_REG(ACR, (b)) +#define GET_IDR0(b, N) GET_GLOBAL_REG(IDR0, (b)) +#define GET_IDR1(b, N) GET_GLOBAL_REG(IDR1, (b)) +#define GET_IDR2(b, N) GET_GLOBAL_REG(IDR2, (b)) +#define GET_IDR7(b, N) GET_GLOBAL_REG(IDR7, (b)) +#define GET_GFAR(b) GET_GLOBAL_REG_Q(GFAR, (b)) +#define GET_GFSR(b) GET_GLOBAL_REG(GFSR, (b)) +#define GET_GFSRRESTORE(b) GET_GLOBAL_REG(GFSRRESTORE, (b)) +#define GET_GFSYNR0(b) GET_GLOBAL_REG(GFSYNR0, (b)) +#define GET_GFSYNR1(b) GET_GLOBAL_REG(GFSYNR1, (b)) +#define GET_GFSYNR2(b) GET_GLOBAL_REG(GFSYNR2, (b)) +#define GET_TLBIVMID(b) GET_GLOBAL_REG(TLBIVMID, (b)) +#define GET_TLBIALLNSNH(b) GET_GLOBAL_REG(TLBIALLNSNH, (b)) +#define GET_TLBIALLH(b) GET_GLOBAL_REG(TLBIALLH, (b)) +#define GET_TLBGSYNC(b) GET_GLOBAL_REG(TLBGSYNC, (b)) +#define GET_TLBGSTATUS(b) GET_GLOBAL_REG(TLBGSTATUS, (b)) +#define GET_TLBIVAH(b) GET_GLOBAL_REG(TLBIVAH, (b)) +#define GET_GATS1UR(b) GET_GLOBAL_REG(GATS1UR, (b)) +#define GET_GATS1UW(b) GET_GLOBAL_REG(GATS1UW, (b)) +#define GET_GATS1PR(b) GET_GLOBAL_REG(GATS1PR, (b)) +#define GET_GATS1PW(b) GET_GLOBAL_REG(GATS1PW, (b)) +#define GET_GATS12UR(b) GET_GLOBAL_REG(GATS12UR, (b)) +#define GET_GATS12UW(b) GET_GLOBAL_REG(GATS12UW, (b)) +#define GET_GATS12PR(b) GET_GLOBAL_REG(GATS12PR, (b)) +#define GET_GATS12PW(b) GET_GLOBAL_REG(GATS12PW, (b)) +#define GET_GPAR(b) GET_GLOBAL_REG(GPAR, (b)) +#define GET_GATSR(b) GET_GLOBAL_REG(GATSR, (b)) +#define GET_NSCR0(b) GET_GLOBAL_REG(NSCR0, (b)) +#define GET_NSCR2(b) GET_GLOBAL_REG(NSCR2, (b)) +#define GET_NSACR(b) GET_GLOBAL_REG(NSACR, (b)) +#define GET_PMCR(b) GET_GLOBAL_REG(PMCR, (b)) +#define GET_SMR_N(b, N) GET_GLOBAL_REG_N(SMR, N, (b)) +#define 
GET_S2CR_N(b, N) GET_GLOBAL_REG_N(S2CR, N, (b)) + +/* Global register space 1 setters / getters */ +#define SET_CBAR_N(b, N, v) SET_GLOBAL_REG_N(CBAR, N, (b), (v)) +#define SET_CBFRSYNRA_N(b, N, v) SET_GLOBAL_REG_N(CBFRSYNRA, N, (b), (v)) + +#define GET_CBAR_N(b, N) GET_GLOBAL_REG_N(CBAR, N, (b)) +#define GET_CBFRSYNRA_N(b, N) GET_GLOBAL_REG_N(CBFRSYNRA, N, (b)) + +/* Implementation defined register setters/getters */ +#define SET_MICRO_MMU_CTRL_HALT_REQ(b, v) \ + SET_GLOBAL_FIELD(b, MICRO_MMU_CTRL, HALT_REQ, v) +#define GET_MICRO_MMU_CTRL_IDLE(b) \ + GET_GLOBAL_FIELD(b, MICRO_MMU_CTRL, IDLE) +#define SET_MICRO_MMU_CTRL_RESERVED(b, v) \ + SET_GLOBAL_FIELD(b, MICRO_MMU_CTRL, RESERVED, v) + +#define MMU_CTRL_IDLE (MICRO_MMU_CTRL_IDLE_MASK << MICRO_MMU_CTRL_IDLE_SHIFT) + +#define SET_PREDICTIONDIS0(b, v) SET_GLOBAL_REG(PREDICTIONDIS0, (b), (v)) +#define SET_PREDICTIONDIS1(b, v) SET_GLOBAL_REG(PREDICTIONDIS1, (b), (v)) +#define SET_S1L1BFBLP0(b, v) SET_GLOBAL_REG(S1L1BFBLP0, (b), (v)) + +/* SSD register setters/getters */ +#define SET_SSDR_N(b, N, v) SET_GLOBAL_REG_N(SSDR_N, N, (b), (v)) + +#define GET_SSDR_N(b, N) GET_GLOBAL_REG_N(SSDR_N, N, (b)) + +/* Context bank register setters/getters */ +#define SET_SCTLR(b, c, v) SET_CTX_REG(CB_SCTLR, (b), (c), (v)) +#define SET_ACTLR(b, c, v) SET_CTX_REG(CB_ACTLR, (b), (c), (v)) +#define SET_RESUME(b, c, v) SET_CTX_REG(CB_RESUME, (b), (c), (v)) +#define SET_TTBCR(b, c, v) SET_CTX_REG(CB_TTBCR, (b), (c), (v)) +#define SET_CONTEXTIDR(b, c, v) SET_CTX_REG(CB_CONTEXTIDR, (b), (c), (v)) +#define SET_PRRR(b, c, v) SET_CTX_REG(CB_PRRR, (b), (c), (v)) +#define SET_NMRR(b, c, v) SET_CTX_REG(CB_NMRR, (b), (c), (v)) +#define SET_PAR(b, c, v) SET_CTX_REG(CB_PAR, (b), (c), (v)) +#define SET_FSR(b, c, v) SET_CTX_REG(CB_FSR, (b), (c), (v)) +#define SET_FSRRESTORE(b, c, v) SET_CTX_REG(CB_FSRRESTORE, (b), (c), (v)) +#define SET_FAR(b, c, v) SET_CTX_REG(CB_FAR, (b), (c), (v)) +#define SET_FSYNR0(b, c, v) SET_CTX_REG(CB_FSYNR0, (b), (c), (v)) 
+#define SET_FSYNR1(b, c, v) SET_CTX_REG(CB_FSYNR1, (b), (c), (v)) +#define SET_TLBIVA(b, c, v) SET_CTX_REG(CB_TLBIVA, (b), (c), (v)) +#define SET_TLBIVAA(b, c, v) SET_CTX_REG(CB_TLBIVAA, (b), (c), (v)) +#define SET_TLBIASID(b, c, v) SET_CTX_REG(CB_TLBIASID, (b), (c), (v)) +#define SET_TLBIALL(b, c, v) SET_CTX_REG(CB_TLBIALL, (b), (c), (v)) +#define SET_TLBIVAL(b, c, v) SET_CTX_REG(CB_TLBIVAL, (b), (c), (v)) +#define SET_TLBIVAAL(b, c, v) SET_CTX_REG(CB_TLBIVAAL, (b), (c), (v)) +#define SET_TLBSYNC(b, c, v) SET_CTX_REG(CB_TLBSYNC, (b), (c), (v)) +#define SET_TLBSTATUS(b, c, v) SET_CTX_REG(CB_TLBSTATUS, (b), (c), (v)) +#define SET_ATS1PR(b, c, v) SET_CTX_REG(CB_ATS1PR, (b), (c), (v)) +#define SET_ATS1PW(b, c, v) SET_CTX_REG(CB_ATS1PW, (b), (c), (v)) +#define SET_ATS1UR(b, c, v) SET_CTX_REG(CB_ATS1UR, (b), (c), (v)) +#define SET_ATS1UW(b, c, v) SET_CTX_REG(CB_ATS1UW, (b), (c), (v)) +#define SET_ATSR(b, c, v) SET_CTX_REG(CB_ATSR, (b), (c), (v)) + +#define GET_SCTLR(b, c) GET_CTX_REG(CB_SCTLR, (b), (c)) +#define GET_ACTLR(b, c) GET_CTX_REG(CB_ACTLR, (b), (c)) +#define GET_RESUME(b, c) GET_CTX_REG(CB_RESUME, (b), (c)) +#define GET_TTBR0(b, c) GET_CTX_REG(CB_TTBR0, (b), (c)) +#define GET_TTBR1(b, c) GET_CTX_REG(CB_TTBR1, (b), (c)) +#define GET_TTBCR(b, c) GET_CTX_REG(CB_TTBCR, (b), (c)) +#define GET_CONTEXTIDR(b, c) GET_CTX_REG(CB_CONTEXTIDR, (b), (c)) +#define GET_PRRR(b, c) GET_CTX_REG(CB_PRRR, (b), (c)) +#define GET_NMRR(b, c) GET_CTX_REG(CB_NMRR, (b), (c)) +#define GET_PAR(b, c) GET_CTX_REG_Q(CB_PAR, (b), (c)) +#define GET_FSR(b, c) GET_CTX_REG(CB_FSR, (b), (c)) +#define GET_FSRRESTORE(b, c) GET_CTX_REG(CB_FSRRESTORE, (b), (c)) +#define GET_FAR(b, c) GET_CTX_REG_Q(CB_FAR, (b), (c)) +#define GET_FSYNR0(b, c) GET_CTX_REG(CB_FSYNR0, (b), (c)) +#define GET_FSYNR1(b, c) GET_CTX_REG(CB_FSYNR1, (b), (c)) +#define GET_TLBIVA(b, c) GET_CTX_REG(CB_TLBIVA, (b), (c)) +#define GET_TLBIVAA(b, c) GET_CTX_REG(CB_TLBIVAA, (b), (c)) +#define GET_TLBIASID(b, c) GET_CTX_REG(CB_TLBIASID, 
(b), (c)) +#define GET_TLBIALL(b, c) GET_CTX_REG(CB_TLBIALL, (b), (c)) +#define GET_TLBIVAL(b, c) GET_CTX_REG(CB_TLBIVAL, (b), (c)) +#define GET_TLBIVAAL(b, c) GET_CTX_REG(CB_TLBIVAAL, (b), (c)) +#define GET_TLBSYNC(b, c) GET_CTX_REG(CB_TLBSYNC, (b), (c)) +#define GET_TLBSTATUS(b, c) GET_CTX_REG(CB_TLBSTATUS, (b), (c)) +#define GET_ATS1PR(b, c) GET_CTX_REG(CB_ATS1PR, (b), (c)) +#define GET_ATS1PW(b, c) GET_CTX_REG(CB_ATS1PW, (b), (c)) +#define GET_ATS1UR(b, c) GET_CTX_REG(CB_ATS1UR, (b), (c)) +#define GET_ATS1UW(b, c) GET_CTX_REG(CB_ATS1UW, (b), (c)) +#define GET_ATSR(b, c) GET_CTX_REG(CB_ATSR, (b), (c)) + +/* Global Register field setters / getters */ +/* Configuration Register: CR0/NSCR0 */ +#define SET_CR0_NSCFG(b, v) SET_GLOBAL_FIELD(b, CR0, NSCFG, v) +#define SET_CR0_WACFG(b, v) SET_GLOBAL_FIELD(b, CR0, WACFG, v) +#define SET_CR0_RACFG(b, v) SET_GLOBAL_FIELD(b, CR0, RACFG, v) +#define SET_CR0_SHCFG(b, v) SET_GLOBAL_FIELD(b, CR0, SHCFG, v) +#define SET_CR0_SMCFCFG(b, v) SET_GLOBAL_FIELD(b, CR0, SMCFCFG, v) +#define SET_NSCR0_SMCFCFG(b, v) SET_GLOBAL_FIELD(b, NSCR0, SMCFCFG, v) +#define SET_CR0_MTCFG(b, v) SET_GLOBAL_FIELD(b, CR0, MTCFG, v) +#define SET_CR0_BSU(b, v) SET_GLOBAL_FIELD(b, CR0, BSU, v) +#define SET_CR0_FB(b, v) SET_GLOBAL_FIELD(b, CR0, FB, v) +#define SET_CR0_PTM(b, v) SET_GLOBAL_FIELD(b, CR0, PTM, v) +#define SET_CR0_VMIDPNE(b, v) SET_GLOBAL_FIELD(b, CR0, VMIDPNE, v) +#define SET_CR0_USFCFG(b, v) SET_GLOBAL_FIELD(b, CR0, USFCFG, v) +#define SET_NSCR0_USFCFG(b, v) SET_GLOBAL_FIELD(b, NSCR0, USFCFG, v) +#define SET_CR0_GSE(b, v) SET_GLOBAL_FIELD(b, CR0, GSE, v) +#define SET_CR0_STALLD(b, v) SET_GLOBAL_FIELD(b, CR0, STALLD, v) +#define SET_NSCR0_STALLD(b, v) SET_GLOBAL_FIELD(b, NSCR0, STALLD, v) +#define SET_CR0_TRANSIENTCFG(b, v) SET_GLOBAL_FIELD(b, CR0, TRANSIENTCFG, v) +#define SET_CR0_GCFGFIE(b, v) SET_GLOBAL_FIELD(b, CR0, GCFGFIE, v) +#define SET_NSCR0_GCFGFIE(b, v) SET_GLOBAL_FIELD(b, NSCR0, GCFGFIE, v) +#define SET_CR0_GCFGFRE(b, v) 
SET_GLOBAL_FIELD(b, CR0, GCFGFRE, v) +#define SET_NSCR0_GCFGFRE(b, v) SET_GLOBAL_FIELD(b, NSCR0, GCFGFRE, v) +#define SET_CR0_GFIE(b, v) SET_GLOBAL_FIELD(b, CR0, GFIE, v) +#define SET_NSCR0_GFIE(b, v) SET_GLOBAL_FIELD(b, NSCR0, GFIE, v) +#define SET_CR0_GFRE(b, v) SET_GLOBAL_FIELD(b, CR0, GFRE, v) +#define SET_NSCR0_GFRE(b, v) SET_GLOBAL_FIELD(b, NSCR0, GFRE, v) +#define SET_CR0_CLIENTPD(b, v) SET_GLOBAL_FIELD(b, CR0, CLIENTPD, v) +#define SET_NSCR0_CLIENTPD(b, v) SET_GLOBAL_FIELD(b, NSCR0, CLIENTPD, v) + +#define SET_ACR_SMTNMC_BPTLBEN(b, v)\ + SET_GLOBAL_FIELD(b, ACR, SMTNMC_BPTLBEN, v) +#define SET_ACR_MMUDIS_BPTLBEN(b, v)\ + SET_GLOBAL_FIELD(b, ACR, MMUDIS_BPTLBEN, v) +#define SET_ACR_S2CR_BPTLBEN(b, v)\ + SET_GLOBAL_FIELD(b, ACR, S2CR_BPTLBEN, v) + +#define SET_NSACR_SMTNMC_BPTLBEN(b, v)\ + SET_GLOBAL_FIELD(b, NSACR, SMTNMC_BPTLBEN, v) +#define SET_NSACR_MMUDIS_BPTLBEN(b, v)\ + SET_GLOBAL_FIELD(b, NSACR, MMUDIS_BPTLBEN, v) +#define SET_NSACR_S2CR_BPTLBEN(b, v)\ + SET_GLOBAL_FIELD(b, NSACR, S2CR_BPTLBEN, v) + +#define GET_CR0_NSCFG(b) GET_GLOBAL_FIELD(b, CR0, NSCFG) +#define GET_CR0_WACFG(b) GET_GLOBAL_FIELD(b, CR0, WACFG) +#define GET_CR0_RACFG(b) GET_GLOBAL_FIELD(b, CR0, RACFG) +#define GET_CR0_SHCFG(b) GET_GLOBAL_FIELD(b, CR0, SHCFG) +#define GET_CR0_SMCFCFG(b) GET_GLOBAL_FIELD(b, CR0, SMCFCFG) +#define GET_CR0_MTCFG(b) GET_GLOBAL_FIELD(b, CR0, MTCFG) +#define GET_CR0_BSU(b) GET_GLOBAL_FIELD(b, CR0, BSU) +#define GET_CR0_FB(b) GET_GLOBAL_FIELD(b, CR0, FB) +#define GET_CR0_PTM(b) GET_GLOBAL_FIELD(b, CR0, PTM) +#define GET_CR0_VMIDPNE(b) GET_GLOBAL_FIELD(b, CR0, VMIDPNE) +#define GET_CR0_USFCFG(b) GET_GLOBAL_FIELD(b, CR0, USFCFG) +#define GET_CR0_GSE(b) GET_GLOBAL_FIELD(b, CR0, GSE) +#define GET_CR0_STALLD(b) GET_GLOBAL_FIELD(b, CR0, STALLD) +#define GET_CR0_TRANSIENTCFG(b) GET_GLOBAL_FIELD(b, CR0, TRANSIENTCFG) +#define GET_CR0_GCFGFIE(b) GET_GLOBAL_FIELD(b, CR0, GCFGFIE) +#define GET_CR0_GCFGFRE(b) GET_GLOBAL_FIELD(b, CR0, GCFGFRE) +#define GET_CR0_GFIE(b) 
GET_GLOBAL_FIELD(b, CR0, GFIE) +#define GET_CR0_GFRE(b) GET_GLOBAL_FIELD(b, CR0, GFRE) +#define GET_CR0_CLIENTPD(b) GET_GLOBAL_FIELD(b, CR0, CLIENTPD) + +/* Configuration Register: CR2 */ +#define SET_CR2_BPVMID(b, v) SET_GLOBAL_FIELD(b, CR2, BPVMID, v) + +#define GET_CR2_BPVMID(b) GET_GLOBAL_FIELD(b, CR2, BPVMID) + +/* Global Address Translation, Stage 1, Privileged Read: GATS1PR */ +#define SET_GATS1PR_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS1PR, ADDR, v) +#define SET_GATS1PR_NDX(b, v) SET_GLOBAL_FIELD(b, GATS1PR, NDX, v) + +#define GET_GATS1PR_ADDR(b) GET_GLOBAL_FIELD(b, GATS1PR, ADDR) +#define GET_GATS1PR_NDX(b) GET_GLOBAL_FIELD(b, GATS1PR, NDX) + +/* Global Address Translation, Stage 1, Privileged Write: GATS1PW */ +#define SET_GATS1PW_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS1PW, ADDR, v) +#define SET_GATS1PW_NDX(b, v) SET_GLOBAL_FIELD(b, GATS1PW, NDX, v) + +#define GET_GATS1PW_ADDR(b) GET_GLOBAL_FIELD(b, GATS1PW, ADDR) +#define GET_GATS1PW_NDX(b) GET_GLOBAL_FIELD(b, GATS1PW, NDX) + +/* Global Address Translation, Stage 1, User Read: GATS1UR */ +#define SET_GATS1UR_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS1UR, ADDR, v) +#define SET_GATS1UR_NDX(b, v) SET_GLOBAL_FIELD(b, GATS1UR, NDX, v) + +#define GET_GATS1UR_ADDR(b) GET_GLOBAL_FIELD(b, GATS1UR, ADDR) +#define GET_GATS1UR_NDX(b) GET_GLOBAL_FIELD(b, GATS1UR, NDX) + +/* Global Address Translation, Stage 1, User Read: GATS1UW */ +#define SET_GATS1UW_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS1UW, ADDR, v) +#define SET_GATS1UW_NDX(b, v) SET_GLOBAL_FIELD(b, GATS1UW, NDX, v) + +#define GET_GATS1UW_ADDR(b) GET_GLOBAL_FIELD(b, GATS1UW, ADDR) +#define GET_GATS1UW_NDX(b) GET_GLOBAL_FIELD(b, GATS1UW, NDX) + +/* Global Address Translation, Stage 1 and 2, Privileged Read: GATS12PR */ +#define SET_GATS12PR_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS12PR, ADDR, v) +#define SET_GATS12PR_NDX(b, v) SET_GLOBAL_FIELD(b, GATS12PR, NDX, v) + +#define GET_GATS12PR_ADDR(b) GET_GLOBAL_FIELD(b, GATS12PR, ADDR) +#define GET_GATS12PR_NDX(b) GET_GLOBAL_FIELD(b, GATS12PR, 
NDX) + +/* Global Address Translation, Stage 1, Privileged Write: GATS1PW */ +#define SET_GATS12PW_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS12PW, ADDR, v) +#define SET_GATS12PW_NDX(b, v) SET_GLOBAL_FIELD(b, GATS12PW, NDX, v) + +#define GET_GATS12PW_ADDR(b) GET_GLOBAL_FIELD(b, GATS12PW, ADDR) +#define GET_GATS12PW_NDX(b) GET_GLOBAL_FIELD(b, GATS12PW, NDX) + +/* Global Address Translation, Stage 1, User Read: GATS1UR */ +#define SET_GATS12UR_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS12UR, ADDR, v) +#define SET_GATS12UR_NDX(b, v) SET_GLOBAL_FIELD(b, GATS12UR, NDX, v) + +#define GET_GATS12UR_ADDR(b) GET_GLOBAL_FIELD(b, GATS12UR, ADDR) +#define GET_GATS12UR_NDX(b) GET_GLOBAL_FIELD(b, GATS12UR, NDX) + +/* Global Address Translation, Stage 1, User Read: GATS1UW */ +#define SET_GATS12UW_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS12UW, ADDR, v) +#define SET_GATS12UW_NDX(b, v) SET_GLOBAL_FIELD(b, GATS12UW, NDX, v) + +#define GET_GATS12UW_ADDR(b) GET_GLOBAL_FIELD(b, GATS12UW, ADDR) +#define GET_GATS12UW_NDX(b) GET_GLOBAL_FIELD(b, GATS12UW, NDX) + +/* Global Address Translation Status Register: GATSR */ +#define SET_GATSR_ACTIVE(b, v) SET_GLOBAL_FIELD(b, GATSR, ACTIVE, v) + +#define GET_GATSR_ACTIVE(b) GET_GLOBAL_FIELD(b, GATSR, ACTIVE) + +/* Global Fault Address Register: GFAR */ +#define SET_GFAR_FADDR(b, v) SET_GLOBAL_FIELD(b, GFAR, FADDR, v) + +#define GET_GFAR_FADDR(b) GET_GLOBAL_FIELD(b, GFAR, FADDR) + +/* Global Fault Status Register: GFSR */ +#define SET_GFSR_ICF(b, v) SET_GLOBAL_FIELD(b, GFSR, ICF, v) +#define SET_GFSR_USF(b, v) SET_GLOBAL_FIELD(b, GFSR, USF, v) +#define SET_GFSR_SMCF(b, v) SET_GLOBAL_FIELD(b, GFSR, SMCF, v) +#define SET_GFSR_UCBF(b, v) SET_GLOBAL_FIELD(b, GFSR, UCBF, v) +#define SET_GFSR_UCIF(b, v) SET_GLOBAL_FIELD(b, GFSR, UCIF, v) +#define SET_GFSR_CAF(b, v) SET_GLOBAL_FIELD(b, GFSR, CAF, v) +#define SET_GFSR_EF(b, v) SET_GLOBAL_FIELD(b, GFSR, EF, v) +#define SET_GFSR_PF(b, v) SET_GLOBAL_FIELD(b, GFSR, PF, v) +#define SET_GFSR_MULTI(b, v) SET_GLOBAL_FIELD(b, GFSR, 
MULTI, v) + +#define GET_GFSR_ICF(b) GET_GLOBAL_FIELD(b, GFSR, ICF) +#define GET_GFSR_USF(b) GET_GLOBAL_FIELD(b, GFSR, USF) +#define GET_GFSR_SMCF(b) GET_GLOBAL_FIELD(b, GFSR, SMCF) +#define GET_GFSR_UCBF(b) GET_GLOBAL_FIELD(b, GFSR, UCBF) +#define GET_GFSR_UCIF(b) GET_GLOBAL_FIELD(b, GFSR, UCIF) +#define GET_GFSR_CAF(b) GET_GLOBAL_FIELD(b, GFSR, CAF) +#define GET_GFSR_EF(b) GET_GLOBAL_FIELD(b, GFSR, EF) +#define GET_GFSR_PF(b) GET_GLOBAL_FIELD(b, GFSR, PF) +#define GET_GFSR_MULTI(b) GET_GLOBAL_FIELD(b, GFSR, MULTI) + +/* Global Fault Syndrome Register 0: GFSYNR0 */ +#define SET_GFSYNR0_NESTED(b, v) SET_GLOBAL_FIELD(b, GFSYNR0, NESTED, v) +#define SET_GFSYNR0_WNR(b, v) SET_GLOBAL_FIELD(b, GFSYNR0, WNR, v) +#define SET_GFSYNR0_PNU(b, v) SET_GLOBAL_FIELD(b, GFSYNR0, PNU, v) +#define SET_GFSYNR0_IND(b, v) SET_GLOBAL_FIELD(b, GFSYNR0, IND, v) +#define SET_GFSYNR0_NSSTATE(b, v) SET_GLOBAL_FIELD(b, GFSYNR0, NSSTATE, v) +#define SET_GFSYNR0_NSATTR(b, v) SET_GLOBAL_FIELD(b, GFSYNR0, NSATTR, v) + +#define GET_GFSYNR0_NESTED(b) GET_GLOBAL_FIELD(b, GFSYNR0, NESTED) +#define GET_GFSYNR0_WNR(b) GET_GLOBAL_FIELD(b, GFSYNR0, WNR) +#define GET_GFSYNR0_PNU(b) GET_GLOBAL_FIELD(b, GFSYNR0, PNU) +#define GET_GFSYNR0_IND(b) GET_GLOBAL_FIELD(b, GFSYNR0, IND) +#define GET_GFSYNR0_NSSTATE(b) GET_GLOBAL_FIELD(b, GFSYNR0, NSSTATE) +#define GET_GFSYNR0_NSATTR(b) GET_GLOBAL_FIELD(b, GFSYNR0, NSATTR) + +/* Global Fault Syndrome Register 1: GFSYNR1 */ +#define SET_GFSYNR1_SID(b, v) SET_GLOBAL_FIELD(b, GFSYNR1, SID, v) + +#define GET_GFSYNR1_SID(b) GET_GLOBAL_FIELD(b, GFSYNR1, SID) + +/* Global Physical Address Register: GPAR */ +#define SET_GPAR_F(b, v) SET_GLOBAL_FIELD(b, GPAR, F, v) +#define SET_GPAR_SS(b, v) SET_GLOBAL_FIELD(b, GPAR, SS, v) +#define SET_GPAR_OUTER(b, v) SET_GLOBAL_FIELD(b, GPAR, OUTER, v) +#define SET_GPAR_INNER(b, v) SET_GLOBAL_FIELD(b, GPAR, INNER, v) +#define SET_GPAR_SH(b, v) SET_GLOBAL_FIELD(b, GPAR, SH, v) +#define SET_GPAR_NS(b, v) SET_GLOBAL_FIELD(b, GPAR, NS, v) 
+#define SET_GPAR_NOS(b, v) SET_GLOBAL_FIELD(b, GPAR, NOS, v) +#define SET_GPAR_PA(b, v) SET_GLOBAL_FIELD(b, GPAR, PA, v) +#define SET_GPAR_TF(b, v) SET_GLOBAL_FIELD(b, GPAR, TF, v) +#define SET_GPAR_AFF(b, v) SET_GLOBAL_FIELD(b, GPAR, AFF, v) +#define SET_GPAR_PF(b, v) SET_GLOBAL_FIELD(b, GPAR, PF, v) +#define SET_GPAR_EF(b, v) SET_GLOBAL_FIELD(b, GPAR, EF, v) +#define SET_GPAR_TLCMCF(b, v) SET_GLOBAL_FIELD(b, GPAR, TLCMCF, v) +#define SET_GPAR_TLBLKF(b, v) SET_GLOBAL_FIELD(b, GPAR, TLBLKF, v) +#define SET_GPAR_UCBF(b, v) SET_GLOBAL_FIELD(b, GPAR, UCBF, v) + +#define GET_GPAR_F(b) GET_GLOBAL_FIELD(b, GPAR, F) +#define GET_GPAR_SS(b) GET_GLOBAL_FIELD(b, GPAR, SS) +#define GET_GPAR_OUTER(b) GET_GLOBAL_FIELD(b, GPAR, OUTER) +#define GET_GPAR_INNER(b) GET_GLOBAL_FIELD(b, GPAR, INNER) +#define GET_GPAR_SH(b) GET_GLOBAL_FIELD(b, GPAR, SH) +#define GET_GPAR_NS(b) GET_GLOBAL_FIELD(b, GPAR, NS) +#define GET_GPAR_NOS(b) GET_GLOBAL_FIELD(b, GPAR, NOS) +#define GET_GPAR_PA(b) GET_GLOBAL_FIELD(b, GPAR, PA) +#define GET_GPAR_TF(b) GET_GLOBAL_FIELD(b, GPAR, TF) +#define GET_GPAR_AFF(b) GET_GLOBAL_FIELD(b, GPAR, AFF) +#define GET_GPAR_PF(b) GET_GLOBAL_FIELD(b, GPAR, PF) +#define GET_GPAR_EF(b) GET_GLOBAL_FIELD(b, GPAR, EF) +#define GET_GPAR_TLCMCF(b) GET_GLOBAL_FIELD(b, GPAR, TLCMCF) +#define GET_GPAR_TLBLKF(b) GET_GLOBAL_FIELD(b, GPAR, TLBLKF) +#define GET_GPAR_UCBF(b) GET_GLOBAL_FIELD(b, GPAR, UCBF) + +/* Identification Register: IDR0 */ +#define SET_IDR0_NUMSMRG(b, v) SET_GLOBAL_FIELD(b, IDR0, NUMSMRG, v) +#define SET_IDR0_NUMSIDB(b, v) SET_GLOBAL_FIELD(b, IDR0, NUMSIDB, v) +#define SET_IDR0_BTM(b, v) SET_GLOBAL_FIELD(b, IDR0, BTM, v) +#define SET_IDR0_CTTW(b, v) SET_GLOBAL_FIELD(b, IDR0, CTTW, v) +#define SET_IDR0_NUMIRPT(b, v) SET_GLOBAL_FIELD(b, IDR0, NUMIRPT, v) +#define SET_IDR0_PTFS(b, v) SET_GLOBAL_FIELD(b, IDR0, PTFS, v) +#define SET_IDR0_SMS(b, v) SET_GLOBAL_FIELD(b, IDR0, SMS, v) +#define SET_IDR0_NTS(b, v) SET_GLOBAL_FIELD(b, IDR0, NTS, v) +#define SET_IDR0_S2TS(b, 
v) SET_GLOBAL_FIELD(b, IDR0, S2TS, v) +#define SET_IDR0_S1TS(b, v) SET_GLOBAL_FIELD(b, IDR0, S1TS, v) +#define SET_IDR0_SES(b, v) SET_GLOBAL_FIELD(b, IDR0, SES, v) + +#define GET_IDR0_NUMSMRG(b) GET_GLOBAL_FIELD(b, IDR0, NUMSMRG) +#define GET_IDR0_NUMSIDB(b) GET_GLOBAL_FIELD(b, IDR0, NUMSIDB) +#define GET_IDR0_BTM(b) GET_GLOBAL_FIELD(b, IDR0, BTM) +#define GET_IDR0_CTTW(b) GET_GLOBAL_FIELD(b, IDR0, CTTW) +#define GET_IDR0_NUMIRPT(b) GET_GLOBAL_FIELD(b, IDR0, NUMIRPT) +#define GET_IDR0_PTFS(b) GET_GLOBAL_FIELD(b, IDR0, PTFS) +#define GET_IDR0_SMS(b) GET_GLOBAL_FIELD(b, IDR0, SMS) +#define GET_IDR0_NTS(b) GET_GLOBAL_FIELD(b, IDR0, NTS) +#define GET_IDR0_S2TS(b) GET_GLOBAL_FIELD(b, IDR0, S2TS) +#define GET_IDR0_S1TS(b) GET_GLOBAL_FIELD(b, IDR0, S1TS) +#define GET_IDR0_SES(b) GET_GLOBAL_FIELD(b, IDR0, SES) + +/* Identification Register: IDR1 */ +#define SET_IDR1_NUMCB(b, v) SET_GLOBAL_FIELD(b, IDR1, NUMCB, v) +#define SET_IDR1_NUMSSDNDXB(b, v) SET_GLOBAL_FIELD(b, IDR1, NUMSSDNDXB, v) +#define SET_IDR1_SSDTP(b, v) SET_GLOBAL_FIELD(b, IDR1, SSDTP, v) +#define SET_IDR1_SMCD(b, v) SET_GLOBAL_FIELD(b, IDR1, SMCD, v) +#define SET_IDR1_NUMS2CB(b, v) SET_GLOBAL_FIELD(b, IDR1, NUMS2CB, v) +#define SET_IDR1_NUMPAGENDXB(b, v) SET_GLOBAL_FIELD(b, IDR1, NUMPAGENDXB, v) +#define SET_IDR1_PAGESIZE(b, v) SET_GLOBAL_FIELD(b, IDR1, PAGESIZE, v) + +#define GET_IDR1_NUMCB(b) GET_GLOBAL_FIELD(b, IDR1, NUMCB) +#define GET_IDR1_NUMSSDNDXB(b) GET_GLOBAL_FIELD(b, IDR1, NUMSSDNDXB) +#define GET_IDR1_SSDTP(b) GET_GLOBAL_FIELD(b, IDR1, SSDTP) +#define GET_IDR1_SMCD(b) GET_GLOBAL_FIELD(b, IDR1, SMCD) +#define GET_IDR1_NUMS2CB(b) GET_GLOBAL_FIELD(b, IDR1, NUMS2CB) +#define GET_IDR1_NUMPAGENDXB(b) GET_GLOBAL_FIELD(b, IDR1, NUMPAGENDXB) +#define GET_IDR1_PAGESIZE(b) GET_GLOBAL_FIELD(b, IDR1, PAGESIZE) + +/* Identification Register: IDR2 */ +#define SET_IDR2_IAS(b, v) SET_GLOBAL_FIELD(b, IDR2, IAS, v) +#define SET_IDR2_OAS(b, v) SET_GLOBAL_FIELD(b, IDR2, OAS, v) + +#define GET_IDR2_IAS(b) 
GET_GLOBAL_FIELD(b, IDR2, IAS) +#define GET_IDR2_OAS(b) GET_GLOBAL_FIELD(b, IDR2, OAS) + +/* Identification Register: IDR7 */ +#define SET_IDR7_MINOR(b, v) SET_GLOBAL_FIELD(b, IDR7, MINOR, v) +#define SET_IDR7_MAJOR(b, v) SET_GLOBAL_FIELD(b, IDR7, MAJOR, v) + +#define GET_IDR7_MINOR(b) GET_GLOBAL_FIELD(b, IDR7, MINOR) +#define GET_IDR7_MAJOR(b) GET_GLOBAL_FIELD(b, IDR7, MAJOR) + +/* Stream to Context Register: S2CR_N */ +#define SET_S2CR_CBNDX(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, CBNDX, v) +#define SET_S2CR_SHCFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, SHCFG, v) +#define SET_S2CR_MTCFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, MTCFG, v) +#define SET_S2CR_MEMATTR(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, MEMATTR, v) +#define SET_S2CR_TYPE(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, TYPE, v) +#define SET_S2CR_NSCFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, NSCFG, v) +#define SET_S2CR_RACFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, RACFG, v) +#define SET_S2CR_WACFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, WACFG, v) +#define SET_S2CR_PRIVCFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, PRIVCFG, v) +#define SET_S2CR_INSTCFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, INSTCFG, v) +#define SET_S2CR_TRANSIENTCFG(b, n, v) \ + SET_GLOBAL_FIELD_N(b, n, S2CR, TRANSIENTCFG, v) +#define SET_S2CR_VMID(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, VMID, v) +#define SET_S2CR_BSU(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, BSU, v) +#define SET_S2CR_FB(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, FB, v) + +#define GET_S2CR_CBNDX(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, CBNDX) +#define GET_S2CR_SHCFG(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, SHCFG) +#define GET_S2CR_MTCFG(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, MTCFG) +#define GET_S2CR_MEMATTR(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, MEMATTR) +#define GET_S2CR_TYPE(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, TYPE) +#define GET_S2CR_NSCFG(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, NSCFG) +#define GET_S2CR_RACFG(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, RACFG) +#define GET_S2CR_WACFG(b, n) 
GET_GLOBAL_FIELD_N(b, n, S2CR, WACFG) +#define GET_S2CR_PRIVCFG(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, PRIVCFG) +#define GET_S2CR_INSTCFG(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, INSTCFG) +#define GET_S2CR_TRANSIENTCFG(b, n) \ + GET_GLOBAL_FIELD_N(b, n, S2CR, TRANSIENTCFG) +#define GET_S2CR_VMID(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, VMID) +#define GET_S2CR_BSU(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, BSU) +#define GET_S2CR_FB(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, FB) + +/* Stream Match Register: SMR_N */ +#define SET_SMR_ID(b, n, v) SET_GLOBAL_FIELD_N(b, n, SMR, ID, v) +#define SET_SMR_MASK(b, n, v) SET_GLOBAL_FIELD_N(b, n, SMR, MASK, v) +#define SET_SMR_VALID(b, n, v) SET_GLOBAL_FIELD_N(b, n, SMR, VALID, v) + +#define GET_SMR_ID(b, n) GET_GLOBAL_FIELD_N(b, n, SMR, ID) +#define GET_SMR_MASK(b, n) GET_GLOBAL_FIELD_N(b, n, SMR, MASK) +#define GET_SMR_VALID(b, n) GET_GLOBAL_FIELD_N(b, n, SMR, VALID) + +/* Global TLB Status: TLBGSTATUS */ +#define SET_TLBGSTATUS_GSACTIVE(b, v) \ + SET_GLOBAL_FIELD(b, TLBGSTATUS, GSACTIVE, v) + +#define GET_TLBGSTATUS_GSACTIVE(b) \ + GET_GLOBAL_FIELD(b, TLBGSTATUS, GSACTIVE) + +/* Invalidate Hyp TLB by VA: TLBIVAH */ +#define SET_TLBIVAH_ADDR(b, v) SET_GLOBAL_FIELD(b, TLBIVAH, ADDR, v) + +#define GET_TLBIVAH_ADDR(b) GET_GLOBAL_FIELD(b, TLBIVAH, ADDR) + +/* Invalidate TLB by VMID: TLBIVMID */ +#define SET_TLBIVMID_VMID(b, v) SET_GLOBAL_FIELD(b, TLBIVMID, VMID, v) + +#define GET_TLBIVMID_VMID(b) GET_GLOBAL_FIELD(b, TLBIVMID, VMID) + +/* Global Register Space 1 Field setters/getters*/ +/* Context Bank Attribute Register: CBAR_N */ +#define SET_CBAR_VMID(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, VMID, v) +#define SET_CBAR_CBNDX(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, CBNDX, v) +#define SET_CBAR_BPSHCFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, BPSHCFG, v) +#define SET_CBAR_HYPC(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, HYPC, v) +#define SET_CBAR_FB(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, FB, v) +#define SET_CBAR_MEMATTR(b, n, v) SET_GLOBAL_FIELD_N(b, n, 
CBAR, MEMATTR, v) +#define SET_CBAR_TYPE(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, TYPE, v) +#define SET_CBAR_BSU(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, BSU, v) +#define SET_CBAR_RACFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, RACFG, v) +#define SET_CBAR_WACFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, WACFG, v) +#define SET_CBAR_IRPTNDX(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, IRPTNDX, v) + +#define GET_CBAR_VMID(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, VMID) +#define GET_CBAR_CBNDX(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, CBNDX) +#define GET_CBAR_BPSHCFG(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, BPSHCFG) +#define GET_CBAR_HYPC(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, HYPC) +#define GET_CBAR_FB(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, FB) +#define GET_CBAR_MEMATTR(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, MEMATTR) +#define GET_CBAR_TYPE(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, TYPE) +#define GET_CBAR_BSU(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, BSU) +#define GET_CBAR_RACFG(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, RACFG) +#define GET_CBAR_WACFG(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, WACFG) +#define GET_CBAR_IRPTNDX(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, IRPTNDX) + +/* Context Bank Fault Restricted Syndrome Register A: CBFRSYNRA_N */ +#define SET_CBFRSYNRA_SID(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBFRSYNRA, SID, v) + +#define GET_CBFRSYNRA_SID(b, n) GET_GLOBAL_FIELD_N(b, n, CBFRSYNRA, SID) + +/* Stage 1 Context Bank Format Fields */ +#define SET_CB_ACTLR_REQPRIORITY (b, c, v) \ + SET_CONTEXT_FIELD(b, c, CB_ACTLR, REQPRIORITY, v) +#define SET_CB_ACTLR_REQPRIORITYCFG(b, c, v) \ + SET_CONTEXT_FIELD(b, c, CB_ACTLR, REQPRIORITYCFG, v) +#define SET_CB_ACTLR_PRIVCFG(b, c, v) \ + SET_CONTEXT_FIELD(b, c, CB_ACTLR, PRIVCFG, v) +#define SET_CB_ACTLR_BPRCOSH(b, c, v) \ + SET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCOSH, v) +#define SET_CB_ACTLR_BPRCISH(b, c, v) \ + SET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCISH, v) +#define SET_CB_ACTLR_BPRCNSH(b, c, v) \ + SET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCNSH, v) + +#define GET_CB_ACTLR_REQPRIORITY 
(b, c) \ + GET_CONTEXT_FIELD(b, c, CB_ACTLR, REQPRIORITY) +#define GET_CB_ACTLR_REQPRIORITYCFG(b, c) \ + GET_CONTEXT_FIELD(b, c, CB_ACTLR, REQPRIORITYCFG) +#define GET_CB_ACTLR_PRIVCFG(b, c) GET_CONTEXT_FIELD(b, c, CB_ACTLR, PRIVCFG) +#define GET_CB_ACTLR_BPRCOSH(b, c) GET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCOSH) +#define GET_CB_ACTLR_BPRCISH(b, c) GET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCISH) +#define GET_CB_ACTLR_BPRCNSH(b, c) GET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCNSH) + +/* Address Translation, Stage 1, Privileged Read: CB_ATS1PR */ +#define SET_CB_ATS1PR_ADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_ATS1PR, ADDR, v) + +#define GET_CB_ATS1PR_ADDR(b, c) GET_CONTEXT_FIELD(b, c, CB_ATS1PR, ADDR) + +/* Address Translation, Stage 1, Privileged Write: CB_ATS1PW */ +#define SET_CB_ATS1PW_ADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_ATS1PW, ADDR, v) + +#define GET_CB_ATS1PW_ADDR(b, c) GET_CONTEXT_FIELD(b, c, CB_ATS1PW, ADDR) + +/* Address Translation, Stage 1, User Read: CB_ATS1UR */ +#define SET_CB_ATS1UR_ADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_ATS1UR, ADDR, v) + +#define GET_CB_ATS1UR_ADDR(b, c) GET_CONTEXT_FIELD(b, c, CB_ATS1UR, ADDR) + +/* Address Translation, Stage 1, User Write: CB_ATS1UW */ +#define SET_CB_ATS1UW_ADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_ATS1UW, ADDR, v) + +#define GET_CB_ATS1UW_ADDR(b, c) GET_CONTEXT_FIELD(b, c, CB_ATS1UW, ADDR) + +/* Address Translation Status Register: CB_ATSR */ +#define SET_CB_ATSR_ACTIVE(b, c, v) SET_CONTEXT_FIELD(b, c, CB_ATSR, ACTIVE, v) + +#define GET_CB_ATSR_ACTIVE(b, c) GET_CONTEXT_FIELD(b, c, CB_ATSR, ACTIVE) + +/* Context ID Register: CB_CONTEXTIDR */ +#define SET_CB_CONTEXTIDR_ASID(b, c, v) \ + SET_CONTEXT_FIELD(b, c, CB_CONTEXTIDR, ASID, v) +#define SET_CB_CONTEXTIDR_PROCID(b, c, v) \ + SET_CONTEXT_FIELD(b, c, CB_CONTEXTIDR, PROCID, v) + +#define GET_CB_CONTEXTIDR_ASID(b, c) \ + GET_CONTEXT_FIELD(b, c, CB_CONTEXTIDR, ASID) +#define GET_CB_CONTEXTIDR_PROCID(b, c) \ + GET_CONTEXT_FIELD(b, c, CB_CONTEXTIDR, PROCID) + +/* Fault 
Address Register: CB_FAR */ +#define SET_CB_FAR_FADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FAR, FADDR, v) + +#define GET_CB_FAR_FADDR(b, c) GET_CONTEXT_FIELD(b, c, CB_FAR, FADDR) + +/* Fault Status Register: CB_FSR */ +#define SET_CB_FSR_TF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, TF, v) +#define SET_CB_FSR_AFF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, AFF, v) +#define SET_CB_FSR_PF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, PF, v) +#define SET_CB_FSR_EF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, EF, v) +#define SET_CB_FSR_TLBMCF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, TLBMCF, v) +#define SET_CB_FSR_TLBLKF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, TLBLKF, v) +#define SET_CB_FSR_SS(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, SS, v) +#define SET_CB_FSR_MULTI(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, MULTI, v) + +#define GET_CB_FSR_TF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, TF) +#define GET_CB_FSR_AFF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, AFF) +#define GET_CB_FSR_PF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, PF) +#define GET_CB_FSR_EF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, EF) +#define GET_CB_FSR_TLBMCF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, TLBMCF) +#define GET_CB_FSR_TLBLKF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, TLBLKF) +#define GET_CB_FSR_SS(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, SS) +#define GET_CB_FSR_MULTI(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, MULTI) + +/* Fault Syndrome Register 0: CB_FSYNR0 */ +#define SET_CB_FSYNR0_PLVL(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSYNR0, PLVL, v) +#define SET_CB_FSYNR0_S1PTWF(b, c, v) \ + SET_CONTEXT_FIELD(b, c, CB_FSYNR0, S1PTWF, v) +#define SET_CB_FSYNR0_WNR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSYNR0, WNR, v) +#define SET_CB_FSYNR0_PNU(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSYNR0, PNU, v) +#define SET_CB_FSYNR0_IND(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSYNR0, IND, v) +#define SET_CB_FSYNR0_NSSTATE(b, c, v) \ + SET_CONTEXT_FIELD(b, c, CB_FSYNR0, NSSTATE, v) +#define SET_CB_FSYNR0_NSATTR(b, c, v) \ + SET_CONTEXT_FIELD(b, c, CB_FSYNR0, NSATTR, v) +#define 
SET_CB_FSYNR0_ATOF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSYNR0, ATOF, v) +#define SET_CB_FSYNR0_PTWF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSYNR0, PTWF, v) +#define SET_CB_FSYNR0_AFR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSYNR0, AFR, v) +#define SET_CB_FSYNR0_S1CBNDX(b, c, v) \ + SET_CONTEXT_FIELD(b, c, CB_FSYNR0, S1CBNDX, v) + +#define GET_CB_FSYNR0_PLVL(b, c) GET_CONTEXT_FIELD(b, c, CB_FSYNR0, PLVL) +#define GET_CB_FSYNR0_S1PTWF(b, c) \ + GET_CONTEXT_FIELD(b, c, CB_FSYNR0, S1PTWF) +#define GET_CB_FSYNR0_WNR(b, c) GET_CONTEXT_FIELD(b, c, CB_FSYNR0, WNR) +#define GET_CB_FSYNR0_PNU(b, c) GET_CONTEXT_FIELD(b, c, CB_FSYNR0, PNU) +#define GET_CB_FSYNR0_IND(b, c) GET_CONTEXT_FIELD(b, c, CB_FSYNR0, IND) +#define GET_CB_FSYNR0_NSSTATE(b, c) \ + GET_CONTEXT_FIELD(b, c, CB_FSYNR0, NSSTATE) +#define GET_CB_FSYNR0_NSATTR(b, c) \ + GET_CONTEXT_FIELD(b, c, CB_FSYNR0, NSATTR) +#define GET_CB_FSYNR0_ATOF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSYNR0, ATOF) +#define GET_CB_FSYNR0_PTWF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSYNR0, PTWF) +#define GET_CB_FSYNR0_AFR(b, c) GET_CONTEXT_FIELD(b, c, CB_FSYNR0, AFR) +#define GET_CB_FSYNR0_S1CBNDX(b, c) \ + GET_CONTEXT_FIELD(b, c, CB_FSYNR0, S1CBNDX) + +/* Normal Memory Remap Register: CB_NMRR */ +#define SET_CB_NMRR_IR0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR0, v) +#define SET_CB_NMRR_IR1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR1, v) +#define SET_CB_NMRR_IR2(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR2, v) +#define SET_CB_NMRR_IR3(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR3, v) +#define SET_CB_NMRR_IR4(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR4, v) +#define SET_CB_NMRR_IR5(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR5, v) +#define SET_CB_NMRR_IR6(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR6, v) +#define SET_CB_NMRR_IR7(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR7, v) +#define SET_CB_NMRR_OR0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR0, v) +#define SET_CB_NMRR_OR1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR1, v) +#define 
SET_CB_NMRR_OR2(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR2, v) +#define SET_CB_NMRR_OR3(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR3, v) +#define SET_CB_NMRR_OR4(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR4, v) +#define SET_CB_NMRR_OR5(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR5, v) +#define SET_CB_NMRR_OR6(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR6, v) +#define SET_CB_NMRR_OR7(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR7, v) + +#define GET_CB_NMRR_IR0(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR0) +#define GET_CB_NMRR_IR1(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR1) +#define GET_CB_NMRR_IR2(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR2) +#define GET_CB_NMRR_IR3(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR3) +#define GET_CB_NMRR_IR4(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR4) +#define GET_CB_NMRR_IR5(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR5) +#define GET_CB_NMRR_IR6(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR6) +#define GET_CB_NMRR_IR7(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR7) +#define GET_CB_NMRR_OR0(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, OR0) +#define GET_CB_NMRR_OR1(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, OR1) +#define GET_CB_NMRR_OR2(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, OR2) +#define GET_CB_NMRR_OR3(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, OR3) +#define GET_CB_NMRR_OR4(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, OR4) +#define GET_CB_NMRR_OR5(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, OR5) + +/* Physical Address Register: CB_PAR */ +#define SET_CB_PAR_F(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, F, v) +#define SET_CB_PAR_SS(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, SS, v) +#define SET_CB_PAR_OUTER(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, OUTER, v) +#define SET_CB_PAR_INNER(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, INNER, v) +#define SET_CB_PAR_SH(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, SH, v) +#define SET_CB_PAR_NS(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, NS, v) +#define SET_CB_PAR_NOS(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, NOS, v) +#define SET_CB_PAR_PA(b, c, v) 
SET_CONTEXT_FIELD(b, c, CB_PAR, PA, v) +#define SET_CB_PAR_TF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, TF, v) +#define SET_CB_PAR_AFF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, AFF, v) +#define SET_CB_PAR_PF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, PF, v) +#define SET_CB_PAR_TLBMCF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, TLBMCF, v) +#define SET_CB_PAR_TLBLKF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, TLBLKF, v) +#define SET_CB_PAR_ATOT(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, ATOT, v) +#define SET_CB_PAR_PLVL(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, PLVL, v) +#define SET_CB_PAR_STAGE(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, STAGE, v) + +#define GET_CB_PAR_F(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, F) +#define GET_CB_PAR_SS(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, SS) +#define GET_CB_PAR_OUTER(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, OUTER) +#define GET_CB_PAR_INNER(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, INNER) +#define GET_CB_PAR_SH(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, SH) +#define GET_CB_PAR_NS(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, NS) +#define GET_CB_PAR_NOS(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, NOS) +#define GET_CB_PAR_PA(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, PA) +#define GET_CB_PAR_TF(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, TF) +#define GET_CB_PAR_AFF(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, AFF) +#define GET_CB_PAR_PF(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, PF) +#define GET_CB_PAR_TLBMCF(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, TLBMCF) +#define GET_CB_PAR_TLBLKF(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, TLBLKF) +#define GET_CB_PAR_ATOT(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, ATOT) +#define GET_CB_PAR_PLVL(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, PLVL) +#define GET_CB_PAR_STAGE(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, STAGE) + +/* Primary Region Remap Register: CB_PRRR */ +#define SET_CB_PRRR_TR0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR0, v) +#define SET_CB_PRRR_TR1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR1, v) +#define SET_CB_PRRR_TR2(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR2, v) 
+#define SET_CB_PRRR_TR3(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR3, v) +#define SET_CB_PRRR_TR4(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR4, v) +#define SET_CB_PRRR_TR5(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR5, v) +#define SET_CB_PRRR_TR6(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR6, v) +#define SET_CB_PRRR_TR7(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR7, v) +#define SET_CB_PRRR_DS0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, DS0, v) +#define SET_CB_PRRR_DS1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, DS1, v) +#define SET_CB_PRRR_NS0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NS0, v) +#define SET_CB_PRRR_NS1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NS1, v) +#define SET_CB_PRRR_NOS0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS0, v) +#define SET_CB_PRRR_NOS1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS1, v) +#define SET_CB_PRRR_NOS2(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS2, v) +#define SET_CB_PRRR_NOS3(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS3, v) +#define SET_CB_PRRR_NOS4(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS4, v) +#define SET_CB_PRRR_NOS5(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS5, v) +#define SET_CB_PRRR_NOS6(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS6, v) +#define SET_CB_PRRR_NOS7(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS7, v) + +#define GET_CB_PRRR_TR0(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR0) +#define GET_CB_PRRR_TR1(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR1) +#define GET_CB_PRRR_TR2(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR2) +#define GET_CB_PRRR_TR3(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR3) +#define GET_CB_PRRR_TR4(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR4) +#define GET_CB_PRRR_TR5(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR5) +#define GET_CB_PRRR_TR6(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR6) +#define GET_CB_PRRR_TR7(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR7) +#define GET_CB_PRRR_DS0(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, DS0) +#define GET_CB_PRRR_DS1(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, DS1) +#define 
GET_CB_PRRR_NS0(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NS0) +#define GET_CB_PRRR_NS1(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NS1) +#define GET_CB_PRRR_NOS0(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS0) +#define GET_CB_PRRR_NOS1(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS1) +#define GET_CB_PRRR_NOS2(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS2) +#define GET_CB_PRRR_NOS3(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS3) +#define GET_CB_PRRR_NOS4(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS4) +#define GET_CB_PRRR_NOS5(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS5) +#define GET_CB_PRRR_NOS6(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS6) +#define GET_CB_PRRR_NOS7(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS7) + +/* Transaction Resume: CB_RESUME */ +#define SET_CB_RESUME_TNR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_RESUME, TNR, v) + +#define GET_CB_RESUME_TNR(b, c) GET_CONTEXT_FIELD(b, c, CB_RESUME, TNR) + +/* System Control Register: CB_SCTLR */ +#define SET_CB_SCTLR_M(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, M, v) +#define SET_CB_SCTLR_TRE(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, TRE, v) +#define SET_CB_SCTLR_AFE(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, AFE, v) +#define SET_CB_SCTLR_AFFD(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, AFFD, v) +#define SET_CB_SCTLR_E(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, E, v) +#define SET_CB_SCTLR_CFRE(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, CFRE, v) +#define SET_CB_SCTLR_CFIE(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, CFIE, v) +#define SET_CB_SCTLR_CFCFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, CFCFG, v) +#define SET_CB_SCTLR_HUPCF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, HUPCF, v) +#define SET_CB_SCTLR_WXN(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, WXN, v) +#define SET_CB_SCTLR_UWXN(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, UWXN, v) +#define SET_CB_SCTLR_ASIDPNE(b, c, v) \ + SET_CONTEXT_FIELD(b, c, CB_SCTLR, ASIDPNE, v) +#define SET_CB_SCTLR_TRANSIENTCFG(b, c, v) \ + SET_CONTEXT_FIELD(b, c, CB_SCTLR, TRANSIENTCFG, v) +#define 
SET_CB_SCTLR_MEMATTR(b, c, v) \ + SET_CONTEXT_FIELD(b, c, CB_SCTLR, MEMATTR, v) +#define SET_CB_SCTLR_MTCFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, MTCFG, v) +#define SET_CB_SCTLR_SHCFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, SHCFG, v) +#define SET_CB_SCTLR_RACFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, RACFG, v) +#define SET_CB_SCTLR_WACFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, WACFG, v) +#define SET_CB_SCTLR_NSCFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, NSCFG, v) + +#define GET_CB_SCTLR_M(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, M) +#define GET_CB_SCTLR_TRE(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, TRE) +#define GET_CB_SCTLR_AFE(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, AFE) +#define GET_CB_SCTLR_AFFD(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, AFFD) +#define GET_CB_SCTLR_E(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, E) +#define GET_CB_SCTLR_CFRE(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, CFRE) +#define GET_CB_SCTLR_CFIE(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, CFIE) +#define GET_CB_SCTLR_CFCFG(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, CFCFG) +#define GET_CB_SCTLR_HUPCF(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, HUPCF) +#define GET_CB_SCTLR_WXN(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, WXN) +#define GET_CB_SCTLR_UWXN(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, UWXN) +#define GET_CB_SCTLR_ASIDPNE(b, c) \ + GET_CONTEXT_FIELD(b, c, CB_SCTLR, ASIDPNE) +#define GET_CB_SCTLR_TRANSIENTCFG(b, c) \ + GET_CONTEXT_FIELD(b, c, CB_SCTLR, TRANSIENTCFG) +#define GET_CB_SCTLR_MEMATTR(b, c) \ + GET_CONTEXT_FIELD(b, c, CB_SCTLR, MEMATTR) +#define GET_CB_SCTLR_MTCFG(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, MTCFG) +#define GET_CB_SCTLR_SHCFG(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, SHCFG) +#define GET_CB_SCTLR_RACFG(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, RACFG) +#define GET_CB_SCTLR_WACFG(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, WACFG) +#define GET_CB_SCTLR_NSCFG(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, NSCFG) + +/* Invalidate TLB by ASID: CB_TLBIASID */ +#define SET_CB_TLBIASID_ASID(b, c, v) \ + 
SET_CONTEXT_FIELD(b, c, CB_TLBIASID, ASID, v) + +#define GET_CB_TLBIASID_ASID(b, c) \ + GET_CONTEXT_FIELD(b, c, CB_TLBIASID, ASID) + +/* Invalidate TLB by VA: CB_TLBIVA */ +#define SET_CB_TLBIVA_ASID(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TLBIVA, ASID, v) +#define SET_CB_TLBIVA_VA(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TLBIVA, VA, v) + +#define GET_CB_TLBIVA_ASID(b, c) GET_CONTEXT_FIELD(b, c, CB_TLBIVA, ASID) +#define GET_CB_TLBIVA_VA(b, c) GET_CONTEXT_FIELD(b, c, CB_TLBIVA, VA) + +/* Invalidate TLB by VA, All ASID: CB_TLBIVAA */ +#define SET_CB_TLBIVAA_VA(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TLBIVAA, VA, v) + +#define GET_CB_TLBIVAA_VA(b, c) GET_CONTEXT_FIELD(b, c, CB_TLBIVAA, VA) + +/* Invalidate TLB by VA, All ASID, Last Level: CB_TLBIVAAL */ +#define SET_CB_TLBIVAAL_VA(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TLBIVAAL, VA, v) + +#define GET_CB_TLBIVAAL_VA(b, c) GET_CONTEXT_FIELD(b, c, CB_TLBIVAAL, VA) + +/* Invalidate TLB by VA, Last Level: CB_TLBIVAL */ +#define SET_CB_TLBIVAL_ASID(b, c, v) \ + SET_CONTEXT_FIELD(b, c, CB_TLBIVAL, ASID, v) +#define SET_CB_TLBIVAL_VA(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TLBIVAL, VA, v) + +#define GET_CB_TLBIVAL_ASID(b, c) \ + GET_CONTEXT_FIELD(b, c, CB_TLBIVAL, ASID) +#define GET_CB_TLBIVAL_VA(b, c) GET_CONTEXT_FIELD(b, c, CB_TLBIVAL, VA) + +/* TLB Status: CB_TLBSTATUS */ +#define SET_CB_TLBSTATUS_SACTIVE(b, c, v) \ + SET_CONTEXT_FIELD(b, c, CB_TLBSTATUS, SACTIVE, v) + +#define GET_CB_TLBSTATUS_SACTIVE(b, c) \ + GET_CONTEXT_FIELD(b, c, CB_TLBSTATUS, SACTIVE) + +/* Translation Table Base Control Register: CB_TTBCR */ +/* These are shared between VMSA and LPAE */ +#define GET_CB_TTBCR_EAE(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBCR, EAE) +#define SET_CB_TTBCR_EAE(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, EAE, v) + +#define SET_CB_TTBCR_NSCFG0(b, c, v) \ + SET_CONTEXT_FIELD(b, c, CB_TTBCR, NSCFG0, v) +#define SET_CB_TTBCR_NSCFG1(b, c, v) \ + SET_CONTEXT_FIELD(b, c, CB_TTBCR, NSCFG1, v) + +#define GET_CB_TTBCR_NSCFG0(b, c) \ + GET_CONTEXT_FIELD(b, 
c, CB_TTBCR, NSCFG0) +#define GET_CB_TTBCR_NSCFG1(b, c) \ + GET_CONTEXT_FIELD(b, c, CB_TTBCR, NSCFG1) + +#ifdef CONFIG_IOMMU_LPAE + +/* LPAE format */ + +/* Translation Table Base Register 0: CB_TTBR */ +#define SET_TTBR0(b, c, v) SET_CTX_REG_Q(CB_TTBR0, (b), (c), (v)) +#define SET_TTBR1(b, c, v) SET_CTX_REG_Q(CB_TTBR1, (b), (c), (v)) + +#define SET_CB_TTBR0_ASID(b, c, v) SET_CONTEXT_FIELD_Q(b, c, CB_TTBR0, ASID, v) +#define SET_CB_TTBR0_ADDR(b, c, v) SET_CONTEXT_FIELD_Q(b, c, CB_TTBR0, ADDR, v) + +#define GET_CB_TTBR0_ASID(b, c) GET_CONTEXT_FIELD_Q(b, c, CB_TTBR0, ASID) +#define GET_CB_TTBR0_ADDR(b, c) GET_CONTEXT_FIELD_Q(b, c, CB_TTBR0, ADDR) +#define GET_CB_TTBR0(b, c) GET_CTX_REG_Q(CB_TTBR0, (b), (c)) + +/* Translation Table Base Control Register: CB_TTBCR */ +#define SET_CB_TTBCR_T0SZ(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, T0SZ, v) +#define SET_CB_TTBCR_T1SZ(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, T1SZ, v) +#define SET_CB_TTBCR_EPD0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, EPD0, v) +#define SET_CB_TTBCR_EPD1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, EPD1, v) +#define SET_CB_TTBCR_IRGN0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, IRGN0, v) +#define SET_CB_TTBCR_IRGN1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, IRGN1, v) +#define SET_CB_TTBCR_ORGN0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, ORGN0, v) +#define SET_CB_TTBCR_ORGN1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, ORGN1, v) +#define SET_CB_TTBCR_NSCFG0(b, c, v) \ + SET_CONTEXT_FIELD(b, c, CB_TTBCR, NSCFG0, v) +#define SET_CB_TTBCR_NSCFG1(b, c, v) \ + SET_CONTEXT_FIELD(b, c, CB_TTBCR, NSCFG1, v) + +#define SET_CB_TTBCR_SH0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, SH0, v) +#define SET_CB_TTBCR_SH1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, SH1, v) +#define SET_CB_TTBCR_A1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, A1, v) + +#define GET_CB_TTBCR_T0SZ(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBCR, T0SZ) +#define GET_CB_TTBCR_T1SZ(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBCR, T1SZ) +#define GET_CB_TTBCR_EPD0(b, c) 
GET_CONTEXT_FIELD(b, c, CB_TTBCR, EPD0) +#define GET_CB_TTBCR_EPD1(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBCR, EPD1) +#define GET_CB_TTBCR_IRGN0(b, c, v) GET_CONTEXT_FIELD(b, c, CB_TTBCR, IRGN0) +#define GET_CB_TTBCR_IRGN1(b, c, v) GET_CONTEXT_FIELD(b, c, CB_TTBCR, IRGN1) +#define GET_CB_TTBCR_ORGN0(b, c, v) GET_CONTEXT_FIELD(b, c, CB_TTBCR, ORGN0) +#define GET_CB_TTBCR_ORGN1(b, c, v) GET_CONTEXT_FIELD(b, c, CB_TTBCR, ORGN1) + +#define SET_CB_MAIR0(b, c, v) SET_CTX_REG(CB_MAIR0, (b), (c), (v)) +#define SET_CB_MAIR1(b, c, v) SET_CTX_REG(CB_MAIR1, (b), (c), (v)) + +#define GET_CB_MAIR0(b, c) GET_CTX_REG(CB_MAIR0, (b), (c)) +#define GET_CB_MAIR1(b, c) GET_CTX_REG(CB_MAIR1, (b), (c)) +#else +#define SET_TTBR0(b, c, v) SET_CTX_REG(CB_TTBR0, (b), (c), (v)) +#define SET_TTBR1(b, c, v) SET_CTX_REG(CB_TTBR1, (b), (c), (v)) + +#define SET_CB_TTBCR_PD0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, PD0, v) +#define SET_CB_TTBCR_PD1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, PD1, v) + +#define SET_CB_TTBR0_IRGN1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR0, IRGN1, v) +#define SET_CB_TTBR0_S(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR0, S, v) +#define SET_CB_TTBR0_RGN(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR0, RGN, v) +#define SET_CB_TTBR0_NOS(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR0, NOS, v) +#define SET_CB_TTBR0_IRGN0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR0, IRGN0, v) +#define SET_CB_TTBR0_ADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR0, ADDR, v) + +#define GET_CB_TTBR0_IRGN1(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR0, IRGN1) +#define GET_CB_TTBR0_S(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR0, S) +#define GET_CB_TTBR0_RGN(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR0, RGN) +#define GET_CB_TTBR0_NOS(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR0, NOS) +#define GET_CB_TTBR0_IRGN0(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR0, IRGN0) +#define GET_CB_TTBR0_ADDR(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR0, ADDR) +#endif + +/* Translation Table Base Register 1: CB_TTBR1 */ +#define SET_CB_TTBR1_IRGN1(b, c, v) 
SET_CONTEXT_FIELD(b, c, CB_TTBR1, IRGN1, v) +#define SET_CB_TTBR1_0S(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR1, S, v) +#define SET_CB_TTBR1_RGN(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR1, RGN, v) +#define SET_CB_TTBR1_NOS(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR1, NOS, v) +#define SET_CB_TTBR1_IRGN0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR1, IRGN0, v) +#define SET_CB_TTBR1_ADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR1, ADDR, v) + +#define GET_CB_TTBR1_IRGN1(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR1, IRGN1) +#define GET_CB_TTBR1_0S(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR1, S) +#define GET_CB_TTBR1_RGN(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR1, RGN) +#define GET_CB_TTBR1_NOS(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR1, NOS) +#define GET_CB_TTBR1_IRGN0(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR1, IRGN0) +#define GET_CB_TTBR1_ADDR(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR1, ADDR) + +/* Global Register Space 0 */ +#define CR0 (0x0000) +#define SCR1 (0x0004) +#define CR2 (0x0008) +#define ACR (0x0010) +#define IDR0 (0x0020) +#define IDR1 (0x0024) +#define IDR2 (0x0028) +#define IDR7 (0x003C) +#define GFAR (0x0040) +#define GFSR (0x0048) +#define GFSRRESTORE (0x004C) +#define GFSYNR0 (0x0050) +#define GFSYNR1 (0x0054) +#define GFSYNR2 (0x0058) +#define TLBIVMID (0x0064) +#define TLBIALLNSNH (0x0068) +#define TLBIALLH (0x006C) +#define TLBGSYNC (0x0070) +#define TLBGSTATUS (0x0074) +#define TLBIVAH (0x0078) +#define GATS1UR (0x0100) +#define GATS1UW (0x0108) +#define GATS1PR (0x0110) +#define GATS1PW (0x0118) +#define GATS12UR (0x0120) +#define GATS12UW (0x0128) +#define GATS12PR (0x0130) +#define GATS12PW (0x0138) +#define GPAR (0x0180) +#define GATSR (0x0188) +#define NSCR0 (0x0400) +#define NSCR2 (0x0408) +#define NSACR (0x0410) +#define NSGFAR (0x0440) +#define NSGFSRRESTORE (0x044C) +#define SMR (0x0800) +#define S2CR (0x0C00) + +/* SMMU_LOCAL */ +#define SMMU_INTR_SEL_NS (0x2000) + +/* Global Register Space 1 */ +#define CBAR (0x1000) +#define CBFRSYNRA (0x1400) + +/* Implementation 
defined Register Space */ +#define MICRO_MMU_CTRL (0x2000) +#define PREDICTIONDIS0 (0x204C) +#define PREDICTIONDIS1 (0x2050) +#define S1L1BFBLP0 (0x215C) + +/* Performance Monitoring Register Space */ +#define PMEVCNTR_N (0x3000) +#define PMEVTYPER_N (0x3400) +#define PMCGCR_N (0x3800) +#define PMCGSMR_N (0x3A00) +#define PMCNTENSET_N (0x3C00) +#define PMCNTENCLR_N (0x3C20) +#define PMINTENSET_N (0x3C40) +#define PMINTENCLR_N (0x3C60) +#define PMOVSCLR_N (0x3C80) +#define PMOVSSET_N (0x3CC0) +#define PMCFGR (0x3E00) +#define PMCR (0x3E04) +#define PMCEID0 (0x3E20) +#define PMCEID1 (0x3E24) +#define PMAUTHSTATUS (0x3FB8) +#define PMDEVTYPE (0x3FCC) + +/* Secure Status Determination Address Space */ +#define SSDR_N (0x4000) + +/* Stage 1 Context Bank Format */ +#define CB_SCTLR (0x000) +#define CB_ACTLR (0x004) +#define CB_RESUME (0x008) +#define CB_TTBR0 (0x020) +#define CB_TTBR1 (0x028) +#define CB_TTBCR (0x030) +#define CB_CONTEXTIDR (0x034) +#define CB_PRRR (0x038) +#define CB_MAIR0 (0x038) +#define CB_NMRR (0x03C) +#define CB_MAIR1 (0x03C) +#define CB_PAR (0x050) +#define CB_FSR (0x058) +#define CB_FSRRESTORE (0x05C) +#define CB_FAR (0x060) +#define CB_FSYNR0 (0x068) +#define CB_FSYNR1 (0x06C) +#define CB_TLBIVA (0x600) +#define CB_TLBIVAA (0x608) +#define CB_TLBIASID (0x610) +#define CB_TLBIALL (0x618) +#define CB_TLBIVAL (0x620) +#define CB_TLBIVAAL (0x628) +#define CB_TLBSYNC (0x7F0) +#define CB_TLBSTATUS (0x7F4) +#define CB_ATS1PR (0x800) +#define CB_ATS1PW (0x808) +#define CB_ATS1UR (0x810) +#define CB_ATS1UW (0x818) +#define CB_ATSR (0x8F0) +#define CB_PMXEVCNTR_N (0xE00) +#define CB_PMXEVTYPER_N (0xE80) +#define CB_PMCFGR (0xF00) +#define CB_PMCR (0xF04) +#define CB_PMCEID0 (0xF20) +#define CB_PMCEID1 (0xF24) +#define CB_PMCNTENSET (0xF40) +#define CB_PMCNTENCLR (0xF44) +#define CB_PMINTENSET (0xF48) +#define CB_PMINTENCLR (0xF4C) +#define CB_PMOVSCLR (0xF50) +#define CB_PMOVSSET (0xF58) +#define CB_PMAUTHSTATUS (0xFB8) + +/* Global Register Fields */ +/* 
Configuration Register: CR0 */ +#define CR0_NSCFG (CR0_NSCFG_MASK << CR0_NSCFG_SHIFT) +#define CR0_WACFG (CR0_WACFG_MASK << CR0_WACFG_SHIFT) +#define CR0_RACFG (CR0_RACFG_MASK << CR0_RACFG_SHIFT) +#define CR0_SHCFG (CR0_SHCFG_MASK << CR0_SHCFG_SHIFT) +#define CR0_SMCFCFG (CR0_SMCFCFG_MASK << CR0_SMCFCFG_SHIFT) +#define CR0_MTCFG (CR0_MTCFG_MASK << CR0_MTCFG_SHIFT) +#define CR0_MEMATTR (CR0_MEMATTR_MASK << CR0_MEMATTR_SHIFT) +#define CR0_BSU (CR0_BSU_MASK << CR0_BSU_SHIFT) +#define CR0_FB (CR0_FB_MASK << CR0_FB_SHIFT) +#define CR0_PTM (CR0_PTM_MASK << CR0_PTM_SHIFT) +#define CR0_VMIDPNE (CR0_VMIDPNE_MASK << CR0_VMIDPNE_SHIFT) +#define CR0_USFCFG (CR0_USFCFG_MASK << CR0_USFCFG_SHIFT) +#define CR0_GSE (CR0_GSE_MASK << CR0_GSE_SHIFT) +#define CR0_STALLD (CR0_STALLD_MASK << CR0_STALLD_SHIFT) +#define CR0_TRANSIENTCFG (CR0_TRANSIENTCFG_MASK << CR0_TRANSIENTCFG_SHIFT) +#define CR0_GCFGFIE (CR0_GCFGFIE_MASK << CR0_GCFGFIE_SHIFT) +#define CR0_GCFGFRE (CR0_GCFGFRE_MASK << CR0_GCFGFRE_SHIFT) +#define CR0_GFIE (CR0_GFIE_MASK << CR0_GFIE_SHIFT) +#define CR0_GFRE (CR0_GFRE_MASK << CR0_GFRE_SHIFT) +#define CR0_CLIENTPD (CR0_CLIENTPD_MASK << CR0_CLIENTPD_SHIFT) + +/* Configuration Register: CR2 */ +#define CR2_BPVMID (CR2_BPVMID_MASK << CR2_BPVMID_SHIFT) + +/* Global Address Translation, Stage 1, Privileged Read: GATS1PR */ +#define GATS1PR_ADDR (GATS1PR_ADDR_MASK << GATS1PR_ADDR_SHIFT) +#define GATS1PR_NDX (GATS1PR_NDX_MASK << GATS1PR_NDX_SHIFT) + +/* Global Address Translation, Stage 1, Privileged Write: GATS1PW */ +#define GATS1PW_ADDR (GATS1PW_ADDR_MASK << GATS1PW_ADDR_SHIFT) +#define GATS1PW_NDX (GATS1PW_NDX_MASK << GATS1PW_NDX_SHIFT) + +/* Global Address Translation, Stage 1, User Read: GATS1UR */ +#define GATS1UR_ADDR (GATS1UR_ADDR_MASK << GATS1UR_ADDR_SHIFT) +#define GATS1UR_NDX (GATS1UR_NDX_MASK << GATS1UR_NDX_SHIFT) + +/* Global Address Translation, Stage 1, User Write: GATS1UW */ +#define GATS1UW_ADDR (GATS1UW_ADDR_MASK << GATS1UW_ADDR_SHIFT) +#define GATS1UW_NDX 
(GATS1UW_NDX_MASK << GATS1UW_NDX_SHIFT) + +/* Global Address Translation, Stage 1 and 2, Privileged Read: GATS1PR */ +#define GATS12PR_ADDR (GATS12PR_ADDR_MASK << GATS12PR_ADDR_SHIFT) +#define GATS12PR_NDX (GATS12PR_NDX_MASK << GATS12PR_NDX_SHIFT) + +/* Global Address Translation, Stage 1 and 2, Privileged Write: GATS1PW */ +#define GATS12PW_ADDR (GATS12PW_ADDR_MASK << GATS12PW_ADDR_SHIFT) +#define GATS12PW_NDX (GATS12PW_NDX_MASK << GATS12PW_NDX_SHIFT) + +/* Global Address Translation, Stage 1 and 2, User Read: GATS1UR */ +#define GATS12UR_ADDR (GATS12UR_ADDR_MASK << GATS12UR_ADDR_SHIFT) +#define GATS12UR_NDX (GATS12UR_NDX_MASK << GATS12UR_NDX_SHIFT) + +/* Global Address Translation, Stage 1 and 2, User Write: GATS1UW */ +#define GATS12UW_ADDR (GATS12UW_ADDR_MASK << GATS12UW_ADDR_SHIFT) +#define GATS12UW_NDX (GATS12UW_NDX_MASK << GATS12UW_NDX_SHIFT) + +/* Global Address Translation Status Register: GATSR */ +#define GATSR_ACTIVE (GATSR_ACTIVE_MASK << GATSR_ACTIVE_SHIFT) + +/* Global Fault Address Register: GFAR */ +#define GFAR_FADDR (GFAR_FADDR_MASK << GFAR_FADDR_SHIFT) + +/* Global Fault Status Register: GFSR */ +#define GFSR_ICF (GFSR_ICF_MASK << GFSR_ICF_SHIFT) +#define GFSR_USF (GFSR_USF_MASK << GFSR_USF_SHIFT) +#define GFSR_SMCF (GFSR_SMCF_MASK << GFSR_SMCF_SHIFT) +#define GFSR_UCBF (GFSR_UCBF_MASK << GFSR_UCBF_SHIFT) +#define GFSR_UCIF (GFSR_UCIF_MASK << GFSR_UCIF_SHIFT) +#define GFSR_CAF (GFSR_CAF_MASK << GFSR_CAF_SHIFT) +#define GFSR_EF (GFSR_EF_MASK << GFSR_EF_SHIFT) +#define GFSR_PF (GFSR_PF_MASK << GFSR_PF_SHIFT) +#define GFSR_MULTI (GFSR_MULTI_MASK << GFSR_MULTI_SHIFT) + +/* Global Fault Syndrome Register 0: GFSYNR0 */ +#define GFSYNR0_NESTED (GFSYNR0_NESTED_MASK << GFSYNR0_NESTED_SHIFT) +#define GFSYNR0_WNR (GFSYNR0_WNR_MASK << GFSYNR0_WNR_SHIFT) +#define GFSYNR0_PNU (GFSYNR0_PNU_MASK << GFSYNR0_PNU_SHIFT) +#define GFSYNR0_IND (GFSYNR0_IND_MASK << GFSYNR0_IND_SHIFT) +#define GFSYNR0_NSSTATE (GFSYNR0_NSSTATE_MASK << GFSYNR0_NSSTATE_SHIFT) +#define 
GFSYNR0_NSATTR (GFSYNR0_NSATTR_MASK << GFSYNR0_NSATTR_SHIFT) + +/* Global Fault Syndrome Register 1: GFSYNR1 */ +#define GFSYNR1_SID (GFSYNR1_SID_MASK << GFSYNR1_SID_SHIFT) + +/* Global Physical Address Register: GPAR */ +#define GPAR_F (GPAR_F_MASK << GPAR_F_SHIFT) +#define GPAR_SS (GPAR_SS_MASK << GPAR_SS_SHIFT) +#define GPAR_OUTER (GPAR_OUTER_MASK << GPAR_OUTER_SHIFT) +#define GPAR_INNER (GPAR_INNER_MASK << GPAR_INNER_SHIFT) +#define GPAR_SH (GPAR_SH_MASK << GPAR_SH_SHIFT) +#define GPAR_NS (GPAR_NS_MASK << GPAR_NS_SHIFT) +#define GPAR_NOS (GPAR_NOS_MASK << GPAR_NOS_SHIFT) +#define GPAR_PA (GPAR_PA_MASK << GPAR_PA_SHIFT) +#define GPAR_TF (GPAR_TF_MASK << GPAR_TF_SHIFT) +#define GPAR_AFF (GPAR_AFF_MASK << GPAR_AFF_SHIFT) +#define GPAR_PF (GPAR_PF_MASK << GPAR_PF_SHIFT) +#define GPAR_EF (GPAR_EF_MASK << GPAR_EF_SHIFT) +#define GPAR_TLCMCF (GPAR_TLBMCF_MASK << GPAR_TLCMCF_SHIFT) +#define GPAR_TLBLKF (GPAR_TLBLKF_MASK << GPAR_TLBLKF_SHIFT) +#define GPAR_UCBF (GPAR_UCBF_MASK << GFAR_UCBF_SHIFT) + +/* Identification Register: IDR0 */ +#define IDR0_NUMSMRG (IDR0_NUMSMRG_MASK << IDR0_NUMSMGR_SHIFT) +#define IDR0_NUMSIDB (IDR0_NUMSIDB_MASK << IDR0_NUMSIDB_SHIFT) +#define IDR0_BTM (IDR0_BTM_MASK << IDR0_BTM_SHIFT) +#define IDR0_CTTW (IDR0_CTTW_MASK << IDR0_CTTW_SHIFT) +#define IDR0_NUMIRPT (IDR0_NUMIPRT_MASK << IDR0_NUMIRPT_SHIFT) +#define IDR0_PTFS (IDR0_PTFS_MASK << IDR0_PTFS_SHIFT) +#define IDR0_SMS (IDR0_SMS_MASK << IDR0_SMS_SHIFT) +#define IDR0_NTS (IDR0_NTS_MASK << IDR0_NTS_SHIFT) +#define IDR0_S2TS (IDR0_S2TS_MASK << IDR0_S2TS_SHIFT) +#define IDR0_S1TS (IDR0_S1TS_MASK << IDR0_S1TS_SHIFT) +#define IDR0_SES (IDR0_SES_MASK << IDR0_SES_SHIFT) + +/* Identification Register: IDR1 */ +#define IDR1_NUMCB (IDR1_NUMCB_MASK << IDR1_NUMCB_SHIFT) +#define IDR1_NUMSSDNDXB (IDR1_NUMSSDNDXB_MASK << IDR1_NUMSSDNDXB_SHIFT) +#define IDR1_SSDTP (IDR1_SSDTP_MASK << IDR1_SSDTP_SHIFT) +#define IDR1_SMCD (IDR1_SMCD_MASK << IDR1_SMCD_SHIFT) +#define IDR1_NUMS2CB (IDR1_NUMS2CB_MASK << 
IDR1_NUMS2CB_SHIFT) +#define IDR1_NUMPAGENDXB (IDR1_NUMPAGENDXB_MASK << IDR1_NUMPAGENDXB_SHIFT) +#define IDR1_PAGESIZE (IDR1_PAGESIZE_MASK << IDR1_PAGESIZE_SHIFT) + +/* Identification Register: IDR2 */ +#define IDR2_IAS (IDR2_IAS_MASK << IDR2_IAS_SHIFT) +#define IDR1_OAS (IDR2_OAS_MASK << IDR2_OAS_SHIFT) + +/* Identification Register: IDR7 */ +#define IDR7_MINOR (IDR7_MINOR_MASK << IDR7_MINOR_SHIFT) +#define IDR7_MAJOR (IDR7_MAJOR_MASK << IDR7_MAJOR_SHIFT) + +/* Stream to Context Register: S2CR */ +#define S2CR_CBNDX (S2CR_CBNDX_MASK << S2cR_CBNDX_SHIFT) +#define S2CR_SHCFG (S2CR_SHCFG_MASK << s2CR_SHCFG_SHIFT) +#define S2CR_MTCFG (S2CR_MTCFG_MASK << S2CR_MTCFG_SHIFT) +#define S2CR_MEMATTR (S2CR_MEMATTR_MASK << S2CR_MEMATTR_SHIFT) +#define S2CR_TYPE (S2CR_TYPE_MASK << S2CR_TYPE_SHIFT) +#define S2CR_NSCFG (S2CR_NSCFG_MASK << S2CR_NSCFG_SHIFT) +#define S2CR_RACFG (S2CR_RACFG_MASK << S2CR_RACFG_SHIFT) +#define S2CR_WACFG (S2CR_WACFG_MASK << S2CR_WACFG_SHIFT) +#define S2CR_PRIVCFG (S2CR_PRIVCFG_MASK << S2CR_PRIVCFG_SHIFT) +#define S2CR_INSTCFG (S2CR_INSTCFG_MASK << S2CR_INSTCFG_SHIFT) +#define S2CR_TRANSIENTCFG (S2CR_TRANSIENTCFG_MASK << S2CR_TRANSIENTCFG_SHIFT) +#define S2CR_VMID (S2CR_VMID_MASK << S2CR_VMID_SHIFT) +#define S2CR_BSU (S2CR_BSU_MASK << S2CR_BSU_SHIFT) +#define S2CR_FB (S2CR_FB_MASK << S2CR_FB_SHIFT) + +/* Stream Match Register: SMR */ +#define SMR_ID (SMR_ID_MASK << SMR_ID_SHIFT) +#define SMR_MASK (SMR_MASK_MASK << SMR_MASK_SHIFT) +#define SMR_VALID (SMR_VALID_MASK << SMR_VALID_SHIFT) + +/* Global TLB Status: TLBGSTATUS */ +#define TLBGSTATUS_GSACTIVE (TLBGSTATUS_GSACTIVE_MASK << \ + TLBGSTATUS_GSACTIVE_SHIFT) +/* Invalidate Hyp TLB by VA: TLBIVAH */ +#define TLBIVAH_ADDR (TLBIVAH_ADDR_MASK << TLBIVAH_ADDR_SHIFT) + +/* Invalidate TLB by VMID: TLBIVMID */ +#define TLBIVMID_VMID (TLBIVMID_VMID_MASK << TLBIVMID_VMID_SHIFT) + +/* Context Bank Attribute Register: CBAR */ +#define CBAR_VMID (CBAR_VMID_MASK << CBAR_VMID_SHIFT) +#define CBAR_CBNDX 
(CBAR_CBNDX_MASK << CBAR_CBNDX_SHIFT) +#define CBAR_BPSHCFG (CBAR_BPSHCFG_MASK << CBAR_BPSHCFG_SHIFT) +#define CBAR_HYPC (CBAR_HYPC_MASK << CBAR_HYPC_SHIFT) +#define CBAR_FB (CBAR_FB_MASK << CBAR_FB_SHIFT) +#define CBAR_MEMATTR (CBAR_MEMATTR_MASK << CBAR_MEMATTR_SHIFT) +#define CBAR_TYPE (CBAR_TYPE_MASK << CBAR_TYPE_SHIFT) +#define CBAR_BSU (CBAR_BSU_MASK << CBAR_BSU_SHIFT) +#define CBAR_RACFG (CBAR_RACFG_MASK << CBAR_RACFG_SHIFT) +#define CBAR_WACFG (CBAR_WACFG_MASK << CBAR_WACFG_SHIFT) +#define CBAR_IRPTNDX (CBAR_IRPTNDX_MASK << CBAR_IRPTNDX_SHIFT) + +/* Context Bank Fault Restricted Syndrome Register A: CBFRSYNRA */ +#define CBFRSYNRA_SID (CBFRSYNRA_SID_MASK << CBFRSYNRA_SID_SHIFT) + +/* Performance Monitoring Register Fields */ + +/* Stage 1 Context Bank Format Fields */ +/* Auxiliary Control Register: CB_ACTLR */ +#define CB_ACTLR_REQPRIORITY \ + (CB_ACTLR_REQPRIORITY_MASK << CB_ACTLR_REQPRIORITY_SHIFT) +#define CB_ACTLR_REQPRIORITYCFG \ + (CB_ACTLR_REQPRIORITYCFG_MASK << CB_ACTLR_REQPRIORITYCFG_SHIFT) +#define CB_ACTLR_PRIVCFG (CB_ACTLR_PRIVCFG_MASK << CB_ACTLR_PRIVCFG_SHIFT) +#define CB_ACTLR_BPRCOSH (CB_ACTLR_BPRCOSH_MASK << CB_ACTLR_BPRCOSH_SHIFT) +#define CB_ACTLR_BPRCISH (CB_ACTLR_BPRCISH_MASK << CB_ACTLR_BPRCISH_SHIFT) +#define CB_ACTLR_BPRCNSH (CB_ACTLR_BPRCNSH_MASK << CB_ACTLR_BPRCNSH_SHIFT) + +/* Address Translation, Stage 1, Privileged Read: CB_ATS1PR */ +#define CB_ATS1PR_ADDR (CB_ATS1PR_ADDR_MASK << CB_ATS1PR_ADDR_SHIFT) + +/* Address Translation, Stage 1, Privileged Write: CB_ATS1PW */ +#define CB_ATS1PW_ADDR (CB_ATS1PW_ADDR_MASK << CB_ATS1PW_ADDR_SHIFT) + +/* Address Translation, Stage 1, User Read: CB_ATS1UR */ +#define CB_ATS1UR_ADDR (CB_ATS1UR_ADDR_MASK << CB_ATS1UR_ADDR_SHIFT) + +/* Address Translation, Stage 1, User Write: CB_ATS1UW */ +#define CB_ATS1UW_ADDR (CB_ATS1UW_ADDR_MASK << CB_ATS1UW_ADDR_SHIFT) + +/* Address Translation Status Register: CB_ATSR */ +#define CB_ATSR_ACTIVE (CB_ATSR_ACTIVE_MASK << CB_ATSR_ACTIVE_SHIFT) + +/* Context 
ID Register: CB_CONTEXTIDR */ +#define CB_CONTEXTIDR_ASID (CB_CONTEXTIDR_ASID_MASK << \ + CB_CONTEXTIDR_ASID_SHIFT) +#define CB_CONTEXTIDR_PROCID (CB_CONTEXTIDR_PROCID_MASK << \ + CB_CONTEXTIDR_PROCID_SHIFT) + +/* Fault Address Register: CB_FAR */ +#define CB_FAR_FADDR (CB_FAR_FADDR_MASK << CB_FAR_FADDR_SHIFT) + +/* Fault Status Register: CB_FSR */ +#define CB_FSR_TF (CB_FSR_TF_MASK << CB_FSR_TF_SHIFT) +#define CB_FSR_AFF (CB_FSR_AFF_MASK << CB_FSR_AFF_SHIFT) +#define CB_FSR_PF (CB_FSR_PF_MASK << CB_FSR_PF_SHIFT) +#define CB_FSR_EF (CB_FSR_EF_MASK << CB_FSR_EF_SHIFT) +#define CB_FSR_TLBMCF (CB_FSR_TLBMCF_MASK << CB_FSR_TLBMCF_SHIFT) +#define CB_FSR_TLBLKF (CB_FSR_TLBLKF_MASK << CB_FSR_TLBLKF_SHIFT) +#define CB_FSR_SS (CB_FSR_SS_MASK << CB_FSR_SS_SHIFT) +#define CB_FSR_MULTI (CB_FSR_MULTI_MASK << CB_FSR_MULTI_SHIFT) + +/* Fault Syndrome Register 0: CB_FSYNR0 */ +#define CB_FSYNR0_PLVL (CB_FSYNR0_PLVL_MASK << CB_FSYNR0_PLVL_SHIFT) +#define CB_FSYNR0_S1PTWF (CB_FSYNR0_S1PTWF_MASK << CB_FSYNR0_S1PTWF_SHIFT) +#define CB_FSYNR0_WNR (CB_FSYNR0_WNR_MASK << CB_FSYNR0_WNR_SHIFT) +#define CB_FSYNR0_PNU (CB_FSYNR0_PNU_MASK << CB_FSYNR0_PNU_SHIFT) +#define CB_FSYNR0_IND (CB_FSYNR0_IND_MASK << CB_FSYNR0_IND_SHIFT) +#define CB_FSYNR0_NSSTATE (CB_FSYNR0_NSSTATE_MASK << CB_FSYNR0_NSSTATE_SHIFT) +#define CB_FSYNR0_NSATTR (CB_FSYNR0_NSATTR_MASK << CB_FSYNR0_NSATTR_SHIFT) +#define CB_FSYNR0_ATOF (CB_FSYNR0_ATOF_MASK << CB_FSYNR0_ATOF_SHIFT) +#define CB_FSYNR0_PTWF (CB_FSYNR0_PTWF_MASK << CB_FSYNR0_PTWF_SHIFT) +#define CB_FSYNR0_AFR (CB_FSYNR0_AFR_MASK << CB_FSYNR0_AFR_SHIFT) +#define CB_FSYNR0_S1CBNDX (CB_FSYNR0_S1CBNDX_MASK << CB_FSYNR0_S1CBNDX_SHIFT) + +/* Normal Memory Remap Register: CB_NMRR */ +#define CB_NMRR_IR0 (CB_NMRR_IR0_MASK << CB_NMRR_IR0_SHIFT) +#define CB_NMRR_IR1 (CB_NMRR_IR1_MASK << CB_NMRR_IR1_SHIFT) +#define CB_NMRR_IR2 (CB_NMRR_IR2_MASK << CB_NMRR_IR2_SHIFT) +#define CB_NMRR_IR3 (CB_NMRR_IR3_MASK << CB_NMRR_IR3_SHIFT) +#define CB_NMRR_IR4 (CB_NMRR_IR4_MASK << 
CB_NMRR_IR4_SHIFT) +#define CB_NMRR_IR5 (CB_NMRR_IR5_MASK << CB_NMRR_IR5_SHIFT) +#define CB_NMRR_IR6 (CB_NMRR_IR6_MASK << CB_NMRR_IR6_SHIFT) +#define CB_NMRR_IR7 (CB_NMRR_IR7_MASK << CB_NMRR_IR7_SHIFT) +#define CB_NMRR_OR0 (CB_NMRR_OR0_MASK << CB_NMRR_OR0_SHIFT) +#define CB_NMRR_OR1 (CB_NMRR_OR1_MASK << CB_NMRR_OR1_SHIFT) +#define CB_NMRR_OR2 (CB_NMRR_OR2_MASK << CB_NMRR_OR2_SHIFT) +#define CB_NMRR_OR3 (CB_NMRR_OR3_MASK << CB_NMRR_OR3_SHIFT) +#define CB_NMRR_OR4 (CB_NMRR_OR4_MASK << CB_NMRR_OR4_SHIFT) +#define CB_NMRR_OR5 (CB_NMRR_OR5_MASK << CB_NMRR_OR5_SHIFT) +#define CB_NMRR_OR6 (CB_NMRR_OR6_MASK << CB_NMRR_OR6_SHIFT) +#define CB_NMRR_OR7 (CB_NMRR_OR7_MASK << CB_NMRR_OR7_SHIFT) + +/* Physical Address Register: CB_PAR */ +#define CB_PAR_F (CB_PAR_F_MASK << CB_PAR_F_SHIFT) +#define CB_PAR_SS (CB_PAR_SS_MASK << CB_PAR_SS_SHIFT) +#define CB_PAR_OUTER (CB_PAR_OUTER_MASK << CB_PAR_OUTER_SHIFT) +#define CB_PAR_INNER (CB_PAR_INNER_MASK << CB_PAR_INNER_SHIFT) +#define CB_PAR_SH (CB_PAR_SH_MASK << CB_PAR_SH_SHIFT) +#define CB_PAR_NS (CB_PAR_NS_MASK << CB_PAR_NS_SHIFT) +#define CB_PAR_NOS (CB_PAR_NOS_MASK << CB_PAR_NOS_SHIFT) +#define CB_PAR_PA (CB_PAR_PA_MASK << CB_PAR_PA_SHIFT) +#define CB_PAR_TF (CB_PAR_TF_MASK << CB_PAR_TF_SHIFT) +#define CB_PAR_AFF (CB_PAR_AFF_MASK << CB_PAR_AFF_SHIFT) +#define CB_PAR_PF (CB_PAR_PF_MASK << CB_PAR_PF_SHIFT) +#define CB_PAR_EF (CB_PAR_EF_MASK << CB_PAR_EF_SHIFT) +#define CB_PAR_TLBMCF (CB_PAR_TLBMCF_MASK << CB_PAR_TLBMCF_SHIFT) +#define CB_PAR_TLBLKF (CB_PAR_TLBLKF_MASK << CB_PAR_TLBLKF_SHIFT) +#define CB_PAR_ATOT (CB_PAR_ATOT_MASK << CB_PAR_ATOT_SHIFT) +#define CB_PAR_PLVL (CB_PAR_PLVL_MASK << CB_PAR_PLVL_SHIFT) +#define CB_PAR_STAGE (CB_PAR_STAGE_MASK << CB_PAR_STAGE_SHIFT) + +/* Primary Region Remap Register: CB_PRRR */ +#define CB_PRRR_TR0 (CB_PRRR_TR0_MASK << CB_PRRR_TR0_SHIFT) +#define CB_PRRR_TR1 (CB_PRRR_TR1_MASK << CB_PRRR_TR1_SHIFT) +#define CB_PRRR_TR2 (CB_PRRR_TR2_MASK << CB_PRRR_TR2_SHIFT) +#define CB_PRRR_TR3 
(CB_PRRR_TR3_MASK << CB_PRRR_TR3_SHIFT) +#define CB_PRRR_TR4 (CB_PRRR_TR4_MASK << CB_PRRR_TR4_SHIFT) +#define CB_PRRR_TR5 (CB_PRRR_TR5_MASK << CB_PRRR_TR5_SHIFT) +#define CB_PRRR_TR6 (CB_PRRR_TR6_MASK << CB_PRRR_TR6_SHIFT) +#define CB_PRRR_TR7 (CB_PRRR_TR7_MASK << CB_PRRR_TR7_SHIFT) +#define CB_PRRR_DS0 (CB_PRRR_DS0_MASK << CB_PRRR_DS0_SHIFT) +#define CB_PRRR_DS1 (CB_PRRR_DS1_MASK << CB_PRRR_DS1_SHIFT) +#define CB_PRRR_NS0 (CB_PRRR_NS0_MASK << CB_PRRR_NS0_SHIFT) +#define CB_PRRR_NS1 (CB_PRRR_NS1_MASK << CB_PRRR_NS1_SHIFT) +#define CB_PRRR_NOS0 (CB_PRRR_NOS0_MASK << CB_PRRR_NOS0_SHIFT) +#define CB_PRRR_NOS1 (CB_PRRR_NOS1_MASK << CB_PRRR_NOS1_SHIFT) +#define CB_PRRR_NOS2 (CB_PRRR_NOS2_MASK << CB_PRRR_NOS2_SHIFT) +#define CB_PRRR_NOS3 (CB_PRRR_NOS3_MASK << CB_PRRR_NOS3_SHIFT) +#define CB_PRRR_NOS4 (CB_PRRR_NOS4_MASK << CB_PRRR_NOS4_SHIFT) +#define CB_PRRR_NOS5 (CB_PRRR_NOS5_MASK << CB_PRRR_NOS5_SHIFT) +#define CB_PRRR_NOS6 (CB_PRRR_NOS6_MASK << CB_PRRR_NOS6_SHIFT) +#define CB_PRRR_NOS7 (CB_PRRR_NOS7_MASK << CB_PRRR_NOS7_SHIFT) + +/* Transaction Resume: CB_RESUME */ +#define CB_RESUME_TNR (CB_RESUME_TNR_MASK << CB_RESUME_TNR_SHIFT) + +/* System Control Register: CB_SCTLR */ +#define CB_SCTLR_M (CB_SCTLR_M_MASK << CB_SCTLR_M_SHIFT) +#define CB_SCTLR_TRE (CB_SCTLR_TRE_MASK << CB_SCTLR_TRE_SHIFT) +#define CB_SCTLR_AFE (CB_SCTLR_AFE_MASK << CB_SCTLR_AFE_SHIFT) +#define CB_SCTLR_AFFD (CB_SCTLR_AFFD_MASK << CB_SCTLR_AFFD_SHIFT) +#define CB_SCTLR_E (CB_SCTLR_E_MASK << CB_SCTLR_E_SHIFT) +#define CB_SCTLR_CFRE (CB_SCTLR_CFRE_MASK << CB_SCTLR_CFRE_SHIFT) +#define CB_SCTLR_CFIE (CB_SCTLR_CFIE_MASK << CB_SCTLR_CFIE_SHIFT) +#define CB_SCTLR_CFCFG (CB_SCTLR_CFCFG_MASK << CB_SCTLR_CFCFG_SHIFT) +#define CB_SCTLR_HUPCF (CB_SCTLR_HUPCF_MASK << CB_SCTLR_HUPCF_SHIFT) +#define CB_SCTLR_WXN (CB_SCTLR_WXN_MASK << CB_SCTLR_WXN_SHIFT) +#define CB_SCTLR_UWXN (CB_SCTLR_UWXN_MASK << CB_SCTLR_UWXN_SHIFT) +#define CB_SCTLR_ASIDPNE (CB_SCTLR_ASIDPNE_MASK << CB_SCTLR_ASIDPNE_SHIFT) +#define 
CB_SCTLR_TRANSIENTCFG (CB_SCTLR_TRANSIENTCFG_MASK << \ + CB_SCTLR_TRANSIENTCFG_SHIFT) +#define CB_SCTLR_MEMATTR (CB_SCTLR_MEMATTR_MASK << CB_SCTLR_MEMATTR_SHIFT) +#define CB_SCTLR_MTCFG (CB_SCTLR_MTCFG_MASK << CB_SCTLR_MTCFG_SHIFT) +#define CB_SCTLR_SHCFG (CB_SCTLR_SHCFG_MASK << CB_SCTLR_SHCFG_SHIFT) +#define CB_SCTLR_RACFG (CB_SCTLR_RACFG_MASK << CB_SCTLR_RACFG_SHIFT) +#define CB_SCTLR_WACFG (CB_SCTLR_WACFG_MASK << CB_SCTLR_WACFG_SHIFT) +#define CB_SCTLR_NSCFG (CB_SCTLR_NSCFG_MASK << CB_SCTLR_NSCFG_SHIFT) + +/* Invalidate TLB by ASID: CB_TLBIASID */ +#define CB_TLBIASID_ASID (CB_TLBIASID_ASID_MASK << CB_TLBIASID_ASID_SHIFT) + +/* Invalidate TLB by VA: CB_TLBIVA */ +#define CB_TLBIVA_ASID (CB_TLBIVA_ASID_MASK << CB_TLBIVA_ASID_SHIFT) +#define CB_TLBIVA_VA (CB_TLBIVA_VA_MASK << CB_TLBIVA_VA_SHIFT) + +/* Invalidate TLB by VA, All ASID: CB_TLBIVAA */ +#define CB_TLBIVAA_VA (CB_TLBIVAA_VA_MASK << CB_TLBIVAA_VA_SHIFT) + +/* Invalidate TLB by VA, All ASID, Last Level: CB_TLBIVAAL */ +#define CB_TLBIVAAL_VA (CB_TLBIVAAL_VA_MASK << CB_TLBIVAAL_VA_SHIFT) + +/* Invalidate TLB by VA, Last Level: CB_TLBIVAL */ +#define CB_TLBIVAL_ASID (CB_TLBIVAL_ASID_MASK << CB_TLBIVAL_ASID_SHIFT) +#define CB_TLBIVAL_VA (CB_TLBIVAL_VA_MASK << CB_TLBIVAL_VA_SHIFT) + +/* TLB Status: CB_TLBSTATUS */ +#define CB_TLBSTATUS_SACTIVE (CB_TLBSTATUS_SACTIVE_MASK << \ + CB_TLBSTATUS_SACTIVE_SHIFT) + +/* Translation Table Base Control Register: CB_TTBCR */ +#define CB_TTBCR_EAE (CB_TTBCR_EAE_MASK << CB_TTBCR_EAE_SHIFT) + +#define CB_TTBR0_ADDR (CB_TTBR0_ADDR_MASK << CB_TTBR0_ADDR_SHIFT) + +#ifdef CONFIG_IOMMU_LPAE +/* Translation Table Base Register: CB_TTBR */ +#define CB_TTBR0_ASID (CB_TTBR0_ASID_MASK << CB_TTBR0_ASID_SHIFT) +#define CB_TTBR1_ASID (CB_TTBR1_ASID_MASK << CB_TTBR1_ASID_SHIFT) + +/* Translation Table Base Control Register: CB_TTBCR */ +#define CB_TTBCR_T0SZ (CB_TTBCR_T0SZ_MASK << CB_TTBCR_T0SZ_SHIFT) +#define CB_TTBCR_T1SZ (CB_TTBCR_T1SZ_MASK << CB_TTBCR_T1SZ_SHIFT) +#define CB_TTBCR_EPD0 
(CB_TTBCR_EPD0_MASK << CB_TTBCR_EPD0_SHIFT) +#define CB_TTBCR_EPD1 (CB_TTBCR_EPD1_MASK << CB_TTBCR_EPD1_SHIFT) +#define CB_TTBCR_IRGN0 (CB_TTBCR_IRGN0_MASK << CB_TTBCR_IRGN0_SHIFT) +#define CB_TTBCR_IRGN1 (CB_TTBCR_IRGN1_MASK << CB_TTBCR_IRGN1_SHIFT) +#define CB_TTBCR_ORGN0 (CB_TTBCR_ORGN0_MASK << CB_TTBCR_ORGN0_SHIFT) +#define CB_TTBCR_ORGN1 (CB_TTBCR_ORGN1_MASK << CB_TTBCR_ORGN1_SHIFT) +#define CB_TTBCR_NSCFG0 (CB_TTBCR_NSCFG0_MASK << CB_TTBCR_NSCFG0_SHIFT) +#define CB_TTBCR_NSCFG1 (CB_TTBCR_NSCFG1_MASK << CB_TTBCR_NSCFG1_SHIFT) +#define CB_TTBCR_SH0 (CB_TTBCR_SH0_MASK << CB_TTBCR_SH0_SHIFT) +#define CB_TTBCR_SH1 (CB_TTBCR_SH1_MASK << CB_TTBCR_SH1_SHIFT) +#define CB_TTBCR_A1 (CB_TTBCR_A1_MASK << CB_TTBCR_A1_SHIFT) + +#else + +/* Translation Table Base Register 0: CB_TTBR0 */ +#define CB_TTBR0_IRGN1 (CB_TTBR0_IRGN1_MASK << CB_TTBR0_IRGN1_SHIFT) +#define CB_TTBR0_S (CB_TTBR0_S_MASK << CB_TTBR0_S_SHIFT) +#define CB_TTBR0_RGN (CB_TTBR0_RGN_MASK << CB_TTBR0_RGN_SHIFT) +#define CB_TTBR0_NOS (CB_TTBR0_NOS_MASK << CB_TTBR0_NOS_SHIFT) +#define CB_TTBR0_IRGN0 (CB_TTBR0_IRGN0_MASK << CB_TTBR0_IRGN0_SHIFT) + +/* Translation Table Base Register 1: CB_TTBR1 */ +#define CB_TTBR1_IRGN1 (CB_TTBR1_IRGN1_MASK << CB_TTBR1_IRGN1_SHIFT) +#define CB_TTBR1_S (CB_TTBR1_S_MASK << CB_TTBR1_S_SHIFT) +#define CB_TTBR1_RGN (CB_TTBR1_RGN_MASK << CB_TTBR1_RGN_SHIFT) +#define CB_TTBR1_NOS (CB_TTBR1_NOS_MASK << CB_TTBR1_NOS_SHIFT) +#define CB_TTBR1_IRGN0 (CB_TTBR1_IRGN0_MASK << CB_TTBR1_IRGN0_SHIFT) +#endif + +/* Global Register Masks */ +/* Configuration Register 0 */ +#define CR0_NSCFG_MASK 0x03 +#define CR0_WACFG_MASK 0x03 +#define CR0_RACFG_MASK 0x03 +#define CR0_SHCFG_MASK 0x03 +#define CR0_SMCFCFG_MASK 0x01 +#define NSCR0_SMCFCFG_MASK 0x01 +#define CR0_MTCFG_MASK 0x01 +#define CR0_MEMATTR_MASK 0x0F +#define CR0_BSU_MASK 0x03 +#define CR0_FB_MASK 0x01 +#define CR0_PTM_MASK 0x01 +#define CR0_VMIDPNE_MASK 0x01 +#define CR0_USFCFG_MASK 0x01 +#define NSCR0_USFCFG_MASK 0x01 +#define CR0_GSE_MASK 
0x01 +#define CR0_STALLD_MASK 0x01 +#define NSCR0_STALLD_MASK 0x01 +#define CR0_TRANSIENTCFG_MASK 0x03 +#define CR0_GCFGFIE_MASK 0x01 +#define NSCR0_GCFGFIE_MASK 0x01 +#define CR0_GCFGFRE_MASK 0x01 +#define NSCR0_GCFGFRE_MASK 0x01 +#define CR0_GFIE_MASK 0x01 +#define NSCR0_GFIE_MASK 0x01 +#define CR0_GFRE_MASK 0x01 +#define NSCR0_GFRE_MASK 0x01 +#define CR0_CLIENTPD_MASK 0x01 +#define NSCR0_CLIENTPD_MASK 0x01 + +/* ACR */ +#define ACR_SMTNMC_BPTLBEN_MASK 0x01 +#define ACR_MMUDIS_BPTLBEN_MASK 0x01 +#define ACR_S2CR_BPTLBEN_MASK 0x01 + +/* NSACR */ +#define NSACR_SMTNMC_BPTLBEN_MASK 0x01 +#define NSACR_MMUDIS_BPTLBEN_MASK 0x01 +#define NSACR_S2CR_BPTLBEN_MASK 0x01 + +/* Configuration Register 2 */ +#define CR2_BPVMID_MASK 0xFF + +/* Global Address Translation, Stage 1, Privileged Read: GATS1PR */ +#define GATS1PR_ADDR_MASK 0xFFFFF +#define GATS1PR_NDX_MASK 0xFF + +/* Global Address Translation, Stage 1, Privileged Write: GATS1PW */ +#define GATS1PW_ADDR_MASK 0xFFFFF +#define GATS1PW_NDX_MASK 0xFF + +/* Global Address Translation, Stage 1, User Read: GATS1UR */ +#define GATS1UR_ADDR_MASK 0xFFFFF +#define GATS1UR_NDX_MASK 0xFF + +/* Global Address Translation, Stage 1, User Write: GATS1UW */ +#define GATS1UW_ADDR_MASK 0xFFFFF +#define GATS1UW_NDX_MASK 0xFF + +/* Global Address Translation, Stage 1 and 2, Privileged Read: GATS1PR */ +#define GATS12PR_ADDR_MASK 0xFFFFF +#define GATS12PR_NDX_MASK 0xFF + +/* Global Address Translation, Stage 1 and 2, Privileged Write: GATS1PW */ +#define GATS12PW_ADDR_MASK 0xFFFFF +#define GATS12PW_NDX_MASK 0xFF + +/* Global Address Translation, Stage 1 and 2, User Read: GATS1UR */ +#define GATS12UR_ADDR_MASK 0xFFFFF +#define GATS12UR_NDX_MASK 0xFF + +/* Global Address Translation, Stage 1 and 2, User Write: GATS1UW */ +#define GATS12UW_ADDR_MASK 0xFFFFF +#define GATS12UW_NDX_MASK 0xFF + +/* Global Address Translation Status Register: GATSR */ +#define GATSR_ACTIVE_MASK 0x01 + +/* Global Fault Address Register: GFAR */ +#define 
GFAR_FADDR_MASK 0xFFFFFFFF + +/* Global Fault Status Register: GFSR */ +#define GFSR_ICF_MASK 0x01 +#define GFSR_USF_MASK 0x01 +#define GFSR_SMCF_MASK 0x01 +#define GFSR_UCBF_MASK 0x01 +#define GFSR_UCIF_MASK 0x01 +#define GFSR_CAF_MASK 0x01 +#define GFSR_EF_MASK 0x01 +#define GFSR_PF_MASK 0x01 +#define GFSR_MULTI_MASK 0x01 + +/* Global Fault Syndrome Register 0: GFSYNR0 */ +#define GFSYNR0_NESTED_MASK 0x01 +#define GFSYNR0_WNR_MASK 0x01 +#define GFSYNR0_PNU_MASK 0x01 +#define GFSYNR0_IND_MASK 0x01 +#define GFSYNR0_NSSTATE_MASK 0x01 +#define GFSYNR0_NSATTR_MASK 0x01 + +/* Global Fault Syndrome Register 1: GFSYNR1 */ +#define GFSYNR1_SID_MASK 0x7FFF +#define GFSYNR1_SSD_IDX_MASK 0x7FFF + +/* Global Physical Address Register: GPAR */ +#define GPAR_F_MASK 0x01 +#define GPAR_SS_MASK 0x01 +#define GPAR_OUTER_MASK 0x03 +#define GPAR_INNER_MASK 0x03 +#define GPAR_SH_MASK 0x01 +#define GPAR_NS_MASK 0x01 +#define GPAR_NOS_MASK 0x01 +#define GPAR_PA_MASK 0xFFFFF +#define GPAR_TF_MASK 0x01 +#define GPAR_AFF_MASK 0x01 +#define GPAR_PF_MASK 0x01 +#define GPAR_EF_MASK 0x01 +#define GPAR_TLBMCF_MASK 0x01 +#define GPAR_TLBLKF_MASK 0x01 +#define GPAR_UCBF_MASK 0x01 + +/* Identification Register: IDR0 */ +#define IDR0_NUMSMRG_MASK 0xFF +#define IDR0_NUMSIDB_MASK 0x0F +#define IDR0_BTM_MASK 0x01 +#define IDR0_CTTW_MASK 0x01 +#define IDR0_NUMIRPT_MASK 0xFF +#define IDR0_PTFS_MASK 0x01 +#define IDR0_SMS_MASK 0x01 +#define IDR0_NTS_MASK 0x01 +#define IDR0_S2TS_MASK 0x01 +#define IDR0_S1TS_MASK 0x01 +#define IDR0_SES_MASK 0x01 + +/* Identification Register: IDR1 */ +#define IDR1_NUMCB_MASK 0xFF +#define IDR1_NUMSSDNDXB_MASK 0x0F +#define IDR1_SSDTP_MASK 0x01 +#define IDR1_SMCD_MASK 0x01 +#define IDR1_NUMS2CB_MASK 0xFF +#define IDR1_NUMPAGENDXB_MASK 0x07 +#define IDR1_PAGESIZE_MASK 0x01 + +/* Identification Register: IDR2 */ +#define IDR2_IAS_MASK 0x0F +#define IDR2_OAS_MASK 0x0F + +/* Identification Register: IDR7 */ +#define IDR7_MINOR_MASK 0x0F +#define IDR7_MAJOR_MASK 0x0F + +/* 
Stream to Context Register: S2CR */ +#define S2CR_CBNDX_MASK 0xFF +#define S2CR_SHCFG_MASK 0x03 +#define S2CR_MTCFG_MASK 0x01 +#define S2CR_MEMATTR_MASK 0x0F +#define S2CR_TYPE_MASK 0x03 +#define S2CR_NSCFG_MASK 0x03 +#define S2CR_RACFG_MASK 0x03 +#define S2CR_WACFG_MASK 0x03 +#define S2CR_PRIVCFG_MASK 0x03 +#define S2CR_INSTCFG_MASK 0x03 +#define S2CR_TRANSIENTCFG_MASK 0x03 +#define S2CR_VMID_MASK 0xFF +#define S2CR_BSU_MASK 0x03 +#define S2CR_FB_MASK 0x01 + +/* Stream Match Register: SMR */ +#define SMR_ID_MASK 0x7FFF +#define SMR_MASK_MASK 0x7FFF +#define SMR_VALID_MASK 0x01 + +/* Global TLB Status: TLBGSTATUS */ +#define TLBGSTATUS_GSACTIVE_MASK 0x01 + +/* Invalidate Hyp TLB by VA: TLBIVAH */ +#define TLBIVAH_ADDR_MASK 0xFFFFF + +/* Invalidate TLB by VMID: TLBIVMID */ +#define TLBIVMID_VMID_MASK 0xFF + +/* Global Register Space 1 Mask */ +/* Context Bank Attribute Register: CBAR */ +#define CBAR_VMID_MASK 0xFF +#define CBAR_CBNDX_MASK 0x03 +#define CBAR_BPSHCFG_MASK 0x03 +#define CBAR_HYPC_MASK 0x01 +#define CBAR_FB_MASK 0x01 +#define CBAR_MEMATTR_MASK 0x0F +#define CBAR_TYPE_MASK 0x03 +#define CBAR_BSU_MASK 0x03 +#define CBAR_RACFG_MASK 0x03 +#define CBAR_WACFG_MASK 0x03 +#define CBAR_IRPTNDX_MASK 0xFF + +/* Context Bank Fault Restricted Syndrome Register A: CBFRSYNRA */ +#define CBFRSYNRA_SID_MASK 0x7FFF + +/* Implementation defined register space masks */ +#define MICRO_MMU_CTRL_RESERVED_MASK 0x03 +#define MICRO_MMU_CTRL_HALT_REQ_MASK 0x01 +#define MICRO_MMU_CTRL_IDLE_MASK 0x01 + +/* Stage 1 Context Bank Format Masks */ +/* Auxiliary Control Register: CB_ACTLR */ +#define CB_ACTLR_REQPRIORITY_MASK 0x3 +#define CB_ACTLR_REQPRIORITYCFG_MASK 0x1 +#define CB_ACTLR_PRIVCFG_MASK 0x3 +#define CB_ACTLR_BPRCOSH_MASK 0x1 +#define CB_ACTLR_BPRCISH_MASK 0x1 +#define CB_ACTLR_BPRCNSH_MASK 0x1 + +/* Address Translation, Stage 1, Privileged Read: CB_ATS1PR */ +#define CB_ATS1PR_ADDR_MASK 0xFFFFF + +/* Address Translation, Stage 1, Privileged Write: CB_ATS1PW */ +#define 
CB_ATS1PW_ADDR_MASK 0xFFFFF + +/* Address Translation, Stage 1, User Read: CB_ATS1UR */ +#define CB_ATS1UR_ADDR_MASK 0xFFFFF + +/* Address Translation, Stage 1, User Write: CB_ATS1UW */ +#define CB_ATS1UW_ADDR_MASK 0xFFFFF + +/* Address Translation Status Register: CB_ATSR */ +#define CB_ATSR_ACTIVE_MASK 0x01 + +/* Context ID Register: CB_CONTEXTIDR */ +#define CB_CONTEXTIDR_ASID_MASK 0xFF +#define CB_CONTEXTIDR_PROCID_MASK 0xFFFFFF + +/* Fault Address Register: CB_FAR */ +#define CB_FAR_FADDR_MASK 0xFFFFFFFF + +/* Fault Status Register: CB_FSR */ +#define CB_FSR_TF_MASK 0x01 +#define CB_FSR_AFF_MASK 0x01 +#define CB_FSR_PF_MASK 0x01 +#define CB_FSR_EF_MASK 0x01 +#define CB_FSR_TLBMCF_MASK 0x01 +#define CB_FSR_TLBLKF_MASK 0x01 +#define CB_FSR_SS_MASK 0x01 +#define CB_FSR_MULTI_MASK 0x01 + +/* Fault Syndrome Register 0: CB_FSYNR0 */ +#define CB_FSYNR0_PLVL_MASK 0x03 +#define CB_FSYNR0_S1PTWF_MASK 0x01 +#define CB_FSYNR0_WNR_MASK 0x01 +#define CB_FSYNR0_PNU_MASK 0x01 +#define CB_FSYNR0_IND_MASK 0x01 +#define CB_FSYNR0_NSSTATE_MASK 0x01 +#define CB_FSYNR0_NSATTR_MASK 0x01 +#define CB_FSYNR0_ATOF_MASK 0x01 +#define CB_FSYNR0_PTWF_MASK 0x01 +#define CB_FSYNR0_AFR_MASK 0x01 +#define CB_FSYNR0_S1CBNDX_MASK 0xFF + +/* Normal Memory Remap Register: CB_NMRR */ +#define CB_NMRR_IR0_MASK 0x03 +#define CB_NMRR_IR1_MASK 0x03 +#define CB_NMRR_IR2_MASK 0x03 +#define CB_NMRR_IR3_MASK 0x03 +#define CB_NMRR_IR4_MASK 0x03 +#define CB_NMRR_IR5_MASK 0x03 +#define CB_NMRR_IR6_MASK 0x03 +#define CB_NMRR_IR7_MASK 0x03 +#define CB_NMRR_OR0_MASK 0x03 +#define CB_NMRR_OR1_MASK 0x03 +#define CB_NMRR_OR2_MASK 0x03 +#define CB_NMRR_OR3_MASK 0x03 +#define CB_NMRR_OR4_MASK 0x03 +#define CB_NMRR_OR5_MASK 0x03 +#define CB_NMRR_OR6_MASK 0x03 +#define CB_NMRR_OR7_MASK 0x03 + +/* Physical Address Register: CB_PAR */ +#define CB_PAR_F_MASK 0x01 +#define CB_PAR_SS_MASK 0x01 +#define CB_PAR_OUTER_MASK 0x03 +#define CB_PAR_INNER_MASK 0x07 +#define CB_PAR_SH_MASK 0x01 +#define CB_PAR_NS_MASK 0x01 +#define 
CB_PAR_NOS_MASK 0x01 +#define CB_PAR_PA_MASK 0xFFFFF +#define CB_PAR_TF_MASK 0x01 +#define CB_PAR_AFF_MASK 0x01 +#define CB_PAR_PF_MASK 0x01 +#define CB_PAR_EF_MASK 0x01 +#define CB_PAR_TLBMCF_MASK 0x01 +#define CB_PAR_TLBLKF_MASK 0x01 +#define CB_PAR_ATOT_MASK 0x01ULL +#define CB_PAR_PLVL_MASK 0x03ULL +#define CB_PAR_STAGE_MASK 0x01ULL + +/* Primary Region Remap Register: CB_PRRR */ +#define CB_PRRR_TR0_MASK 0x03 +#define CB_PRRR_TR1_MASK 0x03 +#define CB_PRRR_TR2_MASK 0x03 +#define CB_PRRR_TR3_MASK 0x03 +#define CB_PRRR_TR4_MASK 0x03 +#define CB_PRRR_TR5_MASK 0x03 +#define CB_PRRR_TR6_MASK 0x03 +#define CB_PRRR_TR7_MASK 0x03 +#define CB_PRRR_DS0_MASK 0x01 +#define CB_PRRR_DS1_MASK 0x01 +#define CB_PRRR_NS0_MASK 0x01 +#define CB_PRRR_NS1_MASK 0x01 +#define CB_PRRR_NOS0_MASK 0x01 +#define CB_PRRR_NOS1_MASK 0x01 +#define CB_PRRR_NOS2_MASK 0x01 +#define CB_PRRR_NOS3_MASK 0x01 +#define CB_PRRR_NOS4_MASK 0x01 +#define CB_PRRR_NOS5_MASK 0x01 +#define CB_PRRR_NOS6_MASK 0x01 +#define CB_PRRR_NOS7_MASK 0x01 + +/* Transaction Resume: CB_RESUME */ +#define CB_RESUME_TNR_MASK 0x01 + +/* System Control Register: CB_SCTLR */ +#define CB_SCTLR_M_MASK 0x01 +#define CB_SCTLR_TRE_MASK 0x01 +#define CB_SCTLR_AFE_MASK 0x01 +#define CB_SCTLR_AFFD_MASK 0x01 +#define CB_SCTLR_E_MASK 0x01 +#define CB_SCTLR_CFRE_MASK 0x01 +#define CB_SCTLR_CFIE_MASK 0x01 +#define CB_SCTLR_CFCFG_MASK 0x01 +#define CB_SCTLR_HUPCF_MASK 0x01 +#define CB_SCTLR_WXN_MASK 0x01 +#define CB_SCTLR_UWXN_MASK 0x01 +#define CB_SCTLR_ASIDPNE_MASK 0x01 +#define CB_SCTLR_TRANSIENTCFG_MASK 0x03 +#define CB_SCTLR_MEMATTR_MASK 0x0F +#define CB_SCTLR_MTCFG_MASK 0x01 +#define CB_SCTLR_SHCFG_MASK 0x03 +#define CB_SCTLR_RACFG_MASK 0x03 +#define CB_SCTLR_WACFG_MASK 0x03 +#define CB_SCTLR_NSCFG_MASK 0x03 + +/* Invalidate TLB by ASID: CB_TLBIASID */ +#define CB_TLBIASID_ASID_MASK 0xFF + +/* Invalidate TLB by VA: CB_TLBIVA */ +#define CB_TLBIVA_ASID_MASK 0xFF +#define CB_TLBIVA_VA_MASK 0xFFFFF + +/* Invalidate TLB by VA, All ASID: 
CB_TLBIVAA */ +#define CB_TLBIVAA_VA_MASK 0xFFFFF + +/* Invalidate TLB by VA, All ASID, Last Level: CB_TLBIVAAL */ +#define CB_TLBIVAAL_VA_MASK 0xFFFFF + +/* Invalidate TLB by VA, Last Level: CB_TLBIVAL */ +#define CB_TLBIVAL_ASID_MASK 0xFF +#define CB_TLBIVAL_VA_MASK 0xFFFFF + +/* TLB Status: CB_TLBSTATUS */ +#define CB_TLBSTATUS_SACTIVE_MASK 0x01 + +/* Translation Table Base Control Register: CB_TTBCR */ +#define CB_TTBCR_T0SZ_MASK 0x07 +#define CB_TTBCR_T1SZ_MASK 0x07 +#define CB_TTBCR_EPD0_MASK 0x01 +#define CB_TTBCR_EPD1_MASK 0x01 +#define CB_TTBCR_IRGN0_MASK 0x03 +#define CB_TTBCR_IRGN1_MASK 0x03 +#define CB_TTBCR_ORGN0_MASK 0x03 +#define CB_TTBCR_ORGN1_MASK 0x03 +#define CB_TTBCR_NSCFG0_MASK 0x01 +#define CB_TTBCR_NSCFG1_MASK 0x01 +#define CB_TTBCR_SH0_MASK 0x03 +#define CB_TTBCR_SH1_MASK 0x03 +#define CB_TTBCR_A1_MASK 0x01 +#define CB_TTBCR_EAE_MASK 0x01 + +/* Translation Table Base Register 0/1: CB_TTBR */ +#ifdef CONFIG_IOMMU_LPAE +#define CB_TTBR0_ADDR_MASK 0x7FFFFFFFFULL +#define CB_TTBR0_ASID_MASK 0xFF +#define CB_TTBR1_ASID_MASK 0xFF +#else +#define CB_TTBR0_IRGN1_MASK 0x01 +#define CB_TTBR0_S_MASK 0x01 +#define CB_TTBR0_RGN_MASK 0x01 +#define CB_TTBR0_NOS_MASK 0x01 +#define CB_TTBR0_IRGN0_MASK 0x01 +#define CB_TTBR0_ADDR_MASK 0xFFFFFF + +#define CB_TTBR1_IRGN1_MASK 0x1 +#define CB_TTBR1_S_MASK 0x1 +#define CB_TTBR1_RGN_MASK 0x1 +#define CB_TTBR1_NOS_MASK 0X1 +#define CB_TTBR1_IRGN0_MASK 0X1 +#endif + +/* Global Register Shifts */ +/* Configuration Register: CR0 */ +#define CR0_NSCFG_SHIFT 28 +#define CR0_WACFG_SHIFT 26 +#define CR0_RACFG_SHIFT 24 +#define CR0_SHCFG_SHIFT 22 +#define CR0_SMCFCFG_SHIFT 21 +#define NSCR0_SMCFCFG_SHIFT 21 +#define CR0_MTCFG_SHIFT 20 +#define CR0_MEMATTR_SHIFT 16 +#define CR0_BSU_SHIFT 14 +#define CR0_FB_SHIFT 13 +#define CR0_PTM_SHIFT 12 +#define CR0_VMIDPNE_SHIFT 11 +#define CR0_USFCFG_SHIFT 10 +#define NSCR0_USFCFG_SHIFT 10 +#define CR0_GSE_SHIFT 9 +#define CR0_STALLD_SHIFT 8 +#define NSCR0_STALLD_SHIFT 8 +#define 
CR0_TRANSIENTCFG_SHIFT 6 +#define CR0_GCFGFIE_SHIFT 5 +#define NSCR0_GCFGFIE_SHIFT 5 +#define CR0_GCFGFRE_SHIFT 4 +#define NSCR0_GCFGFRE_SHIFT 4 +#define CR0_GFIE_SHIFT 2 +#define NSCR0_GFIE_SHIFT 2 +#define CR0_GFRE_SHIFT 1 +#define NSCR0_GFRE_SHIFT 1 +#define CR0_CLIENTPD_SHIFT 0 +#define NSCR0_CLIENTPD_SHIFT 0 + +/* ACR */ +#define ACR_SMTNMC_BPTLBEN_SHIFT 8 +#define ACR_MMUDIS_BPTLBEN_SHIFT 9 +#define ACR_S2CR_BPTLBEN_SHIFT 10 + +/* NSACR */ +#define NSACR_SMTNMC_BPTLBEN_SHIFT 8 +#define NSACR_MMUDIS_BPTLBEN_SHIFT 9 +#define NSACR_S2CR_BPTLBEN_SHIFT 10 + +/* Configuration Register: CR2 */ +#define CR2_BPVMID_SHIFT 0 + +/* Global Address Translation, Stage 1, Privileged Read: GATS1PR */ +#define GATS1PR_ADDR_SHIFT 12 +#define GATS1PR_NDX_SHIFT 0 + +/* Global Address Translation, Stage 1, Privileged Write: GATS1PW */ +#define GATS1PW_ADDR_SHIFT 12 +#define GATS1PW_NDX_SHIFT 0 + +/* Global Address Translation, Stage 1, User Read: GATS1UR */ +#define GATS1UR_ADDR_SHIFT 12 +#define GATS1UR_NDX_SHIFT 0 + +/* Global Address Translation, Stage 1, User Write: GATS1UW */ +#define GATS1UW_ADDR_SHIFT 12 +#define GATS1UW_NDX_SHIFT 0 + +/* Global Address Translation, Stage 1 and 2, Privileged Read: GATS12PR */ +#define GATS12PR_ADDR_SHIFT 12 +#define GATS12PR_NDX_SHIFT 0 + +/* Global Address Translation, Stage 1 and 2, Privileged Write: GATS12PW */ +#define GATS12PW_ADDR_SHIFT 12 +#define GATS12PW_NDX_SHIFT 0 + +/* Global Address Translation, Stage 1 and 2, User Read: GATS12UR */ +#define GATS12UR_ADDR_SHIFT 12 +#define GATS12UR_NDX_SHIFT 0 + +/* Global Address Translation, Stage 1 and 2, User Write: GATS12UW */ +#define GATS12UW_ADDR_SHIFT 12 +#define GATS12UW_NDX_SHIFT 0 + +/* Global Address Translation Status Register: GATSR */ +#define GATSR_ACTIVE_SHIFT 0 + +/* Global Fault Address Register: GFAR */ +#define GFAR_FADDR_SHIFT 0 + +/* Global Fault Status Register: GFSR */ +#define GFSR_ICF_SHIFT 0 +#define GFSR_USF_SHIFT 1 +#define GFSR_SMCF_SHIFT 2 +#define 
GFSR_UCBF_SHIFT 3 +#define GFSR_UCIF_SHIFT 4 +#define GFSR_CAF_SHIFT 5 +#define GFSR_EF_SHIFT 6 +#define GFSR_PF_SHIFT 7 +#define GFSR_MULTI_SHIFT 31 + +/* Global Fault Syndrome Register 0: GFSYNR0 */ +#define GFSYNR0_NESTED_SHIFT 0 +#define GFSYNR0_WNR_SHIFT 1 +#define GFSYNR0_PNU_SHIFT 2 +#define GFSYNR0_IND_SHIFT 3 +#define GFSYNR0_NSSTATE_SHIFT 4 +#define GFSYNR0_NSATTR_SHIFT 5 + +/* Global Fault Syndrome Register 1: GFSYNR1 */ +#define GFSYNR1_SID_SHIFT 0 + +/* Global Physical Address Register: GPAR */ +#define GPAR_F_SHIFT 0 +#define GPAR_SS_SHIFT 1 +#define GPAR_OUTER_SHIFT 2 +#define GPAR_INNER_SHIFT 4 +#define GPAR_SH_SHIFT 7 +#define GPAR_NS_SHIFT 9 +#define GPAR_NOS_SHIFT 10 +#define GPAR_PA_SHIFT 12 +#define GPAR_TF_SHIFT 1 +#define GPAR_AFF_SHIFT 2 +#define GPAR_PF_SHIFT 3 +#define GPAR_EF_SHIFT 4 +#define GPAR_TLBMCF_SHIFT 5 +#define GPAR_TLBLKF_SHIFT 6 +#define GPAR_UCBF_SHIFT 30 + +/* Identification Register: IDR0 */ +#define IDR0_NUMSMRG_SHIFT 0 +#define IDR0_NUMSIDB_SHIFT 9 +#define IDR0_BTM_SHIFT 13 +#define IDR0_CTTW_SHIFT 14 +#define IDR0_NUMIRPT_SHIFT 16 +#define IDR0_PTFS_SHIFT 24 +#define IDR0_SMS_SHIFT 27 +#define IDR0_NTS_SHIFT 28 +#define IDR0_S2TS_SHIFT 29 +#define IDR0_S1TS_SHIFT 30 +#define IDR0_SES_SHIFT 31 + +/* Identification Register: IDR1 */ +#define IDR1_NUMCB_SHIFT 0 +#define IDR1_NUMSSDNDXB_SHIFT 8 +#define IDR1_SSDTP_SHIFT 12 +#define IDR1_SMCD_SHIFT 15 +#define IDR1_NUMS2CB_SHIFT 16 +#define IDR1_NUMPAGENDXB_SHIFT 28 +#define IDR1_PAGESIZE_SHIFT 31 + +/* Identification Register: IDR2 */ +#define IDR2_IAS_SHIFT 0 +#define IDR2_OAS_SHIFT 4 + +/* Identification Register: IDR7 */ +#define IDR7_MINOR_SHIFT 0 +#define IDR7_MAJOR_SHIFT 4 + +/* Stream to Context Register: S2CR */ +#define S2CR_CBNDX_SHIFT 0 +#define S2CR_SHCFG_SHIFT 8 +#define S2CR_MTCFG_SHIFT 11 +#define S2CR_MEMATTR_SHIFT 12 +#define S2CR_TYPE_SHIFT 16 +#define S2CR_NSCFG_SHIFT 18 +#define S2CR_RACFG_SHIFT 20 +#define S2CR_WACFG_SHIFT 22 +#define S2CR_PRIVCFG_SHIFT 
24 +#define S2CR_INSTCFG_SHIFT 26 +#define S2CR_TRANSIENTCFG_SHIFT 28 +#define S2CR_VMID_SHIFT 0 +#define S2CR_BSU_SHIFT 24 +#define S2CR_FB_SHIFT 26 + +/* Stream Match Register: SMR */ +#define SMR_ID_SHIFT 0 +#define SMR_MASK_SHIFT 16 +#define SMR_VALID_SHIFT 31 + +/* Global TLB Status: TLBGSTATUS */ +#define TLBGSTATUS_GSACTIVE_SHIFT 0 + +/* Invalidate Hyp TLB by VA: TLBIVAH */ +#define TLBIVAH_ADDR_SHIFT 12 + +/* Invalidate TLB by VMID: TLBIVMID */ +#define TLBIVMID_VMID_SHIFT 0 + +/* Context Bank Attribute Register: CBAR */ +#define CBAR_VMID_SHIFT 0 +#define CBAR_CBNDX_SHIFT 8 +#define CBAR_BPSHCFG_SHIFT 8 +#define CBAR_HYPC_SHIFT 10 +#define CBAR_FB_SHIFT 11 +#define CBAR_MEMATTR_SHIFT 12 +#define CBAR_TYPE_SHIFT 16 +#define CBAR_BSU_SHIFT 18 +#define CBAR_RACFG_SHIFT 20 +#define CBAR_WACFG_SHIFT 22 +#define CBAR_IRPTNDX_SHIFT 24 + +/* Context Bank Fault Restricted Syndrome Register A: CBFRSYNRA */ +#define CBFRSYNRA_SID_SHIFT 0 + +/* Implementation defined register space shift */ +#define MICRO_MMU_CTRL_RESERVED_SHIFT 0x00 +#define MICRO_MMU_CTRL_HALT_REQ_SHIFT 0x02 +#define MICRO_MMU_CTRL_IDLE_SHIFT 0x03 + +/* Stage 1 Context Bank Format Shifts */ +/* Auxiliary Control Register: CB_ACTLR */ +#define CB_ACTLR_REQPRIORITY_SHIFT 0 +#define CB_ACTLR_REQPRIORITYCFG_SHIFT 4 +#define CB_ACTLR_PRIVCFG_SHIFT 8 +#define CB_ACTLR_BPRCOSH_SHIFT 28 +#define CB_ACTLR_BPRCISH_SHIFT 29 +#define CB_ACTLR_BPRCNSH_SHIFT 30 + +/* Address Translation, Stage 1, Privileged Read: CB_ATS1PR */ +#define CB_ATS1PR_ADDR_SHIFT 12 + +/* Address Translation, Stage 1, Privileged Write: CB_ATS1PW */ +#define CB_ATS1PW_ADDR_SHIFT 12 + +/* Address Translation, Stage 1, User Read: CB_ATS1UR */ +#define CB_ATS1UR_ADDR_SHIFT 12 + +/* Address Translation, Stage 1, User Write: CB_ATS1UW */ +#define CB_ATS1UW_ADDR_SHIFT 12 + +/* Address Translation Status Register: CB_ATSR */ +#define CB_ATSR_ACTIVE_SHIFT 0 + +/* Context ID Register: CB_CONTEXTIDR */ +#define CB_CONTEXTIDR_ASID_SHIFT 0 +#define 
CB_CONTEXTIDR_PROCID_SHIFT 8 + +/* Fault Address Register: CB_FAR */ +#define CB_FAR_FADDR_SHIFT 0 + +/* Fault Status Register: CB_FSR */ +#define CB_FSR_TF_SHIFT 1 +#define CB_FSR_AFF_SHIFT 2 +#define CB_FSR_PF_SHIFT 3 +#define CB_FSR_EF_SHIFT 4 +#define CB_FSR_TLBMCF_SHIFT 5 +#define CB_FSR_TLBLKF_SHIFT 6 +#define CB_FSR_SS_SHIFT 30 +#define CB_FSR_MULTI_SHIFT 31 + +/* Fault Syndrome Register 0: CB_FSYNR0 */ +#define CB_FSYNR0_PLVL_SHIFT 0 +#define CB_FSYNR0_S1PTWF_SHIFT 3 +#define CB_FSYNR0_WNR_SHIFT 4 +#define CB_FSYNR0_PNU_SHIFT 5 +#define CB_FSYNR0_IND_SHIFT 6 +#define CB_FSYNR0_NSSTATE_SHIFT 7 +#define CB_FSYNR0_NSATTR_SHIFT 8 +#define CB_FSYNR0_ATOF_SHIFT 9 +#define CB_FSYNR0_PTWF_SHIFT 10 +#define CB_FSYNR0_AFR_SHIFT 11 +#define CB_FSYNR0_S1CBNDX_SHIFT 16 + +/* Normal Memory Remap Register: CB_NMRR */ +#define CB_NMRR_IR0_SHIFT 0 +#define CB_NMRR_IR1_SHIFT 2 +#define CB_NMRR_IR2_SHIFT 4 +#define CB_NMRR_IR3_SHIFT 6 +#define CB_NMRR_IR4_SHIFT 8 +#define CB_NMRR_IR5_SHIFT 10 +#define CB_NMRR_IR6_SHIFT 12 +#define CB_NMRR_IR7_SHIFT 14 +#define CB_NMRR_OR0_SHIFT 16 +#define CB_NMRR_OR1_SHIFT 18 +#define CB_NMRR_OR2_SHIFT 20 +#define CB_NMRR_OR3_SHIFT 22 +#define CB_NMRR_OR4_SHIFT 24 +#define CB_NMRR_OR5_SHIFT 26 +#define CB_NMRR_OR6_SHIFT 28 +#define CB_NMRR_OR7_SHIFT 30 + +/* Physical Address Register: CB_PAR */ +#define CB_PAR_F_SHIFT 0 +#define CB_PAR_SS_SHIFT 1 +#define CB_PAR_OUTER_SHIFT 2 +#define CB_PAR_INNER_SHIFT 4 +#define CB_PAR_SH_SHIFT 7 +#define CB_PAR_NS_SHIFT 9 +#define CB_PAR_NOS_SHIFT 10 +#define CB_PAR_PA_SHIFT 12 +#define CB_PAR_TF_SHIFT 1 +#define CB_PAR_AFF_SHIFT 2 +#define CB_PAR_PF_SHIFT 3 +#define CB_PAR_EF_SHIFT 4 +#define CB_PAR_TLBMCF_SHIFT 5 +#define CB_PAR_TLBLKF_SHIFT 6 +#define CB_PAR_ATOT_SHIFT 31 +#define CB_PAR_PLVL_SHIFT 32 +#define CB_PAR_STAGE_SHIFT 35 + +/* Primary Region Remap Register: CB_PRRR */ +#define CB_PRRR_TR0_SHIFT 0 +#define CB_PRRR_TR1_SHIFT 2 +#define CB_PRRR_TR2_SHIFT 4 +#define CB_PRRR_TR3_SHIFT 6 +#define 
CB_PRRR_TR4_SHIFT 8 +#define CB_PRRR_TR5_SHIFT 10 +#define CB_PRRR_TR6_SHIFT 12 +#define CB_PRRR_TR7_SHIFT 14 +#define CB_PRRR_DS0_SHIFT 16 +#define CB_PRRR_DS1_SHIFT 17 +#define CB_PRRR_NS0_SHIFT 18 +#define CB_PRRR_NS1_SHIFT 19 +#define CB_PRRR_NOS0_SHIFT 24 +#define CB_PRRR_NOS1_SHIFT 25 +#define CB_PRRR_NOS2_SHIFT 26 +#define CB_PRRR_NOS3_SHIFT 27 +#define CB_PRRR_NOS4_SHIFT 28 +#define CB_PRRR_NOS5_SHIFT 29 +#define CB_PRRR_NOS6_SHIFT 30 +#define CB_PRRR_NOS7_SHIFT 31 + +/* Transaction Resume: CB_RESUME */ +#define CB_RESUME_TNR_SHIFT 0 + +/* System Control Register: CB_SCTLR */ +#define CB_SCTLR_M_SHIFT 0 +#define CB_SCTLR_TRE_SHIFT 1 +#define CB_SCTLR_AFE_SHIFT 2 +#define CB_SCTLR_AFFD_SHIFT 3 +#define CB_SCTLR_E_SHIFT 4 +#define CB_SCTLR_CFRE_SHIFT 5 +#define CB_SCTLR_CFIE_SHIFT 6 +#define CB_SCTLR_CFCFG_SHIFT 7 +#define CB_SCTLR_HUPCF_SHIFT 8 +#define CB_SCTLR_WXN_SHIFT 9 +#define CB_SCTLR_UWXN_SHIFT 10 +#define CB_SCTLR_ASIDPNE_SHIFT 12 +#define CB_SCTLR_TRANSIENTCFG_SHIFT 14 +#define CB_SCTLR_MEMATTR_SHIFT 16 +#define CB_SCTLR_MTCFG_SHIFT 20 +#define CB_SCTLR_SHCFG_SHIFT 22 +#define CB_SCTLR_RACFG_SHIFT 24 +#define CB_SCTLR_WACFG_SHIFT 26 +#define CB_SCTLR_NSCFG_SHIFT 28 + +/* Invalidate TLB by ASID: CB_TLBIASID */ +#define CB_TLBIASID_ASID_SHIFT 0 + +/* Invalidate TLB by VA: CB_TLBIVA */ +#define CB_TLBIVA_ASID_SHIFT 0 +#define CB_TLBIVA_VA_SHIFT 12 + +/* Invalidate TLB by VA, All ASID: CB_TLBIVAA */ +#define CB_TLBIVAA_VA_SHIFT 12 + +/* Invalidate TLB by VA, All ASID, Last Level: CB_TLBIVAAL */ +#define CB_TLBIVAAL_VA_SHIFT 12 + +/* Invalidate TLB by VA, Last Level: CB_TLBIVAL */ +#define CB_TLBIVAL_ASID_SHIFT 0 +#define CB_TLBIVAL_VA_SHIFT 12 + +/* TLB Status: CB_TLBSTATUS */ +#define CB_TLBSTATUS_SACTIVE_SHIFT 0 + +/* Translation Table Base Control Register: CB_TTBCR */ +#define CB_TTBCR_T0SZ_SHIFT 0 +#define CB_TTBCR_T1SZ_SHIFT 16 +#define CB_TTBCR_EPD0_SHIFT 4 +#define CB_TTBCR_EPD1_SHIFT 5 +#define CB_TTBCR_NSCFG0_SHIFT 14 +#define 
CB_TTBCR_NSCFG1_SHIFT 30 +#define CB_TTBCR_EAE_SHIFT 31 +#define CB_TTBCR_IRGN0_SHIFT 8 +#define CB_TTBCR_IRGN1_SHIFT 24 +#define CB_TTBCR_ORGN0_SHIFT 10 +#define CB_TTBCR_ORGN1_SHIFT 26 +#define CB_TTBCR_A1_SHIFT 22 +#define CB_TTBCR_SH0_SHIFT 12 +#define CB_TTBCR_SH1_SHIFT 28 + +/* Translation Table Base Register 0/1: CB_TTBR */ +#ifdef CONFIG_IOMMU_LPAE +#define CB_TTBR0_ADDR_SHIFT 5 +#define CB_TTBR0_ASID_SHIFT 48 +#define CB_TTBR1_ASID_SHIFT 48 +#else +#define CB_TTBR0_IRGN1_SHIFT 0 +#define CB_TTBR0_S_SHIFT 1 +#define CB_TTBR0_RGN_SHIFT 3 +#define CB_TTBR0_NOS_SHIFT 5 +#define CB_TTBR0_IRGN0_SHIFT 6 +#define CB_TTBR0_ADDR_SHIFT 14 + +#define CB_TTBR1_IRGN1_SHIFT 0 +#define CB_TTBR1_S_SHIFT 1 +#define CB_TTBR1_RGN_SHIFT 3 +#define CB_TTBR1_NOS_SHIFT 5 +#define CB_TTBR1_IRGN0_SHIFT 6 +#define CB_TTBR1_ADDR_SHIFT 14 +#endif + +#endif diff --git a/drivers/iommu/qcom/msm_iommu_pagetable.c b/drivers/iommu/qcom/msm_iommu_pagetable.c new file mode 100644 index 000000000000..bd03bab1cba7 --- /dev/null +++ b/drivers/iommu/qcom/msm_iommu_pagetable.c @@ -0,0 +1,645 @@ +/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/io.h> +#include <linux/iommu.h> +#include <linux/scatterlist.h> + +#include <asm/cacheflush.h> + +#include "qcom_iommu.h" +#include "msm_iommu_priv.h" +#include <trace/events/kmem.h> +#include "msm_iommu_pagetable.h" + +#define NUM_FL_PTE 4096 +#define NUM_SL_PTE 256 +#define GUARD_PTE 2 +#define NUM_TEX_CLASS 8 + +/* First-level page table bits */ +#define FL_BASE_MASK 0xFFFFFC00 +#define FL_TYPE_TABLE (1 << 0) +#define FL_TYPE_SECT (2 << 0) +#define FL_SUPERSECTION (1 << 18) +#define FL_AP0 (1 << 10) +#define FL_AP1 (1 << 11) +#define FL_AP2 (1 << 15) +#define FL_SHARED (1 << 16) +#define FL_BUFFERABLE (1 << 2) +#define FL_CACHEABLE (1 << 3) +#define FL_TEX0 (1 << 12) +#define FL_OFFSET(va) (((va) & 0xFFF00000) >> 20) +#define FL_NG (1 << 17) + +/* Second-level page table bits */ +#define SL_BASE_MASK_LARGE 0xFFFF0000 +#define SL_BASE_MASK_SMALL 0xFFFFF000 +#define SL_TYPE_LARGE (1 << 0) +#define SL_TYPE_SMALL (2 << 0) +#define SL_AP0 (1 << 4) +#define SL_AP1 (2 << 4) +#define SL_AP2 (1 << 9) +#define SL_SHARED (1 << 10) +#define SL_BUFFERABLE (1 << 2) +#define SL_CACHEABLE (1 << 3) +#define SL_TEX0 (1 << 6) +#define SL_OFFSET(va) (((va) & 0xFF000) >> 12) +#define SL_NG (1 << 11) + +/* Memory type and cache policy attributes */ +#define MT_SO 0 +#define MT_DEV 1 +#define MT_IOMMU_NORMAL 2 +#define CP_NONCACHED 0 +#define CP_WB_WA 1 +#define CP_WT 2 +#define CP_WB_NWA 3 + +/* Sharability attributes of MSM IOMMU mappings */ +#define MSM_IOMMU_ATTR_NON_SH 0x0 +#define MSM_IOMMU_ATTR_SH 0x4 + +/* Cacheability attributes of MSM IOMMU mappings */ +#define MSM_IOMMU_ATTR_NONCACHED 0x0 +#define MSM_IOMMU_ATTR_CACHED_WB_WA 0x1 +#define MSM_IOMMU_ATTR_CACHED_WB_NWA 0x2 +#define MSM_IOMMU_ATTR_CACHED_WT 0x3 + +static int msm_iommu_tex_class[4]; + +/* TEX Remap Registers */ +#define NMRR_ICP(nmrr, n) (((nmrr) & (3 << ((n) * 2))) >> ((n) * 2)) +#define NMRR_OCP(nmrr, n) 
(((nmrr) & (3 << ((n) * 2 + 16))) >> ((n) * 2 + 16)) + +#define PRRR_NOS(prrr, n) ((prrr) & (1 << ((n) + 24)) ? 1 : 0) +#define PRRR_MT(prrr, n) ((((prrr) & (3 << ((n) * 2))) >> ((n) * 2))) + +static inline void clean_pte(u32 *start, size_t size, int redirect) +{ + if (!redirect) + __dma_flush_area(start, size); +} + +int msm_iommu_pagetable_alloc(struct msm_iommu_pt *pt) +{ + pt->fl_table = (u32 *)__get_free_pages(GFP_KERNEL, get_order(SZ_16K)); + if (!pt->fl_table) + return -ENOMEM; + + pt->fl_table_shadow = (u32 *)__get_free_pages(GFP_KERNEL, + get_order(SZ_16K)); + if (!pt->fl_table_shadow) { + free_pages((unsigned long)pt->fl_table, get_order(SZ_16K)); + return -ENOMEM; + } + + memset(pt->fl_table, 0, SZ_16K); + memset(pt->fl_table_shadow, 0, SZ_16K); + clean_pte(pt->fl_table, NUM_FL_PTE * sizeof(u64), pt->redirect); + + return 0; +} + +void msm_iommu_pagetable_free(struct msm_iommu_pt *pt) +{ + u32 *fl_table; + u32 *fl_table_shadow; + int i; + + fl_table = pt->fl_table; + fl_table_shadow = pt->fl_table_shadow; + for (i = 0; i < NUM_FL_PTE; i++) + if ((fl_table[i] & 0x03) == FL_TYPE_TABLE) + free_page((unsigned long) __va(((fl_table[i]) & + FL_BASE_MASK))); + free_pages((unsigned long)fl_table, get_order(SZ_16K)); + pt->fl_table = 0; + + free_pages((unsigned long)fl_table_shadow, get_order(SZ_16K)); + pt->fl_table_shadow = 0; +} + +void msm_iommu_pagetable_free_tables(struct msm_iommu_pt *pt, unsigned long va, + size_t len) +{ + /* + * Adding 2 for worst case. We could be spanning 3 second level pages + * if we unmapped just over 1MB. 
+ */ + u32 n_entries = len / SZ_1M + 2; + u32 fl_offset = FL_OFFSET(va); + u32 i; + + for (i = 0; i < n_entries && fl_offset < NUM_FL_PTE; ++i) { + u32 *fl_pte_shadow = pt->fl_table_shadow + fl_offset; + void *sl_table_va = __va(((*fl_pte_shadow) & ~0x1FF)); + u32 sl_table = *fl_pte_shadow; + + if (sl_table && !(sl_table & 0x1FF)) { + free_pages((unsigned long) sl_table_va, + get_order(SZ_4K)); + *fl_pte_shadow = 0; + } + ++fl_offset; + } +} + +static int __get_pgprot(int prot, int len) +{ + unsigned int pgprot; + int tex; + + if (!(prot & (IOMMU_READ | IOMMU_WRITE))) { + prot |= IOMMU_READ | IOMMU_WRITE; + WARN_ONCE(1, "No attributes in iommu mapping; assuming RW\n"); + } + + if ((prot & IOMMU_WRITE) && !(prot & IOMMU_READ)) { + /* Write-only unsupported falling back to RW */ + prot |= IOMMU_READ; + } + + if (prot & IOMMU_CACHE) + tex = (pgprot_val(PAGE_KERNEL) >> 2) & 0x07; + else + tex = msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED]; + + if (tex < 0 || tex > NUM_TEX_CLASS - 1) + return 0; + + if (len == SZ_16M || len == SZ_1M) { + pgprot = FL_SHARED; + pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0; + pgprot |= tex & 0x02 ? FL_CACHEABLE : 0; + pgprot |= tex & 0x04 ? FL_TEX0 : 0; + pgprot |= prot & IOMMU_PRIV ? FL_AP0 : + (FL_AP0 | FL_AP1); + pgprot |= prot & IOMMU_WRITE ? 0 : FL_AP2; + } else { + pgprot = SL_SHARED; + pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0; + pgprot |= tex & 0x02 ? SL_CACHEABLE : 0; + pgprot |= tex & 0x04 ? SL_TEX0 : 0; + pgprot |= prot & IOMMU_PRIV ? SL_AP0 : + (SL_AP0 | SL_AP1); + pgprot |= prot & IOMMU_WRITE ? 
0 : SL_AP2; + } + + return pgprot; +} + +static u32 *make_second_level(struct msm_iommu_pt *pt, u32 *fl_pte, + u32 *fl_pte_shadow) +{ + u32 *sl; + sl = (u32 *) __get_free_pages(GFP_ATOMIC, + get_order(SZ_4K)); + + if (!sl) { + pr_debug("Could not allocate second level table\n"); + goto fail; + } + memset(sl, 0, SZ_4K); + clean_pte(sl, (NUM_SL_PTE + GUARD_PTE) * sizeof(u32), pt->redirect); + + *fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | \ + FL_TYPE_TABLE); + *fl_pte_shadow = *fl_pte & ~0x1FF; + + clean_pte(fl_pte, sizeof(u32), pt->redirect); +fail: + return sl; +} + +static int sl_4k(u32 *sl_pte, phys_addr_t pa, unsigned int pgprot) +{ + int ret = 0; + + if (*sl_pte) { + ret = -EBUSY; + goto fail; + } + + *sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_NG | SL_SHARED + | SL_TYPE_SMALL | pgprot; +fail: + return ret; +} + +static int sl_64k(u32 *sl_pte, phys_addr_t pa, unsigned int pgprot) +{ + int ret = 0; + + int i; + + for (i = 0; i < 16; i++) + if (*(sl_pte+i)) { + ret = -EBUSY; + goto fail; + } + + for (i = 0; i < 16; i++) + *(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_NG + | SL_SHARED | SL_TYPE_LARGE | pgprot; + +fail: + return ret; +} + +static inline int fl_1m(u32 *fl_pte, phys_addr_t pa, int pgprot) +{ + if (*fl_pte) + return -EBUSY; + + *fl_pte = (pa & 0xFFF00000) | FL_NG | FL_TYPE_SECT | FL_SHARED + | pgprot; + + return 0; +} + +static inline int fl_16m(u32 *fl_pte, phys_addr_t pa, int pgprot) +{ + int i; + int ret = 0; + for (i = 0; i < 16; i++) + if (*(fl_pte+i)) { + ret = -EBUSY; + goto fail; + } + for (i = 0; i < 16; i++) + *(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION + | FL_TYPE_SECT | FL_SHARED | FL_NG | pgprot; +fail: + return ret; +} + +int msm_iommu_pagetable_map(struct msm_iommu_pt *pt, unsigned long va, + phys_addr_t pa, size_t len, int prot) +{ + int ret; + struct scatterlist sg; + + if (len != SZ_16M && len != SZ_1M && + len != SZ_64K && len != SZ_4K) { + pr_debug("Bad size: %zd\n", len); + ret = -EINVAL; + goto fail; + } + + sg_init_table(&sg, 
1); + sg_dma_address(&sg) = pa; + sg.length = len; + + ret = msm_iommu_pagetable_map_range(pt, va, &sg, len, prot); + +fail: + return ret; +} + +size_t msm_iommu_pagetable_unmap(struct msm_iommu_pt *pt, unsigned long va, + size_t len) +{ + msm_iommu_pagetable_unmap_range(pt, va, len); + return len; +} + +static phys_addr_t get_phys_addr(struct scatterlist *sg) +{ + /* + * Try sg_dma_address first so that we can + * map carveout regions that do not have a + * struct page associated with them. + */ + phys_addr_t pa = sg_dma_address(sg); + if (pa == 0) + pa = sg_phys(sg); + return pa; +} + +/* + * For debugging we may want to force mappings to be 4K only + */ +#ifdef CONFIG_IOMMU_FORCE_4K_MAPPINGS +static inline int is_fully_aligned(unsigned int va, phys_addr_t pa, size_t len, + int align) +{ + if (align == SZ_4K) { + return IS_ALIGNED(va, align) && IS_ALIGNED(pa, align) + && (len >= align); + } else { + return 0; + } +} +#else +static inline int is_fully_aligned(unsigned int va, phys_addr_t pa, size_t len, + int align) +{ + return IS_ALIGNED(va, align) && IS_ALIGNED(pa, align) + && (len >= align); +} +#endif + +int msm_iommu_pagetable_map_range(struct msm_iommu_pt *pt, unsigned int va, + struct scatterlist *sg, unsigned int len, int prot) +{ + phys_addr_t pa; + unsigned int start_va = va; + unsigned int offset = 0; + u32 *fl_pte; + u32 *fl_pte_shadow; + u32 fl_offset; + u32 *sl_table = NULL; + u32 sl_offset, sl_start; + unsigned int chunk_size, chunk_offset = 0; + int ret = 0; + unsigned int pgprot4k, pgprot64k, pgprot1m, pgprot16m; + + BUG_ON(len & (SZ_4K - 1)); + + pgprot4k = __get_pgprot(prot, SZ_4K); + pgprot64k = __get_pgprot(prot, SZ_64K); + pgprot1m = __get_pgprot(prot, SZ_1M); + pgprot16m = __get_pgprot(prot, SZ_16M); + if (!pgprot4k || !pgprot64k || !pgprot1m || !pgprot16m) { + ret = -EINVAL; + goto fail; + } + + fl_offset = FL_OFFSET(va); /* Upper 12 bits */ + fl_pte = pt->fl_table + fl_offset; /* int pointers, 4 bytes */ + fl_pte_shadow = 
pt->fl_table_shadow + fl_offset; + pa = get_phys_addr(sg); + + while (offset < len) { + chunk_size = SZ_4K; + + if (is_fully_aligned(va, pa, sg->length - chunk_offset, + SZ_16M)) + chunk_size = SZ_16M; + else if (is_fully_aligned(va, pa, sg->length - chunk_offset, + SZ_1M)) + chunk_size = SZ_1M; + /* 64k or 4k determined later */ + +// trace_iommu_map_range(va, pa, sg->length, chunk_size); + + /* for 1M and 16M, only first level entries are required */ + if (chunk_size >= SZ_1M) { + if (chunk_size == SZ_16M) { + ret = fl_16m(fl_pte, pa, pgprot16m); + if (ret) + goto fail; + clean_pte(fl_pte, 16 * sizeof(u32), pt->redirect); + fl_pte += 16; + fl_pte_shadow += 16; + } else if (chunk_size == SZ_1M) { + ret = fl_1m(fl_pte, pa, pgprot1m); + if (ret) + goto fail; + clean_pte(fl_pte, sizeof(u32), pt->redirect); + fl_pte++; + fl_pte_shadow++; + } + + offset += chunk_size; + chunk_offset += chunk_size; + va += chunk_size; + pa += chunk_size; + + if (chunk_offset >= sg->length && offset < len) { + chunk_offset = 0; + sg = sg_next(sg); + pa = get_phys_addr(sg); + } + continue; + } + /* for 4K or 64K, make sure there is a second level table */ + if (*fl_pte == 0) { + if (!make_second_level(pt, fl_pte, fl_pte_shadow)) { + ret = -ENOMEM; + goto fail; + } + } + if (!(*fl_pte & FL_TYPE_TABLE)) { + ret = -EBUSY; + goto fail; + } + sl_table = __va(((*fl_pte) & FL_BASE_MASK)); + sl_offset = SL_OFFSET(va); + /* Keep track of initial position so we + * don't clean more than we have to + */ + sl_start = sl_offset; + + /* Build the 2nd level page table */ + while (offset < len && sl_offset < NUM_SL_PTE) { + /* Map a large 64K page if the chunk is large enough and + * the pa and va are aligned + */ + + if (is_fully_aligned(va, pa, sg->length - chunk_offset, + SZ_64K)) + chunk_size = SZ_64K; + else + chunk_size = SZ_4K; + +// trace_iommu_map_range(va, pa, sg->length, +// chunk_size); + + if (chunk_size == SZ_4K) { + sl_4k(&sl_table[sl_offset], pa, pgprot4k); + sl_offset++; + /* Increment 
map count */ + (*fl_pte_shadow)++; + } else { + BUG_ON(sl_offset + 16 > NUM_SL_PTE); + sl_64k(&sl_table[sl_offset], pa, pgprot64k); + sl_offset += 16; + /* Increment map count */ + *fl_pte_shadow += 16; + } + + offset += chunk_size; + chunk_offset += chunk_size; + va += chunk_size; + pa += chunk_size; + + if (chunk_offset >= sg->length && offset < len) { + chunk_offset = 0; + sg = sg_next(sg); + pa = get_phys_addr(sg); + } + } + + clean_pte(sl_table + sl_start, (sl_offset - sl_start) * sizeof(u32), + pt->redirect); + fl_pte++; + fl_pte_shadow++; + sl_offset = 0; + } + +fail: + if (ret && offset > 0) + msm_iommu_pagetable_unmap_range(pt, start_va, offset); + + return ret; +} + +void msm_iommu_pagetable_unmap_range(struct msm_iommu_pt *pt, unsigned int va, + unsigned int len) +{ + unsigned int offset = 0; + u32 *fl_pte; + u32 *fl_pte_shadow; + u32 fl_offset; + u32 *sl_table; + u32 sl_start, sl_end; + int used; + + BUG_ON(len & (SZ_4K - 1)); + + fl_offset = FL_OFFSET(va); /* Upper 12 bits */ + fl_pte = pt->fl_table + fl_offset; /* int pointers, 4 bytes */ + fl_pte_shadow = pt->fl_table_shadow + fl_offset; + + while (offset < len) { + if (*fl_pte & FL_TYPE_TABLE) { + unsigned int n_entries; + + sl_start = SL_OFFSET(va); + sl_table = __va(((*fl_pte) & FL_BASE_MASK)); + sl_end = ((len - offset) / SZ_4K) + sl_start; + + if (sl_end > NUM_SL_PTE) + sl_end = NUM_SL_PTE; + n_entries = sl_end - sl_start; + + memset(sl_table + sl_start, 0, n_entries * 4); + clean_pte(sl_table + sl_start, (sl_end - sl_start) * sizeof(u32), + pt->redirect); + + offset += n_entries * SZ_4K; + va += n_entries * SZ_4K; + + BUG_ON((*fl_pte_shadow & 0x1FF) < n_entries); + + /* Decrement map count */ + *fl_pte_shadow -= n_entries; + used = *fl_pte_shadow & 0x1FF; + + if (!used) { + *fl_pte = 0; + clean_pte(fl_pte, sizeof(u32), pt->redirect); + } + + sl_start = 0; + } else { + *fl_pte = 0; + *fl_pte_shadow = 0; + + clean_pte(fl_pte, sizeof(u32), pt->redirect); + va += SZ_1M; + offset += SZ_1M; + 
sl_start = 0; + } + fl_pte++; + fl_pte_shadow++; + } +} + +phys_addr_t msm_iommu_iova_to_phys_soft(struct iommu_domain *domain, + phys_addr_t va) +{ + struct msm_iommu_priv *priv = to_msm_priv(domain); + struct msm_iommu_pt *pt = &priv->pt; + u32 *fl_pte; + u32 fl_offset; + u32 *sl_table = NULL; + u32 sl_offset; + u32 *sl_pte; + + if (!pt->fl_table) { + pr_err("Page table doesn't exist\n"); + return 0; + } + + fl_offset = FL_OFFSET(va); + fl_pte = pt->fl_table + fl_offset; + + if (*fl_pte & FL_TYPE_TABLE) { + sl_table = __va(((*fl_pte) & FL_BASE_MASK)); + sl_offset = SL_OFFSET(va); + sl_pte = sl_table + sl_offset; + /* 64 KB section */ + if (*sl_pte & SL_TYPE_LARGE) + return (*sl_pte & 0xFFFF0000) | (va & ~0xFFFF0000); + /* 4 KB section */ + if (*sl_pte & SL_TYPE_SMALL) + return (*sl_pte & 0xFFFFF000) | (va & ~0xFFFFF000); + } else { + /* 16 MB section */ + if (*fl_pte & FL_SUPERSECTION) + return (*fl_pte & 0xFF000000) | (va & ~0xFF000000); + /* 1 MB section */ + if (*fl_pte & FL_TYPE_SECT) + return (*fl_pte & 0xFFF00000) | (va & ~0xFFF00000); + } + return 0; +} + +static int get_tex_class(int icp, int ocp, int mt, int nos) +{ + int i = 0; + unsigned int prrr; + unsigned int nmrr; + int c_icp, c_ocp, c_mt, c_nos; + + prrr = msm_iommu_get_prrr(); + nmrr = msm_iommu_get_nmrr(); + + for (i = 0; i < NUM_TEX_CLASS; i++) { + c_nos = PRRR_NOS(prrr, i); + c_mt = PRRR_MT(prrr, i); + c_icp = NMRR_ICP(nmrr, i); + c_ocp = NMRR_OCP(nmrr, i); + + if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos) + return i; + } + + return -ENODEV; +} + +static void setup_iommu_tex_classes(void) +{ + msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] = + get_tex_class(CP_NONCACHED, CP_NONCACHED, + MT_IOMMU_NORMAL, 1); + + msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] = + get_tex_class(CP_WB_WA, CP_WB_WA, MT_IOMMU_NORMAL, 1); + + msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] = + get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_IOMMU_NORMAL, 1); + + 
msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] = + get_tex_class(CP_WT, CP_WT, MT_IOMMU_NORMAL, 1); +} + +void msm_iommu_pagetable_init(void) +{ + setup_iommu_tex_classes(); +} diff --git a/drivers/iommu/qcom/msm_iommu_pagetable.h b/drivers/iommu/qcom/msm_iommu_pagetable.h new file mode 100644 index 000000000000..12a8d274f95e --- /dev/null +++ b/drivers/iommu/qcom/msm_iommu_pagetable.h @@ -0,0 +1,33 @@ +/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __ARCH_ARM_MACH_MSM_IOMMU_PAGETABLE_H +#define __ARCH_ARM_MACH_MSM_IOMMU_PAGETABLE_H + +struct msm_iommu_pt; + +void msm_iommu_pagetable_init(void); +int msm_iommu_pagetable_alloc(struct msm_iommu_pt *pt); +void msm_iommu_pagetable_free(struct msm_iommu_pt *pt); +int msm_iommu_pagetable_map(struct msm_iommu_pt *pt, unsigned long va, + phys_addr_t pa, size_t len, int prot); +size_t msm_iommu_pagetable_unmap(struct msm_iommu_pt *pt, unsigned long va, + size_t len); +int msm_iommu_pagetable_map_range(struct msm_iommu_pt *pt, unsigned int va, + struct scatterlist *sg, unsigned int len, int prot); +void msm_iommu_pagetable_unmap_range(struct msm_iommu_pt *pt, unsigned int va, + unsigned int len); +phys_addr_t msm_iommu_iova_to_phys_soft(struct iommu_domain *domain, + phys_addr_t va); +void msm_iommu_pagetable_free_tables(struct msm_iommu_pt *pt, unsigned long va, + size_t len); +#endif diff --git a/drivers/iommu/qcom/msm_iommu_perfmon.h b/drivers/iommu/qcom/msm_iommu_perfmon.h new file mode 100644 index 000000000000..45683f4ebd88 
--- /dev/null +++ b/drivers/iommu/qcom/msm_iommu_perfmon.h @@ -0,0 +1,233 @@ +/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include <linux/err.h> +#include <linux/mutex.h> +#include <linux/list.h> +#include <linux/irqreturn.h> + +#ifndef MSM_IOMMU_PERFMON_H +#define MSM_IOMMU_PERFMON_H + +/** + * struct iommu_pmon_counter - container for a performance counter. + * @counter_no: counter number within the group + * @absolute_counter_no: counter number within IOMMU PMU + * @value: cached counter value + * @overflow_count: no of times counter has overflowed + * @enabled: indicates whether counter is enabled or not + * @current_event_class: current selected event class, -1 if none + * @counter_dir: debugfs directory for this counter + * @cnt_group: group this counter belongs to + */ +struct iommu_pmon_counter { + unsigned int counter_no; + unsigned int absolute_counter_no; + unsigned long value; + unsigned long overflow_count; + unsigned int enabled; + int current_event_class; + struct dentry *counter_dir; + struct iommu_pmon_cnt_group *cnt_group; +}; + +/** + * struct iommu_pmon_cnt_group - container for a perf mon counter group. 
+ * @grp_no: group number
+ * @num_counters: number of counters in this group
+ * @counters: list of counters in this group
+ * @group_dir: debugfs directory for this group
+ * @pmon: pointer to the iommu_pmon object this group belongs to
+ */
+struct iommu_pmon_cnt_group {
+	unsigned int grp_no;
+	unsigned int num_counters;
+	struct iommu_pmon_counter *counters;
+	struct dentry *group_dir;
+	struct iommu_pmon *pmon;
+};
+
+/**
+ * struct iommu_info - container for a perf mon iommu info.
+ * @iommu_name: name of the iommu from device tree
+ * @base: virtual base address for this iommu
+ * @evt_irq: irq number for event overflow interrupt
+ * @iommu_dev: pointer to iommu device
+ * @ops: iommu access operations pointer.
+ * @hw_ops: iommu pm hw access operations pointer.
+ * @always_on: 1 if iommu is always on, 0 otherwise.
+ */
+struct iommu_info {
+	const char *iommu_name;
+	void *base;
+	int evt_irq;
+	struct device *iommu_dev;
+	struct iommu_access_ops *ops;
+	struct iommu_pm_hw_ops *hw_ops;
+	unsigned int always_on;
+};
+
+/**
+ * struct iommu_pmon - main container for a perf mon data.
+ * @iommu_dir: debugfs directory for this iommu
+ * @iommu: iommu_info instance
+ * @iommu_list: iommu_list head
+ * @cnt_grp: list of counter groups
+ * @num_groups: number of counter groups
+ * @num_counters: number of counters per group
+ * @event_cls_supported: an array of event classes supported for this PMU
+ * @nevent_cls_supported: number of event classes supported.
+ * @enabled: Indicates whether perf. mon is enabled or not
+ * @iommu_attach_count: number of times the iommu has been attached
+ * @lock: mutex used to synchronize access to shared data
+ */
+struct iommu_pmon {
+	struct dentry *iommu_dir;
+	struct iommu_info iommu;
+	struct list_head iommu_list;
+	struct iommu_pmon_cnt_group *cnt_grp;
+	u32 num_groups;
+	u32 num_counters;
+	u32 *event_cls_supported;
+	u32 nevent_cls_supported;
+	unsigned int enabled;
+	unsigned int iommu_attach_count;
+	struct mutex lock;
+};
+
+/**
+ * struct iommu_pm_hw_ops - Callbacks for accessing IOMMU HW
+ * @initialize_hw: Call to do any initialization before enabling ovf interrupts
+ * @is_hw_access_OK: Returns 1 if we can access HW, 0 otherwise
+ * @grp_enable: Call to enable a counter group
+ * @grp_disable: Call to disable a counter group
+ * @enable_pm: Call to enable PM
+ * @disable_pm: Call to disable PM
+ * @reset_counters: Call to reset counters
+ * @check_for_overflow: Call to check for overflow
+ * @evt_ovfl_int_handler: Overflow interrupt handler callback
+ * @counter_enable: Call to enable counters
+ * @counter_disable: Call to disable counters
+ * @ovfl_int_enable: Call to enable overflow interrupts
+ * @ovfl_int_disable: Call to disable overflow interrupts
+ * @set_event_class: Call to set event class
+ * @read_counter: Call to read a counter value
+ */
+struct iommu_pm_hw_ops {
+	void (*initialize_hw)(const struct iommu_pmon *);
+	unsigned int (*is_hw_access_OK)(const struct iommu_pmon *);
+	void (*grp_enable)(struct iommu_info *, unsigned int);
+	void (*grp_disable)(struct iommu_info *, unsigned int);
+	void (*enable_pm)(struct iommu_info *);
+	void (*disable_pm)(struct iommu_info *);
+	void (*reset_counters)(const struct iommu_info *);
+	void (*check_for_overflow)(struct iommu_pmon *);
+	irqreturn_t (*evt_ovfl_int_handler)(int, void *);
+	void (*counter_enable)(struct iommu_info *,
+			struct iommu_pmon_counter *);
+	void (*counter_disable)(struct iommu_info *,
+			struct iommu_pmon_counter *);
+	void (*ovfl_int_enable)(struct iommu_info *,
+			const struct iommu_pmon_counter *);
+	void 
(*ovfl_int_disable)(struct iommu_info *, + const struct iommu_pmon_counter *); + void (*set_event_class)(struct iommu_pmon *pmon, unsigned int, + unsigned int); + unsigned int (*read_counter)(struct iommu_pmon_counter *); +}; + +#define MSM_IOMMU_PMU_NO_EVENT_CLASS -1 + +#ifdef CONFIG_MSM_IOMMU_PMON + +/** + * Get pointer to PMU hardware access functions for IOMMUv0 PMU + */ +struct iommu_pm_hw_ops *iommu_pm_get_hw_ops_v0(void); + +/** + * Get pointer to PMU hardware access functions for IOMMUv1 PMU + */ +struct iommu_pm_hw_ops *iommu_pm_get_hw_ops_v1(void); + +/** + * Allocate memory for performance monitor structure. Must + * be called before iommu_pm_iommu_register + */ +struct iommu_pmon *msm_iommu_pm_alloc(struct device *iommu_dev); + +/** + * Free memory previously allocated with iommu_pm_alloc + */ +void msm_iommu_pm_free(struct device *iommu_dev); + +/** + * Register iommu with the performance monitor module. + */ +int msm_iommu_pm_iommu_register(struct iommu_pmon *info); + +/** + * Unregister iommu with the performance monitor module. + */ +void msm_iommu_pm_iommu_unregister(struct device *dev); + +/** + * Called by iommu driver when attaching is complete + * Must NOT be called with IOMMU mutexes held. + * @param iommu_dev IOMMU device that is attached + */ +void msm_iommu_attached(struct device *dev); + +/** + * Called by iommu driver before detaching. + * Must NOT be called with IOMMU mutexes held. 
+ * @param iommu_dev IOMMU device that is going to be detached + */ +void msm_iommu_detached(struct device *dev); +#else +static inline struct iommu_pm_hw_ops *iommu_pm_get_hw_ops_v0(void) +{ + return NULL; +} + +static inline struct iommu_pm_hw_ops *iommu_pm_get_hw_ops_v1(void) +{ + return NULL; +} + +static inline struct iommu_pmon *msm_iommu_pm_alloc(struct device *iommu_dev) +{ + return NULL; +} + +static inline void msm_iommu_pm_free(struct device *iommu_dev) +{ + return; +} + +static inline int msm_iommu_pm_iommu_register(struct iommu_pmon *info) +{ + return -EIO; +} + +static inline void msm_iommu_pm_iommu_unregister(struct device *dev) +{ +} + +static inline void msm_iommu_attached(struct device *dev) +{ +} + +static inline void msm_iommu_detached(struct device *dev) +{ +} +#endif +#endif diff --git a/drivers/iommu/qcom/msm_iommu_priv.h b/drivers/iommu/qcom/msm_iommu_priv.h new file mode 100644 index 000000000000..b013f0bd2b58 --- /dev/null +++ b/drivers/iommu/qcom/msm_iommu_priv.h @@ -0,0 +1,73 @@ +/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef MSM_IOMMU_PRIV_H +#define MSM_IOMMU_PRIV_H + +/** + * struct msm_iommu_pt - Container for first level page table and its + * attributes. + * fl_table: Pointer to the first level page table. + * redirect: Set to 1 if L2 redirect for page tables are enabled, 0 otherwise. + * unaligned_fl_table: Original address of memory for the page table. 
+ * fl_table is manually aligned (as per spec) but we need the original address + * to free the table. + * fl_table_shadow: This is "copy" of the fl_table with some differences. + * It stores the same information as fl_table except that instead of storing + * second level page table address + page table entry descriptor bits it + * stores the second level page table address and the number of used second + * level page tables entries. This is used to check whether we need to free + * the second level page table which allows us to also free the second level + * page table after doing a TLB invalidate which should catch bugs with + * clients trying to unmap an address that is being used. + * fl_table_shadow will use the lower 9 bits for the use count and the upper + * bits for the second level page table address. + * sl_table_shadow uses the same concept as fl_table_shadow but for LPAE 2nd + * level page tables. + */ +#ifdef CONFIG_IOMMU_LPAE +struct msm_iommu_pt { + u64 *fl_table; + u64 **sl_table_shadow; + int redirect; + u64 *unaligned_fl_table; +}; +#else +struct msm_iommu_pt { + u32 *fl_table; + int redirect; + u32 *fl_table_shadow; +}; +#endif +/** + * struct msm_iommu_priv - Container for page table attributes and other + * private iommu domain information. + * attributes. + * pt: Page table attribute structure + * list_attached: List of devices (contexts) attached to this domain. + * client_name: Name of the domain client. 
+ */ +struct msm_iommu_priv { + struct msm_iommu_pt pt; + struct list_head list_attached; + struct iommu_domain domain; + const char *client_name; +}; + +static inline struct msm_iommu_priv *to_msm_priv(struct iommu_domain *dom) +{ + return container_of(dom, struct msm_iommu_priv, domain); +} + +int msm_iommu_init(struct device *dev); + +#endif diff --git a/drivers/iommu/qcom/msm_iommu_sec.c b/drivers/iommu/qcom/msm_iommu_sec.c new file mode 100644 index 000000000000..03ca942c3c2d --- /dev/null +++ b/drivers/iommu/qcom/msm_iommu_sec.c @@ -0,0 +1,773 @@ +/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/errno.h> +#include <linux/io.h> +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/slab.h> +#include <linux/iommu.h> +#include <linux/clk.h> +#include <linux/scatterlist.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/kmemleak.h> +#include <linux/dma-mapping.h> +#include <linux/qcom_scm.h> + +#include <asm/cacheflush.h> +#include <asm/sizes.h> + +#include "msm_iommu_perfmon.h" +#include "msm_iommu_hw-v1.h" +#include "msm_iommu_priv.h" +#include "qcom_iommu.h" +#include <trace/events/kmem.h> + +/* bitmap of the page sizes currently supported */ +#define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M) + +/* commands for SCM_SVC_MP */ +#define IOMMU_SECURE_CFG 2 +#define IOMMU_SECURE_PTBL_SIZE 3 +#define IOMMU_SECURE_PTBL_INIT 4 +#define IOMMU_SET_CP_POOL_SIZE 5 +#define IOMMU_SECURE_MAP 6 +#define IOMMU_SECURE_UNMAP 7 +#define IOMMU_SECURE_MAP2 0x0B +#define IOMMU_SECURE_MAP2_FLAT 0x12 +#define IOMMU_SECURE_UNMAP2 0x0C +#define IOMMU_SECURE_UNMAP2_FLAT 0x13 +#define IOMMU_TLBINVAL_FLAG 0x00000001 + +/* commands for SCM_SVC_UTIL */ +#define IOMMU_DUMP_SMMU_FAULT_REGS 0X0C +#define SCM_SVC_MP 0xc + +static struct iommu_access_ops *iommu_access_ops; +static int is_secure; + +static const struct of_device_id msm_smmu_list[] = { + { .compatible = "qcom,msm-smmu-v1", }, + { .compatible = "qcom,msm-smmu-v2", }, + { } +}; + +struct msm_scm_paddr_list { + unsigned int list; + unsigned int list_size; + unsigned int size; +}; + +struct msm_scm_mapping_info { + unsigned int id; + unsigned int ctx_id; + unsigned int va; + unsigned int size; +}; + +struct msm_scm_map2_req { + struct msm_scm_paddr_list plist; + struct msm_scm_mapping_info info; + unsigned int flags; +}; + +struct msm_scm_unmap2_req { + struct msm_scm_mapping_info info; + unsigned int 
flags; +}; + +struct msm_cp_pool_size { + uint32_t size; + uint32_t spare; +}; + +#define NUM_DUMP_REGS 14 +/* + * some space to allow the number of registers returned by the secure + * environment to grow + */ +#define WIGGLE_ROOM (NUM_DUMP_REGS * 2) +/* Each entry is a (reg_addr, reg_val) pair, hence the * 2 */ +#define SEC_DUMP_SIZE ((NUM_DUMP_REGS * 2) + WIGGLE_ROOM) + +struct msm_scm_fault_regs_dump { + uint32_t dump_size; + uint32_t dump_data[SEC_DUMP_SIZE]; +} __aligned(PAGE_SIZE); + +void msm_iommu_sec_set_access_ops(struct iommu_access_ops *access_ops) +{ + iommu_access_ops = access_ops; +} + +static int msm_iommu_dump_fault_regs(int smmu_id, int cb_num, + struct msm_scm_fault_regs_dump *regs) +{ + int ret; + + __dma_flush_area(regs, sizeof(*regs)); + + ret = qcom_scm_iommu_dump_fault_regs(smmu_id, cb_num, + virt_to_phys(regs), sizeof(*regs)); + + __dma_flush_area(regs, sizeof(*regs)); + + return ret; +} + +static int msm_iommu_reg_dump_to_regs( + struct msm_iommu_context_reg ctx_regs[], + struct msm_scm_fault_regs_dump *dump, struct msm_iommu_drvdata *drvdata, + struct msm_iommu_ctx_drvdata *ctx_drvdata) +{ + int i, j, ret = 0; + const uint32_t nvals = (dump->dump_size / sizeof(uint32_t)); + uint32_t *it = (uint32_t *) dump->dump_data; + const uint32_t * const end = ((uint32_t *) dump) + nvals; + phys_addr_t phys_base = drvdata->phys_base; + int ctx = ctx_drvdata->num; + + if (!nvals) + return -EINVAL; + + for (i = 1; it < end; it += 2, i += 2) { + unsigned int reg_offset; + uint32_t addr = *it; + uint32_t val = *(it + 1); + struct msm_iommu_context_reg *reg = NULL; + if (addr < phys_base) { + pr_err("Bogus-looking register (0x%x) for Iommu with base at %pa. 
Skipping.\n", + addr, &phys_base); + continue; + } + reg_offset = addr - phys_base; + + for (j = 0; j < MAX_DUMP_REGS; ++j) { + struct dump_regs_tbl_entry dump_reg = dump_regs_tbl[j]; + void *test_reg; + unsigned int test_offset; + switch (dump_reg.dump_reg_type) { + case DRT_CTX_REG: + test_reg = CTX_REG(dump_reg.reg_offset, + drvdata->cb_base, ctx); + break; + case DRT_GLOBAL_REG: + test_reg = GLB_REG( + dump_reg.reg_offset, drvdata->glb_base); + break; + case DRT_GLOBAL_REG_N: + test_reg = GLB_REG_N( + drvdata->glb_base, ctx, + dump_reg.reg_offset); + break; + default: + pr_err("Unknown dump_reg_type: 0x%x\n", + dump_reg.dump_reg_type); + BUG(); + break; + } + test_offset = test_reg - drvdata->glb_base; + if (test_offset == reg_offset) { + reg = &ctx_regs[j]; + break; + } + } + + if (reg == NULL) { + pr_debug("Unknown register in secure CB dump: %x\n", + addr); + continue; + } + + if (reg->valid) { + WARN(1, "Invalid (repeated?) register in CB dump: %x\n", + addr); + continue; + } + + reg->val = val; + reg->valid = true; + } + + if (i != nvals) { + pr_err("Invalid dump! 
%d != %d\n", i, nvals); + ret = 1; + } + + for (i = 0; i < MAX_DUMP_REGS; ++i) { + if (!ctx_regs[i].valid) { + if (dump_regs_tbl[i].must_be_present) { + pr_err("Register missing from dump for ctx %d: %s, 0x%x\n", + ctx, + dump_regs_tbl[i].name, + dump_regs_tbl[i].reg_offset); + ret = 1; + } + ctx_regs[i].val = 0xd00dfeed; + } + } + + return ret; +} + +irqreturn_t msm_iommu_secure_fault_handler_v2(int irq, void *dev_id) +{ + struct platform_device *pdev = dev_id; + struct msm_iommu_drvdata *drvdata; + struct msm_iommu_ctx_drvdata *ctx_drvdata; + struct msm_scm_fault_regs_dump *regs; + int tmp, ret = IRQ_HANDLED; + + iommu_access_ops->iommu_lock_acquire(0); + + BUG_ON(!pdev); + + drvdata = dev_get_drvdata(pdev->dev.parent); + BUG_ON(!drvdata); + + ctx_drvdata = dev_get_drvdata(&pdev->dev); + BUG_ON(!ctx_drvdata); + + regs = kzalloc(sizeof(*regs), GFP_ATOMIC); + if (!regs) { + pr_err("%s: Couldn't allocate memory\n", __func__); + goto lock_release; + } + + if (!drvdata->ctx_attach_count) { + pr_err("Unexpected IOMMU page fault from secure context bank!\n"); + pr_err("name = %s\n", drvdata->name); + pr_err("Power is OFF. Unable to read page fault information\n"); + /* + * We cannot determine which context bank caused the issue so + * we just return handled here to ensure IRQ handler code is + * happy + */ + goto free_regs; + } + + tmp = msm_iommu_dump_fault_regs(drvdata->sec_id, + ctx_drvdata->num, regs); + + if (tmp) { + pr_err("%s: Couldn't dump fault registers (%d) %s, ctx: %d\n", + __func__, tmp, drvdata->name, ctx_drvdata->num); + goto free_regs; + } else { + struct msm_iommu_context_reg ctx_regs[MAX_DUMP_REGS]; + memset(ctx_regs, 0, sizeof(ctx_regs)); + tmp = msm_iommu_reg_dump_to_regs( + ctx_regs, regs, drvdata, ctx_drvdata); + if (tmp < 0) { + ret = IRQ_NONE; + pr_err("Incorrect response from secure environment\n"); + goto free_regs; + } + + if (ctx_regs[DUMP_REG_FSR].val) { + if (tmp) + pr_err("Incomplete fault register dump. 
Printout will be incomplete.\n"); + if (!ctx_drvdata->attached_domain) { + pr_err("Bad domain in interrupt handler\n"); + tmp = -ENOSYS; + } else { + tmp = report_iommu_fault( + ctx_drvdata->attached_domain, + &ctx_drvdata->pdev->dev, + COMBINE_DUMP_REG( + ctx_regs[DUMP_REG_FAR1].val, + ctx_regs[DUMP_REG_FAR0].val), + 0); + } + + /* if the fault wasn't handled by someone else: */ + if (tmp == -ENOSYS) { + pr_err("Unexpected IOMMU page fault from secure context bank!\n"); + pr_err("name = %s\n", drvdata->name); + pr_err("context = %s (%d)\n", ctx_drvdata->name, + ctx_drvdata->num); + pr_err("Interesting registers:\n"); + print_ctx_regs(ctx_regs); + } + } else { + ret = IRQ_NONE; + } + } +free_regs: + kfree(regs); +lock_release: + iommu_access_ops->iommu_lock_release(0); + return ret; +} + +int msm_iommu_sec_program_iommu(struct msm_iommu_drvdata *drvdata, + struct msm_iommu_ctx_drvdata *ctx_drvdata) +{ + if (drvdata->smmu_local_base) { + writel_relaxed(0xFFFFFFFF, + drvdata->smmu_local_base + SMMU_INTR_SEL_NS); + mb(); + } + + return qcom_scm_restore_sec_cfg(drvdata->sec_id, ctx_drvdata->num); +} + +static int msm_iommu_sec_map2(struct msm_scm_map2_req *map) +{ + u32 flags; + +#ifdef CONFIG_MSM_IOMMU_TLBINVAL_ON_MAP + flags = IOMMU_TLBINVAL_FLAG; +#else + flags = 0; +#endif + + return qcom_scm_iommu_secure_map(map->plist.list, + map->plist.list_size, + map->plist.size, + map->info.id, + map->info.ctx_id, + map->info.va, + map->info.size, + flags); +} + +static int msm_iommu_sec_ptbl_map(struct msm_iommu_drvdata *iommu_drvdata, + struct msm_iommu_ctx_drvdata *ctx_drvdata, + unsigned long va, phys_addr_t pa, size_t len) +{ + struct msm_scm_map2_req map; + void *flush_va, *flush_va_end; + int ret = 0; + + if (!IS_ALIGNED(va, SZ_1M) || !IS_ALIGNED(len, SZ_1M) || + !IS_ALIGNED(pa, SZ_1M)) + return -EINVAL; + map.plist.list = virt_to_phys(&pa); + map.plist.list_size = 1; + map.plist.size = len; + map.info.id = iommu_drvdata->sec_id; + map.info.ctx_id = ctx_drvdata->num; + 
map.info.va = va; + map.info.size = len; + + flush_va = &pa; + flush_va_end = (void *) + (((unsigned long) flush_va) + sizeof(phys_addr_t)); + + /* + * Ensure that the buffer is in RAM by the time it gets to TZ + */ + __dma_flush_area(flush_va, sizeof(phys_addr_t)); + + ret = msm_iommu_sec_map2(&map); + if (ret) + return -EINVAL; + + return 0; +} + +static unsigned int get_phys_addr(struct scatterlist *sg) +{ + /* + * Try sg_dma_address first so that we can + * map carveout regions that do not have a + * struct page associated with them. + */ + unsigned int pa = sg_dma_address(sg); + if (pa == 0) + pa = sg_phys(sg); + return pa; +} + +static int msm_iommu_sec_ptbl_map_range(struct msm_iommu_drvdata *iommu_drvdata, + struct msm_iommu_ctx_drvdata *ctx_drvdata, + unsigned long va, struct scatterlist *sg, size_t len) +{ + struct scatterlist *sgiter; + struct msm_scm_map2_req map; + unsigned int *pa_list = 0; + unsigned int pa, cnt; + void *flush_va, *flush_va_end; + unsigned int offset = 0, chunk_offset = 0; + int ret; + + if (!IS_ALIGNED(va, SZ_1M) || !IS_ALIGNED(len, SZ_1M)) + return -EINVAL; + + map.info.id = iommu_drvdata->sec_id; + map.info.ctx_id = ctx_drvdata->num; + map.info.va = va; + map.info.size = len; + + if (sg->length == len) { + /* + * physical address for secure mapping needs + * to be 1MB aligned + */ + pa = get_phys_addr(sg); + if (!IS_ALIGNED(pa, SZ_1M)) + return -EINVAL; + map.plist.list = virt_to_phys(&pa); + map.plist.list_size = 1; + map.plist.size = len; + flush_va = &pa; + } else { + sgiter = sg; + if (!IS_ALIGNED(sgiter->length, SZ_1M)) + return -EINVAL; + cnt = sg->length / SZ_1M; + while ((sgiter = sg_next(sgiter))) { + if (!IS_ALIGNED(sgiter->length, SZ_1M)) + return -EINVAL; + cnt += sgiter->length / SZ_1M; + } + + pa_list = kmalloc(cnt * sizeof(*pa_list), GFP_KERNEL); + if (!pa_list) + return -ENOMEM; + + sgiter = sg; + cnt = 0; + pa = get_phys_addr(sgiter); + if (!IS_ALIGNED(pa, SZ_1M)) { + kfree(pa_list); + return -EINVAL; + } + while 
(offset < len) { + pa += chunk_offset; + pa_list[cnt] = pa; + chunk_offset += SZ_1M; + offset += SZ_1M; + cnt++; + + if (chunk_offset >= sgiter->length && offset < len) { + chunk_offset = 0; + sgiter = sg_next(sgiter); + pa = get_phys_addr(sgiter); + } + } + + map.plist.list = virt_to_phys(pa_list); + map.plist.list_size = cnt; + map.plist.size = SZ_1M; + flush_va = pa_list; + } + + /* + * Ensure that the buffer is in RAM by the time it gets to TZ + */ + flush_va_end = (void *) (((unsigned long) flush_va) + + (map.plist.list_size * sizeof(*pa_list))); + __dma_flush_area(flush_va, (map.plist.list_size * sizeof(*pa_list))); + + ret = msm_iommu_sec_map2(&map); + kfree(pa_list); + + return ret; +} + +static int msm_iommu_sec_ptbl_unmap(struct msm_iommu_drvdata *iommu_drvdata, + struct msm_iommu_ctx_drvdata *ctx_drvdata, + unsigned long va, size_t len) +{ + if (!IS_ALIGNED(va, SZ_1M) || !IS_ALIGNED(len, SZ_1M)) + return -EINVAL; + + return qcom_scm_iommu_secure_unmap(iommu_drvdata->sec_id, + ctx_drvdata->num, + va, + len, + IOMMU_TLBINVAL_FLAG); +} + +static struct iommu_domain * msm_iommu_domain_alloc(unsigned type) +{ + struct msm_iommu_priv *priv; + struct iommu_domain *domain; + + if (type != IOMMU_DOMAIN_UNMANAGED) + return NULL; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return NULL; + + INIT_LIST_HEAD(&priv->list_attached); + domain = &priv->domain; + return domain; +} + +static void msm_iommu_domain_free(struct iommu_domain *domain) +{ + struct msm_iommu_priv *priv; + + iommu_access_ops->iommu_lock_acquire(0); + priv = to_msm_priv(domain); + + kfree(priv); + iommu_access_ops->iommu_lock_release(0); +} + +static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) +{ + struct msm_iommu_priv *priv; + struct msm_iommu_drvdata *iommu_drvdata; + struct msm_iommu_ctx_drvdata *ctx_drvdata; + struct msm_iommu_ctx_drvdata *tmp_drvdata; + int ret = 0; + + iommu_access_ops->iommu_lock_acquire(0); + + priv = to_msm_priv(domain); + if 
(!priv || !dev) { + ret = -EINVAL; + goto fail; + } + + iommu_drvdata = dev_get_drvdata(dev->parent); + ctx_drvdata = dev_get_drvdata(dev); + if (!iommu_drvdata || !ctx_drvdata) { + ret = -EINVAL; + goto fail; + } + + if (!list_empty(&ctx_drvdata->attached_elm)) { + ret = -EBUSY; + goto fail; + } + + list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm) + if (tmp_drvdata == ctx_drvdata) { + ret = -EBUSY; + goto fail; + } + + ret = iommu_access_ops->iommu_power_on(iommu_drvdata); + if (ret) + goto fail; + + /* We can only do this once */ + if (!iommu_drvdata->ctx_attach_count) { + ret = msm_iommu_sec_program_iommu(iommu_drvdata, + ctx_drvdata); + + /* bfb settings are always programmed by HLOS */ + program_iommu_bfb_settings(iommu_drvdata->base, + iommu_drvdata->bfb_settings); + } + + list_add(&(ctx_drvdata->attached_elm), &priv->list_attached); + ctx_drvdata->attached_domain = domain; + ++iommu_drvdata->ctx_attach_count; + + iommu_access_ops->iommu_lock_release(0); + + msm_iommu_attached(dev->parent); + return ret; +fail: + iommu_access_ops->iommu_lock_release(0); + return ret; +} + +static void msm_iommu_detach_dev(struct iommu_domain *domain, + struct device *dev) +{ + struct msm_iommu_drvdata *iommu_drvdata; + struct msm_iommu_ctx_drvdata *ctx_drvdata; + + if (!dev) + return; + + msm_iommu_detached(dev->parent); + + iommu_access_ops->iommu_lock_acquire(0); + + iommu_drvdata = dev_get_drvdata(dev->parent); + ctx_drvdata = dev_get_drvdata(dev); + if (!iommu_drvdata || !ctx_drvdata || !ctx_drvdata->attached_domain) + goto fail; + + list_del_init(&ctx_drvdata->attached_elm); + ctx_drvdata->attached_domain = NULL; + + iommu_access_ops->iommu_power_off(iommu_drvdata); + BUG_ON(iommu_drvdata->ctx_attach_count == 0); + --iommu_drvdata->ctx_attach_count; +fail: + iommu_access_ops->iommu_lock_release(0); +} + +static int get_drvdata(struct iommu_domain *domain, + struct msm_iommu_drvdata **iommu_drvdata, + struct msm_iommu_ctx_drvdata **ctx_drvdata) +{ + 
struct msm_iommu_priv *priv = to_msm_priv(domain); + struct msm_iommu_ctx_drvdata *ctx; + + list_for_each_entry(ctx, &priv->list_attached, attached_elm) { + if (ctx->attached_domain == domain) + break; + } + + if (ctx->attached_domain != domain) + return -EINVAL; + + *ctx_drvdata = ctx; + *iommu_drvdata = dev_get_drvdata(ctx->pdev->dev.parent); + return 0; +} + +static int msm_iommu_map(struct iommu_domain *domain, unsigned long va, + phys_addr_t pa, size_t len, int prot) +{ + struct msm_iommu_drvdata *iommu_drvdata; + struct msm_iommu_ctx_drvdata *ctx_drvdata; + int ret = 0; + + iommu_access_ops->iommu_lock_acquire(0); + + ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata); + if (ret) + goto fail; + + ret = msm_iommu_sec_ptbl_map(iommu_drvdata, ctx_drvdata, + va, pa, len); +fail: + iommu_access_ops->iommu_lock_release(0); + return ret; +} + +static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va, + size_t len) +{ + struct msm_iommu_drvdata *iommu_drvdata; + struct msm_iommu_ctx_drvdata *ctx_drvdata; + int ret = -ENODEV; + + iommu_access_ops->iommu_lock_acquire(0); + + ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata); + if (ret) + goto fail; + + ret = msm_iommu_sec_ptbl_unmap(iommu_drvdata, ctx_drvdata, + va, len); +fail: + iommu_access_ops->iommu_lock_release(0); + + /* the IOMMU API requires us to return how many bytes were unmapped */ + len = ret ? 
0 : len; + return len; +} + +static int msm_iommu_map_range(struct iommu_domain *domain, unsigned int va, + struct scatterlist *sg, unsigned int len, + int prot) +{ + int ret; + struct msm_iommu_drvdata *iommu_drvdata; + struct msm_iommu_ctx_drvdata *ctx_drvdata; + + iommu_access_ops->iommu_lock_acquire(0); + + ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata); + if (ret) + goto fail; + ret = msm_iommu_sec_ptbl_map_range(iommu_drvdata, ctx_drvdata, + va, sg, len); +fail: + iommu_access_ops->iommu_lock_release(0); + return ret; +} + + +static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va, + unsigned int len) +{ + struct msm_iommu_drvdata *iommu_drvdata; + struct msm_iommu_ctx_drvdata *ctx_drvdata; + int ret = -EINVAL; + + if (!IS_ALIGNED(va, SZ_1M) || !IS_ALIGNED(len, SZ_1M)) + return -EINVAL; + + iommu_access_ops->iommu_lock_acquire(0); + + ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata); + if (ret) + goto fail; + + ret = msm_iommu_sec_ptbl_unmap(iommu_drvdata, ctx_drvdata, va, len); + +fail: + iommu_access_ops->iommu_lock_release(0); + return ret ? 
ret : 0; +} + +static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain, + phys_addr_t va) +{ + return 0; +} + +void msm_iommu_check_scm_call_avail(void) +{ + is_secure = qcom_scm_is_call_available(SCM_SVC_MP, IOMMU_SECURE_CFG); +} + +int msm_iommu_get_scm_call_avail(void) +{ + return is_secure; +} + +static struct iommu_ops msm_iommu_ops = { + .domain_alloc = msm_iommu_domain_alloc, + .domain_free = msm_iommu_domain_free, + .attach_dev = msm_iommu_attach_dev, + .detach_dev = msm_iommu_detach_dev, + .map = msm_iommu_map, + .unmap = msm_iommu_unmap, +/* .map_range = msm_iommu_map_range,*/ + .map_sg = default_iommu_map_sg, +/* .unmap_range = msm_iommu_unmap_range,*/ + .iova_to_phys = msm_iommu_iova_to_phys, + .pgsize_bitmap = MSM_IOMMU_PGSIZES, +}; + +static int __init msm_iommu_sec_init(void) +{ + int ret; + + ret = bus_register(&msm_iommu_sec_bus_type); + if (ret) + return ret; + + ret = bus_set_iommu(&msm_iommu_sec_bus_type, &msm_iommu_ops); + if (ret) { + bus_unregister(&msm_iommu_sec_bus_type); + return ret; + } + + return 0; +} + +subsys_initcall(msm_iommu_sec_init); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("MSM SMMU Secure Driver"); diff --git a/drivers/iommu/qcom/qcom_iommu.h b/drivers/iommu/qcom/qcom_iommu.h new file mode 100644 index 000000000000..037c69d727a0 --- /dev/null +++ b/drivers/iommu/qcom/qcom_iommu.h @@ -0,0 +1,387 @@ +/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef MSM_IOMMU_H +#define MSM_IOMMU_H + +#include <linux/interrupt.h> +#include <linux/clk.h> +#include <linux/list.h> +#include <linux/regulator/consumer.h> + +/* Private pgprot flag */ +#define IOMMU_PRIV 16 + +extern pgprot_t pgprot_kernel; +extern struct bus_type msm_iommu_sec_bus_type; +extern struct iommu_access_ops iommu_access_ops_v0; +extern struct iommu_access_ops iommu_access_ops_v1; + +/* Domain attributes */ +#define MSM_IOMMU_DOMAIN_PT_CACHEABLE 0x1 +#define MSM_IOMMU_DOMAIN_PT_SECURE 0x2 + +/* Mask for the cache policy attribute */ +#define MSM_IOMMU_CP_MASK 0x03 + +#define DOMAIN_ATTR_QCOM_COHERENT_HTW_DISABLE DOMAIN_ATTR_MAX + +/* Maximum number of Machine IDs that we are allowing to be mapped to the same + * context bank. The number of MIDs mapped to the same CB does not affect + * performance, but there is a practical limit on how many distinct MIDs may + * be present. These mappings are typically determined at design time and are + * not expected to change at run time. + */ +#define MAX_NUM_MIDS 32 + +/* Maximum number of SMT entries allowed by the system */ +#define MAX_NUM_SMR 128 + +#define MAX_NUM_BFB_REGS 32 + +/** + * struct msm_iommu_dev - a single IOMMU hardware instance + * name Human-readable name given to this IOMMU HW instance + * ncb Number of context banks present on this IOMMU HW instance + */ +struct msm_iommu_dev { + const char *name; + int ncb; + int ttbr_split; +}; + +/** + * struct msm_iommu_ctx_dev - an IOMMU context bank instance + * name Human-readable name given to this context bank + * num Index of this context bank within the hardware + * mids List of Machine IDs that are to be mapped into this context + * bank, terminated by -1. The MID is a set of signals on the + * AXI bus that identifies the function associated with a specific + * memory request. (See ARM spec). 
+ */ +struct msm_iommu_ctx_dev { + const char *name; + int num; + int mids[MAX_NUM_MIDS]; +}; + +/** + * struct msm_iommu_bfb_settings - a set of IOMMU BFB tuning parameters + * regs An array of register offsets to configure + * data Values to write to corresponding registers + * length Number of valid entries in the offset/val arrays + */ +struct msm_iommu_bfb_settings { + unsigned int regs[MAX_NUM_BFB_REGS]; + unsigned int data[MAX_NUM_BFB_REGS]; + int length; +}; + +/** + * struct msm_iommu_drvdata - A single IOMMU hardware instance + * @base: IOMMU config port base address (VA) + * @glb_base: IOMMU config port base address for global register space (VA) + * @phys_base: IOMMU physical base address. + * @ncb The number of contexts on this IOMMU + * @irq: Interrupt number + * @core: The bus clock for this IOMMU hardware instance + * @iface: The clock for the IOMMU bus interconnect + * @name: Human-readable name of this IOMMU device + * @bfb_settings: Optional BFB performance tuning parameters + * @dev: Struct device this hardware instance is tied to + * @list: List head to link all iommus together + * @halt_enabled: Set to 1 if IOMMU halt is supported in the IOMMU, 0 otherwise. + * @ctx_attach_count: Count of how many context are attached. + * @bus_client : Bus client needed to vote for bus bandwidth. + * @needs_rem_spinlock : 1 if remote spinlock is needed, 0 otherwise + * @powered_on: Powered status of the IOMMU. 0 means powered off. + * + * A msm_iommu_drvdata holds the global driver data about a single piece + * of an IOMMU hardware instance. 
+ */ +struct msm_iommu_drvdata { + void __iomem *base; + phys_addr_t phys_base; + void __iomem *glb_base; + void __iomem *cb_base; + void __iomem *smmu_local_base; + int ncb; + int ttbr_split; + struct clk *core; + struct clk *iface; + const char *name; + struct msm_iommu_bfb_settings *bfb_settings; + int sec_id; + struct device *dev; + struct list_head list; + int halt_enabled; + unsigned int ctx_attach_count; + unsigned int bus_client; + int needs_rem_spinlock; + int powered_on; + unsigned int model; + struct list_head masters; +}; + +int __enable_clocks(struct msm_iommu_drvdata *drvdata); +void __disable_clocks(struct msm_iommu_drvdata *drvdata); + +/** + * struct iommu_access_ops - Callbacks for accessing IOMMU + * @iommu_power_on: Turn on power to unit + * @iommu_power_off: Turn off power to unit + * @iommu_bus_vote: Vote for bus bandwidth + * @iommu_lock_initialize: Initialize the remote lock + * @iommu_lock_acquire: Acquire any locks needed + * @iommu_lock_release: Release locks needed + */ +struct iommu_access_ops { + int (*iommu_power_on)(struct msm_iommu_drvdata *); + void (*iommu_power_off)(struct msm_iommu_drvdata *); + int (*iommu_bus_vote)(struct msm_iommu_drvdata *drvdata, + unsigned int vote); + void * (*iommu_lock_initialize)(void); + void (*iommu_lock_acquire)(unsigned int need_extra_lock); + void (*iommu_lock_release)(unsigned int need_extra_lock); +}; + +void msm_iommu_add_drv(struct msm_iommu_drvdata *drv); +void msm_iommu_remove_drv(struct msm_iommu_drvdata *drv); +void program_iommu_bfb_settings(void __iomem *base, + const struct msm_iommu_bfb_settings *bfb_settings); +void iommu_halt(const struct msm_iommu_drvdata *iommu_drvdata); +void iommu_resume(const struct msm_iommu_drvdata *iommu_drvdata); + +/** + * struct msm_iommu_ctx_drvdata - an IOMMU context bank instance + * @num: Hardware context number of this context + * @pdev: Platform device associated wit this HW instance + * @attached_elm: List element for domains to track which devices 
are + * attached to them + * @attached_domain Domain currently attached to this context (if any) + * @name Human-readable name of this context device + * @sids List of Stream IDs mapped to this context + * @nsid Number of Stream IDs mapped to this context + * @secure_context true if this is a secure context programmed by + the secure environment, false otherwise + * @asid ASID used with this context. + * @attach_count Number of time this context has been attached. + * + * A msm_iommu_ctx_drvdata holds the driver data for a single context bank + * within each IOMMU hardware instance + */ +struct msm_iommu_ctx_drvdata { + int num; + struct platform_device *pdev; + struct list_head attached_elm; + struct iommu_domain *attached_domain; + const char *name; + u32 sids[MAX_NUM_SMR]; + unsigned int nsid; + unsigned int secure_context; + int asid; + int attach_count; + u32 sid_mask[MAX_NUM_SMR]; + unsigned int n_sid_mask; +}; + +enum dump_reg { + DUMP_REG_FIRST, + DUMP_REG_FAR0 = DUMP_REG_FIRST, + DUMP_REG_FAR1, + DUMP_REG_PAR0, + DUMP_REG_PAR1, + DUMP_REG_FSR, + DUMP_REG_FSYNR0, + DUMP_REG_FSYNR1, + DUMP_REG_TTBR0_0, + DUMP_REG_TTBR0_1, + DUMP_REG_TTBR1_0, + DUMP_REG_TTBR1_1, + DUMP_REG_SCTLR, + DUMP_REG_ACTLR, + DUMP_REG_PRRR, + DUMP_REG_MAIR0 = DUMP_REG_PRRR, + DUMP_REG_NMRR, + DUMP_REG_MAIR1 = DUMP_REG_NMRR, + DUMP_REG_CBAR_N, + DUMP_REG_CBFRSYNRA_N, + MAX_DUMP_REGS, +}; + +enum dump_reg_type { + DRT_CTX_REG, + DRT_GLOBAL_REG, + DRT_GLOBAL_REG_N, +}; + +enum model_id { + QSMMUv1 = 1, + QSMMUv2, + MMU_500 = 500, + MAX_MODEL, +}; + +struct dump_regs_tbl_entry { + /* + * To keep things context-bank-agnostic, we only store the + * register offset in `reg_offset' + */ + unsigned int reg_offset; + const char *name; + int must_be_present; + enum dump_reg_type dump_reg_type; +}; +extern struct dump_regs_tbl_entry dump_regs_tbl[MAX_DUMP_REGS]; + +#define COMBINE_DUMP_REG(upper, lower) (((u64) upper << 32) | lower) + +struct msm_iommu_context_reg { + uint32_t val; + bool valid; 
+}; + +void print_ctx_regs(struct msm_iommu_context_reg regs[]); + +/* + * Interrupt handler for the IOMMU context fault interrupt. Hooking the + * interrupt is not supported in the API yet, but this will print an error + * message and dump useful IOMMU registers. + */ +irqreturn_t msm_iommu_global_fault_handler(int irq, void *dev_id); +irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id); +irqreturn_t msm_iommu_fault_handler_v2(int irq, void *dev_id); +irqreturn_t msm_iommu_secure_fault_handler_v2(int irq, void *dev_id); + +enum { + PROC_APPS, + PROC_GPU, + PROC_MAX +}; + +/* Expose structure to allow kgsl iommu driver to use the same structure to + * communicate to GPU the addresses of the flag and turn variables. + */ +struct remote_iommu_petersons_spinlock { + uint32_t flag[PROC_MAX]; + uint32_t turn; +}; + +#ifdef CONFIG_QCOM_IOMMU_V1 +void *msm_iommu_lock_initialize(void); +void msm_iommu_mutex_lock(void); +void msm_iommu_mutex_unlock(void); +void msm_set_iommu_access_ops(struct iommu_access_ops *ops); +struct iommu_access_ops *msm_get_iommu_access_ops(void); +#else +static inline void *msm_iommu_lock_initialize(void) +{ + return NULL; +} +static inline void msm_iommu_mutex_lock(void) { } +static inline void msm_iommu_mutex_unlock(void) { } +static inline void msm_set_iommu_access_ops(struct iommu_access_ops *ops) +{ + +} +static inline struct iommu_access_ops *msm_get_iommu_access_ops(void) +{ + return NULL; +} +#endif + +#ifdef CONFIG_QCOM_IOMMU_V1 +/* + * Look up an IOMMU context device by its context name. NULL if none found. + * Useful for testing and drivers that do not yet fully have IOMMU stuff in + * their platform devices. + */ +struct device *msm_iommu_get_ctx(const char *ctx_name); +#else +static inline struct device *msm_iommu_get_ctx(const char *ctx_name) +{ + return NULL; +} +#endif + +/* + * Function to program the global registers of an IOMMU securely. 
+ * This should only be called on IOMMUs for which kernel programming + * of global registers is not possible + */ +void msm_iommu_sec_set_access_ops(struct iommu_access_ops *access_ops); +int msm_iommu_sec_program_iommu(struct msm_iommu_drvdata *drvdata, + struct msm_iommu_ctx_drvdata *ctx_drvdata); +int is_vfe_secure(void); + +#ifdef CONFIG_MSM_IOMMU_V0 +static inline int msm_soc_version_supports_iommu_v0(void) +{ + static int soc_supports_v0 = -1; +#ifdef CONFIG_OF + struct device_node *node; +#endif + + if (soc_supports_v0 != -1) + return soc_supports_v0; + +#ifdef CONFIG_OF + node = of_find_compatible_node(NULL, NULL, "qcom,msm-smmu-v0"); + if (node) { + soc_supports_v0 = 1; + of_node_put(node); + return 1; + } +#endif + if (cpu_is_msm8960() && + SOCINFO_VERSION_MAJOR(socinfo_get_version()) < 2) { + soc_supports_v0 = 0; + return 0; + } + + if (cpu_is_msm8x60() && + (SOCINFO_VERSION_MAJOR(socinfo_get_version()) != 2 || + SOCINFO_VERSION_MINOR(socinfo_get_version()) < 1)) { + soc_supports_v0 = 0; + return 0; + } + + soc_supports_v0 = 1; + return 1; +} +#else +static inline int msm_soc_version_supports_iommu_v0(void) +{ + return 0; +} +#endif + +int msm_iommu_get_scm_call_avail(void); +void msm_iommu_check_scm_call_avail(void); + +u32 msm_iommu_get_mair0(void); +u32 msm_iommu_get_mair1(void); +u32 msm_iommu_get_prrr(void); +u32 msm_iommu_get_nmrr(void); + +/* events for notifiers passed to msm_iommu_register_notify */ +#define TLB_SYNC_TIMEOUT 1 + +#ifdef CONFIG_QCOM_IOMMU_V1 +void msm_iommu_register_notify(struct notifier_block *nb); +#else +static inline void msm_iommu_register_notify(struct notifier_block *nb) +{ +} +#endif + +#endif diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 24d388d74011..83836c432438 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -137,6 +137,8 @@ static void gic_enable_redist(bool enable) u32 count = 1000000; /* 1s! 
*/ u32 val; + return; + rbase = gic_data_rdist_rd_base(); val = readl_relaxed(rbase + GICR_WAKER); @@ -538,8 +540,8 @@ static void gic_cpu_init(void) gic_cpu_config(rbase, gic_redist_wait_for_rwp); /* Give LPIs a spin */ - if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis()) - its_cpu_init(); + //if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis()) + // its_cpu_init(); /* initialise system registers */ gic_cpu_sys_reg_init(); @@ -962,8 +964,8 @@ static int __init gic_init_bases(void __iomem *dist_base, set_handle_irq(gic_handle_irq); - if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis()) - its_init(handle, &gic_data.rdists, gic_data.domain); + //if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis()) + // its_init(handle, &gic_data.rdists, gic_data.domain); gic_smp_init(); gic_dist_init(); diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig index 5a27bffa02fb..fc28b3240ff2 100644 --- a/drivers/media/i2c/Kconfig +++ b/drivers/media/i2c/Kconfig @@ -531,6 +531,18 @@ config VIDEO_OV2659 To compile this driver as a module, choose M here: the module will be called ov2659. +config VIDEO_OV5645 + tristate "OmniVision OV5645 sensor support" + depends on OF + depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API + depends on MEDIA_CAMERA_SUPPORT + ---help--- + This is a Video4Linux2 sensor-level driver for the OmniVision + OV5645 camera. + + To compile this driver as a module, choose M here: the + module will be called ov5645. 
+ config VIDEO_OV7640 tristate "OmniVision OV7640 sensor support" depends on I2C && VIDEO_V4L2 diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile index 92773b2e6225..09a5d2448acf 100644 --- a/drivers/media/i2c/Makefile +++ b/drivers/media/i2c/Makefile @@ -56,6 +56,7 @@ obj-$(CONFIG_VIDEO_VP27SMPX) += vp27smpx.o obj-$(CONFIG_VIDEO_SONY_BTF_MPX) += sony-btf-mpx.o obj-$(CONFIG_VIDEO_UPD64031A) += upd64031a.o obj-$(CONFIG_VIDEO_UPD64083) += upd64083.o +obj-$(CONFIG_VIDEO_OV5645) += ov5645.o obj-$(CONFIG_VIDEO_OV7640) += ov7640.o obj-$(CONFIG_VIDEO_OV7670) += ov7670.o obj-$(CONFIG_VIDEO_OV9650) += ov9650.o diff --git a/drivers/media/i2c/ov5645.c b/drivers/media/i2c/ov5645.c new file mode 100644 index 000000000000..b6a2d742e811 --- /dev/null +++ b/drivers/media/i2c/ov5645.c @@ -0,0 +1,1425 @@ +/* + * Driver for the OV5645 camera sensor. + * + * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved. + * Copyright (C) 2015 By Tech Design S.L. All Rights Reserved. + * Copyright (C) 2012-2013 Freescale Semiconductor, Inc. All Rights Reserved. + * + * Based on: + * - the OV5645 driver from QC msm-3.10 kernel on codeaurora.org: + * https://us.codeaurora.org/cgit/quic/la/kernel/msm-3.10/tree/drivers/ + * media/platform/msm/camera_v2/sensor/ov5645.c?h=LA.BR.1.2.4_rb1.41 + * - the OV5640 driver posted on linux-media: + * https://www.mail-archive.com/linux-media%40vger.kernel.org/msg92671.html + */ + +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/bitops.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/gpio/consumer.h> +#include <linux/i2c.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/of.h> +#include <linux/of_graph.h> +#include <linux/regulator/consumer.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <media/v4l2-ctrls.h> +#include <media/v4l2-of.h> +#include <media/v4l2-subdev.h> + +static DEFINE_MUTEX(ov5645_lock); + +#define OV5645_VOLTAGE_ANALOG 2800000 +#define OV5645_VOLTAGE_DIGITAL_CORE 1500000 +#define OV5645_VOLTAGE_DIGITAL_IO 1800000 + +#define OV5645_SYSTEM_CTRL0 0x3008 +#define OV5645_SYSTEM_CTRL0_START 0x02 +#define OV5645_SYSTEM_CTRL0_STOP 0x42 +#define OV5645_CHIP_ID_HIGH 0x300a +#define OV5645_CHIP_ID_HIGH_BYTE 0x56 +#define OV5645_CHIP_ID_LOW 0x300b +#define OV5645_CHIP_ID_LOW_BYTE 0x45 +#define OV5645_AWB_MANUAL_CONTROL 0x3406 +#define OV5645_AWB_MANUAL_ENABLE BIT(0) +#define OV5645_AEC_PK_MANUAL 0x3503 +#define OV5645_AEC_MANUAL_ENABLE BIT(0) +#define OV5645_AGC_MANUAL_ENABLE BIT(1) +#define OV5645_TIMING_TC_REG20 0x3820 +#define OV5645_SENSOR_VFLIP BIT(1) +#define OV5645_ISP_VFLIP BIT(2) +#define OV5645_TIMING_TC_REG21 0x3821 +#define OV5645_SENSOR_MIRROR BIT(1) +#define OV5645_PRE_ISP_TEST_SETTING_1 0x503d +#define OV5645_TEST_PATTERN_MASK 0x3 +#define OV5645_SET_TEST_PATTERN(x) ((x) & OV5645_TEST_PATTERN_MASK) +#define OV5645_TEST_PATTERN_ENABLE BIT(7) +#define OV5645_SDE_SAT_U 0x5583 +#define OV5645_SDE_SAT_V 0x5584 + +struct reg_value { + u16 reg; + u8 val; +}; + +struct ov5645_mode_info { + u32 width; + u32 height; + const struct reg_value *data; + u32 data_size; + u32 pixel_clock; + u32 link_freq; +}; + +struct ov5645 { + struct i2c_client *i2c_client; + struct device *dev; + struct v4l2_subdev sd; + struct media_pad pad; + struct v4l2_of_endpoint ep; + struct v4l2_mbus_framefmt fmt; + struct v4l2_rect crop; + struct clk *xclk; + + struct 
regulator *io_regulator; + struct regulator *core_regulator; + struct regulator *analog_regulator; + + const struct ov5645_mode_info *current_mode; + + struct v4l2_ctrl_handler ctrls; + struct v4l2_ctrl *pixel_clock; + struct v4l2_ctrl *link_freq; + + /* Cached register values */ + u8 aec_pk_manual; + u8 timing_tc_reg20; + u8 timing_tc_reg21; + + struct mutex power_lock; /* lock to protect power state */ + int power_count; + + struct gpio_desc *enable_gpio; + struct gpio_desc *rst_gpio; +}; + +static inline struct ov5645 *to_ov5645(struct v4l2_subdev *sd) +{ + return container_of(sd, struct ov5645, sd); +} + +static const struct reg_value ov5645_global_init_setting[] = { + { 0x3103, 0x11 }, + { 0x3008, 0x82 }, + { 0x3008, 0x42 }, + { 0x3103, 0x03 }, + { 0x3503, 0x07 }, + { 0x3002, 0x1c }, + { 0x3006, 0xc3 }, + { 0x300e, 0x45 }, + { 0x3017, 0x00 }, + { 0x3018, 0x00 }, + { 0x302e, 0x0b }, + { 0x3037, 0x13 }, + { 0x3108, 0x01 }, + { 0x3611, 0x06 }, + { 0x3500, 0x00 }, + { 0x3501, 0x01 }, + { 0x3502, 0x00 }, + { 0x350a, 0x00 }, + { 0x350b, 0x3f }, + { 0x3620, 0x33 }, + { 0x3621, 0xe0 }, + { 0x3622, 0x01 }, + { 0x3630, 0x2e }, + { 0x3631, 0x00 }, + { 0x3632, 0x32 }, + { 0x3633, 0x52 }, + { 0x3634, 0x70 }, + { 0x3635, 0x13 }, + { 0x3636, 0x03 }, + { 0x3703, 0x5a }, + { 0x3704, 0xa0 }, + { 0x3705, 0x1a }, + { 0x3709, 0x12 }, + { 0x370b, 0x61 }, + { 0x370f, 0x10 }, + { 0x3715, 0x78 }, + { 0x3717, 0x01 }, + { 0x371b, 0x20 }, + { 0x3731, 0x12 }, + { 0x3901, 0x0a }, + { 0x3905, 0x02 }, + { 0x3906, 0x10 }, + { 0x3719, 0x86 }, + { 0x3810, 0x00 }, + { 0x3811, 0x10 }, + { 0x3812, 0x00 }, + { 0x3821, 0x01 }, + { 0x3824, 0x01 }, + { 0x3826, 0x03 }, + { 0x3828, 0x08 }, + { 0x3a19, 0xf8 }, + { 0x3c01, 0x34 }, + { 0x3c04, 0x28 }, + { 0x3c05, 0x98 }, + { 0x3c07, 0x07 }, + { 0x3c09, 0xc2 }, + { 0x3c0a, 0x9c }, + { 0x3c0b, 0x40 }, + { 0x3c01, 0x34 }, + { 0x4001, 0x02 }, + { 0x4514, 0x00 }, + { 0x4520, 0xb0 }, + { 0x460b, 0x37 }, + { 0x460c, 0x20 }, + { 0x4818, 0x01 }, + { 0x481d, 0xf0 }, 
+ { 0x481f, 0x50 }, + { 0x4823, 0x70 }, + { 0x4831, 0x14 }, + { 0x5000, 0xa7 }, + { 0x5001, 0x83 }, + { 0x501d, 0x00 }, + { 0x501f, 0x00 }, + { 0x503d, 0x00 }, + { 0x505c, 0x30 }, + { 0x5181, 0x59 }, + { 0x5183, 0x00 }, + { 0x5191, 0xf0 }, + { 0x5192, 0x03 }, + { 0x5684, 0x10 }, + { 0x5685, 0xa0 }, + { 0x5686, 0x0c }, + { 0x5687, 0x78 }, + { 0x5a00, 0x08 }, + { 0x5a21, 0x00 }, + { 0x5a24, 0x00 }, + { 0x3008, 0x02 }, + { 0x3503, 0x00 }, + { 0x5180, 0xff }, + { 0x5181, 0xf2 }, + { 0x5182, 0x00 }, + { 0x5183, 0x14 }, + { 0x5184, 0x25 }, + { 0x5185, 0x24 }, + { 0x5186, 0x09 }, + { 0x5187, 0x09 }, + { 0x5188, 0x0a }, + { 0x5189, 0x75 }, + { 0x518a, 0x52 }, + { 0x518b, 0xea }, + { 0x518c, 0xa8 }, + { 0x518d, 0x42 }, + { 0x518e, 0x38 }, + { 0x518f, 0x56 }, + { 0x5190, 0x42 }, + { 0x5191, 0xf8 }, + { 0x5192, 0x04 }, + { 0x5193, 0x70 }, + { 0x5194, 0xf0 }, + { 0x5195, 0xf0 }, + { 0x5196, 0x03 }, + { 0x5197, 0x01 }, + { 0x5198, 0x04 }, + { 0x5199, 0x12 }, + { 0x519a, 0x04 }, + { 0x519b, 0x00 }, + { 0x519c, 0x06 }, + { 0x519d, 0x82 }, + { 0x519e, 0x38 }, + { 0x5381, 0x1e }, + { 0x5382, 0x5b }, + { 0x5383, 0x08 }, + { 0x5384, 0x0a }, + { 0x5385, 0x7e }, + { 0x5386, 0x88 }, + { 0x5387, 0x7c }, + { 0x5388, 0x6c }, + { 0x5389, 0x10 }, + { 0x538a, 0x01 }, + { 0x538b, 0x98 }, + { 0x5300, 0x08 }, + { 0x5301, 0x30 }, + { 0x5302, 0x10 }, + { 0x5303, 0x00 }, + { 0x5304, 0x08 }, + { 0x5305, 0x30 }, + { 0x5306, 0x08 }, + { 0x5307, 0x16 }, + { 0x5309, 0x08 }, + { 0x530a, 0x30 }, + { 0x530b, 0x04 }, + { 0x530c, 0x06 }, + { 0x5480, 0x01 }, + { 0x5481, 0x08 }, + { 0x5482, 0x14 }, + { 0x5483, 0x28 }, + { 0x5484, 0x51 }, + { 0x5485, 0x65 }, + { 0x5486, 0x71 }, + { 0x5487, 0x7d }, + { 0x5488, 0x87 }, + { 0x5489, 0x91 }, + { 0x548a, 0x9a }, + { 0x548b, 0xaa }, + { 0x548c, 0xb8 }, + { 0x548d, 0xcd }, + { 0x548e, 0xdd }, + { 0x548f, 0xea }, + { 0x5490, 0x1d }, + { 0x5580, 0x02 }, + { 0x5583, 0x40 }, + { 0x5584, 0x10 }, + { 0x5589, 0x10 }, + { 0x558a, 0x00 }, + { 0x558b, 0xf8 }, + { 0x5800, 0x3f }, 
+ { 0x5801, 0x16 }, + { 0x5802, 0x0e }, + { 0x5803, 0x0d }, + { 0x5804, 0x17 }, + { 0x5805, 0x3f }, + { 0x5806, 0x0b }, + { 0x5807, 0x06 }, + { 0x5808, 0x04 }, + { 0x5809, 0x04 }, + { 0x580a, 0x06 }, + { 0x580b, 0x0b }, + { 0x580c, 0x09 }, + { 0x580d, 0x03 }, + { 0x580e, 0x00 }, + { 0x580f, 0x00 }, + { 0x5810, 0x03 }, + { 0x5811, 0x08 }, + { 0x5812, 0x0a }, + { 0x5813, 0x03 }, + { 0x5814, 0x00 }, + { 0x5815, 0x00 }, + { 0x5816, 0x04 }, + { 0x5817, 0x09 }, + { 0x5818, 0x0f }, + { 0x5819, 0x08 }, + { 0x581a, 0x06 }, + { 0x581b, 0x06 }, + { 0x581c, 0x08 }, + { 0x581d, 0x0c }, + { 0x581e, 0x3f }, + { 0x581f, 0x1e }, + { 0x5820, 0x12 }, + { 0x5821, 0x13 }, + { 0x5822, 0x21 }, + { 0x5823, 0x3f }, + { 0x5824, 0x68 }, + { 0x5825, 0x28 }, + { 0x5826, 0x2c }, + { 0x5827, 0x28 }, + { 0x5828, 0x08 }, + { 0x5829, 0x48 }, + { 0x582a, 0x64 }, + { 0x582b, 0x62 }, + { 0x582c, 0x64 }, + { 0x582d, 0x28 }, + { 0x582e, 0x46 }, + { 0x582f, 0x62 }, + { 0x5830, 0x60 }, + { 0x5831, 0x62 }, + { 0x5832, 0x26 }, + { 0x5833, 0x48 }, + { 0x5834, 0x66 }, + { 0x5835, 0x44 }, + { 0x5836, 0x64 }, + { 0x5837, 0x28 }, + { 0x5838, 0x66 }, + { 0x5839, 0x48 }, + { 0x583a, 0x2c }, + { 0x583b, 0x28 }, + { 0x583c, 0x26 }, + { 0x583d, 0xae }, + { 0x5025, 0x00 }, + { 0x3a0f, 0x30 }, + { 0x3a10, 0x28 }, + { 0x3a1b, 0x30 }, + { 0x3a1e, 0x26 }, + { 0x3a11, 0x60 }, + { 0x3a1f, 0x14 }, + { 0x0601, 0x02 }, + { 0x3008, 0x42 }, + { 0x3008, 0x02 } +}; + +static const struct reg_value ov5645_setting_sxga[] = { + { 0x3612, 0xa9 }, + { 0x3614, 0x50 }, + { 0x3618, 0x00 }, + { 0x3034, 0x18 }, + { 0x3035, 0x21 }, + { 0x3036, 0x70 }, + { 0x3600, 0x09 }, + { 0x3601, 0x43 }, + { 0x3708, 0x66 }, + { 0x370c, 0xc3 }, + { 0x3800, 0x00 }, + { 0x3801, 0x00 }, + { 0x3802, 0x00 }, + { 0x3803, 0x06 }, + { 0x3804, 0x0a }, + { 0x3805, 0x3f }, + { 0x3806, 0x07 }, + { 0x3807, 0x9d }, + { 0x3808, 0x05 }, + { 0x3809, 0x00 }, + { 0x380a, 0x03 }, + { 0x380b, 0xc0 }, + { 0x380c, 0x07 }, + { 0x380d, 0x68 }, + { 0x380e, 0x03 }, + { 0x380f, 0xd8 
}, + { 0x3813, 0x06 }, + { 0x3814, 0x31 }, + { 0x3815, 0x31 }, + { 0x3820, 0x47 }, + { 0x3a02, 0x03 }, + { 0x3a03, 0xd8 }, + { 0x3a08, 0x01 }, + { 0x3a09, 0xf8 }, + { 0x3a0a, 0x01 }, + { 0x3a0b, 0xa4 }, + { 0x3a0e, 0x02 }, + { 0x3a0d, 0x02 }, + { 0x3a14, 0x03 }, + { 0x3a15, 0xd8 }, + { 0x3a18, 0x00 }, + { 0x4004, 0x02 }, + { 0x4005, 0x18 }, + { 0x4300, 0x32 }, + { 0x4202, 0x00 } +}; + +static const struct reg_value ov5645_setting_1080p[] = { + { 0x3612, 0xab }, + { 0x3614, 0x50 }, + { 0x3618, 0x04 }, + { 0x3034, 0x18 }, + { 0x3035, 0x11 }, + { 0x3036, 0x54 }, + { 0x3600, 0x08 }, + { 0x3601, 0x33 }, + { 0x3708, 0x63 }, + { 0x370c, 0xc0 }, + { 0x3800, 0x01 }, + { 0x3801, 0x50 }, + { 0x3802, 0x01 }, + { 0x3803, 0xb2 }, + { 0x3804, 0x08 }, + { 0x3805, 0xef }, + { 0x3806, 0x05 }, + { 0x3807, 0xf1 }, + { 0x3808, 0x07 }, + { 0x3809, 0x80 }, + { 0x380a, 0x04 }, + { 0x380b, 0x38 }, + { 0x380c, 0x09 }, + { 0x380d, 0xc4 }, + { 0x380e, 0x04 }, + { 0x380f, 0x60 }, + { 0x3813, 0x04 }, + { 0x3814, 0x11 }, + { 0x3815, 0x11 }, + { 0x3820, 0x47 }, + { 0x4514, 0x88 }, + { 0x3a02, 0x04 }, + { 0x3a03, 0x60 }, + { 0x3a08, 0x01 }, + { 0x3a09, 0x50 }, + { 0x3a0a, 0x01 }, + { 0x3a0b, 0x18 }, + { 0x3a0e, 0x03 }, + { 0x3a0d, 0x04 }, + { 0x3a14, 0x04 }, + { 0x3a15, 0x60 }, + { 0x3a18, 0x00 }, + { 0x4004, 0x06 }, + { 0x4005, 0x18 }, + { 0x4300, 0x32 }, + { 0x4202, 0x00 }, + { 0x4837, 0x0b } +}; + +static const struct reg_value ov5645_setting_full[] = { + { 0x3612, 0xab }, + { 0x3614, 0x50 }, + { 0x3618, 0x04 }, + { 0x3034, 0x18 }, + { 0x3035, 0x11 }, + { 0x3036, 0x54 }, + { 0x3600, 0x08 }, + { 0x3601, 0x33 }, + { 0x3708, 0x63 }, + { 0x370c, 0xc0 }, + { 0x3800, 0x00 }, + { 0x3801, 0x00 }, + { 0x3802, 0x00 }, + { 0x3803, 0x00 }, + { 0x3804, 0x0a }, + { 0x3805, 0x3f }, + { 0x3806, 0x07 }, + { 0x3807, 0x9f }, + { 0x3808, 0x0a }, + { 0x3809, 0x20 }, + { 0x380a, 0x07 }, + { 0x380b, 0x98 }, + { 0x380c, 0x0b }, + { 0x380d, 0x1c }, + { 0x380e, 0x07 }, + { 0x380f, 0xb0 }, + { 0x3813, 0x06 }, + { 0x3814, 
0x11 }, + { 0x3815, 0x11 }, + { 0x3820, 0x47 }, + { 0x4514, 0x88 }, + { 0x3a02, 0x07 }, + { 0x3a03, 0xb0 }, + { 0x3a08, 0x01 }, + { 0x3a09, 0x27 }, + { 0x3a0a, 0x00 }, + { 0x3a0b, 0xf6 }, + { 0x3a0e, 0x06 }, + { 0x3a0d, 0x08 }, + { 0x3a14, 0x07 }, + { 0x3a15, 0xb0 }, + { 0x3a18, 0x01 }, + { 0x4004, 0x06 }, + { 0x4005, 0x18 }, + { 0x4300, 0x32 }, + { 0x4837, 0x0b }, + { 0x4202, 0x00 } +}; + +static const s64 link_freq[] = { + 222880000, + 334320000 +}; + +static const struct ov5645_mode_info ov5645_mode_info_data[] = { + { + .width = 1280, + .height = 960, + .data = ov5645_setting_sxga, + .data_size = ARRAY_SIZE(ov5645_setting_sxga), + .pixel_clock = 111440000, + .link_freq = 0 /* an index in link_freq[] */ + }, + { + .width = 1920, + .height = 1080, + .data = ov5645_setting_1080p, + .data_size = ARRAY_SIZE(ov5645_setting_1080p), + .pixel_clock = 167160000, + .link_freq = 1 /* an index in link_freq[] */ + }, + { + .width = 2592, + .height = 1944, + .data = ov5645_setting_full, + .data_size = ARRAY_SIZE(ov5645_setting_full), + .pixel_clock = 167160000, + .link_freq = 1 /* an index in link_freq[] */ + }, +}; + +static int ov5645_regulators_enable(struct ov5645 *ov5645) +{ + int ret; + + ret = regulator_enable(ov5645->io_regulator); + if (ret < 0) { + dev_err(ov5645->dev, "set io voltage failed\n"); + return ret; + } + + ret = regulator_enable(ov5645->analog_regulator); + if (ret) { + dev_err(ov5645->dev, "set analog voltage failed\n"); + goto err_disable_io; + } + + ret = regulator_enable(ov5645->core_regulator); + if (ret) { + dev_err(ov5645->dev, "set core voltage failed\n"); + goto err_disable_analog; + } + + return 0; + +err_disable_analog: + regulator_disable(ov5645->analog_regulator); +err_disable_io: + regulator_disable(ov5645->io_regulator); + + return ret; +} + +static void ov5645_regulators_disable(struct ov5645 *ov5645) +{ + int ret; + + ret = regulator_disable(ov5645->core_regulator); + if (ret < 0) + dev_err(ov5645->dev, "core regulator disable 
failed\n"); + + ret = regulator_disable(ov5645->analog_regulator); + if (ret < 0) + dev_err(ov5645->dev, "analog regulator disable failed\n"); + + ret = regulator_disable(ov5645->io_regulator); + if (ret < 0) + dev_err(ov5645->dev, "io regulator disable failed\n"); +} + +static int ov5645_write_reg_to(struct ov5645 *ov5645, u16 reg, u8 val, u16 i2c_addr) +{ + u8 regbuf[3] = { + reg >> 8, + reg & 0xff, + val + }; + struct i2c_msg msgs = { + .addr = i2c_addr, + .flags = 0, + .len = 3, + .buf = regbuf + }; + int ret; + + ret = i2c_transfer(ov5645->i2c_client->adapter, &msgs, 1); + if (ret < 0) + dev_err(ov5645->dev, + "%s: write reg error %d on addr 0x%x: reg=0x%x, val=0x%x\n", + __func__, ret, i2c_addr, reg, val); + + return ret; +} + +static int ov5645_write_reg(struct ov5645 *ov5645, u16 reg, u8 val) +{ + u8 regbuf[3]; + int ret; + + regbuf[0] = reg >> 8; + regbuf[1] = reg & 0xff; + regbuf[2] = val; + + ret = i2c_master_send(ov5645->i2c_client, regbuf, 3); + if (ret < 0) + dev_err(ov5645->dev, "%s: write reg error %d: reg=%x, val=%x\n", + __func__, ret, reg, val); + + return ret; +} + +static int ov5645_read_reg(struct ov5645 *ov5645, u16 reg, u8 *val) +{ + u8 regbuf[2]; + int ret; + + regbuf[0] = reg >> 8; + regbuf[1] = reg & 0xff; + + ret = i2c_master_send(ov5645->i2c_client, regbuf, 2); + if (ret < 0) { + dev_err(ov5645->dev, "%s: write reg error %d: reg=%x\n", + __func__, ret, reg); + return ret; + } + + ret = i2c_master_recv(ov5645->i2c_client, val, 1); + if (ret < 0) { + dev_err(ov5645->dev, "%s: read reg error %d: reg=%x\n", + __func__, ret, reg); + return ret; + } + + return 0; +} + +static int ov5645_set_aec_mode(struct ov5645 *ov5645, u32 mode) +{ + u8 val = ov5645->aec_pk_manual; + int ret; + + if (mode == V4L2_EXPOSURE_AUTO) + val &= ~OV5645_AEC_MANUAL_ENABLE; + else /* V4L2_EXPOSURE_MANUAL */ + val |= OV5645_AEC_MANUAL_ENABLE; + + ret = ov5645_write_reg(ov5645, OV5645_AEC_PK_MANUAL, val); + if (!ret) + ov5645->aec_pk_manual = val; + + return ret; +} + 
+static int ov5645_set_agc_mode(struct ov5645 *ov5645, u32 enable) +{ + u8 val = ov5645->aec_pk_manual; + int ret; + + if (enable) + val &= ~OV5645_AGC_MANUAL_ENABLE; + else + val |= OV5645_AGC_MANUAL_ENABLE; + + ret = ov5645_write_reg(ov5645, OV5645_AEC_PK_MANUAL, val); + if (!ret) + ov5645->aec_pk_manual = val; + + return ret; +} + +static int ov5645_set_register_array(struct ov5645 *ov5645, + const struct reg_value *settings, + unsigned int num_settings) +{ + unsigned int i; + int ret; + + for (i = 0; i < num_settings; ++i, ++settings) { + ret = ov5645_write_reg(ov5645, settings->reg, settings->val); + if (ret < 0) + return ret; + } + + return 0; +} + +static int ov5645_set_power_on(struct ov5645 *ov5645) +{ + int ret; + + ret = ov5645_regulators_enable(ov5645); + if (ret < 0) { + return ret; + } + + ret = clk_prepare_enable(ov5645->xclk); + if (ret < 0) { + dev_err(ov5645->dev, "clk prepare enable failed\n"); + ov5645_regulators_disable(ov5645); + return ret; + } + + usleep_range(5000, 15000); + gpiod_set_value_cansleep(ov5645->enable_gpio, 1); + + usleep_range(1000, 2000); + gpiod_set_value_cansleep(ov5645->rst_gpio, 0); + + msleep(20); + + return 0; +} + +static void ov5645_set_power_off(struct ov5645 *ov5645) +{ + gpiod_set_value_cansleep(ov5645->rst_gpio, 1); + gpiod_set_value_cansleep(ov5645->enable_gpio, 0); + clk_disable_unprepare(ov5645->xclk); + ov5645_regulators_disable(ov5645); +} + +static int ov5645_s_power(struct v4l2_subdev *sd, int on) +{ + struct ov5645 *ov5645 = to_ov5645(sd); + int ret = 0; + + mutex_lock(&ov5645->power_lock); + + /* If the power count is modified from 0 to != 0 or from != 0 to 0, + * update the power state. 
+ */ + if (ov5645->power_count == !on) { + if (on) { + mutex_lock(&ov5645_lock); + + ret = ov5645_set_power_on(ov5645); + if (ret < 0) + goto exit; + + ret = ov5645_write_reg_to(ov5645, 0x3100, + ov5645->i2c_client->addr, 0x78); + if (ret < 0) { + dev_err(ov5645->dev, + "could not change i2c address\n"); + ov5645_set_power_off(ov5645); + mutex_unlock(&ov5645_lock); + goto exit; + } + + mutex_unlock(&ov5645_lock); + + ret = ov5645_set_register_array(ov5645, + ov5645_global_init_setting, + ARRAY_SIZE(ov5645_global_init_setting)); + if (ret < 0) { + dev_err(ov5645->dev, + "could not set init registers\n"); + ov5645_set_power_off(ov5645); + goto exit; + } + + ret = ov5645_write_reg(ov5645, OV5645_SYSTEM_CTRL0, + OV5645_SYSTEM_CTRL0_STOP); + if (ret < 0) { + ov5645_set_power_off(ov5645); + goto exit; + } + } else { + ov5645_set_power_off(ov5645); + } + } + + /* Update the power count. */ + ov5645->power_count += on ? 1 : -1; + WARN_ON(ov5645->power_count < 0); + +exit: + mutex_unlock(&ov5645->power_lock); + + return ret; +} + +static int ov5645_set_saturation(struct ov5645 *ov5645, s32 value) +{ + u32 reg_value = (value * 0x10) + 0x40; + int ret; + + ret = ov5645_write_reg(ov5645, OV5645_SDE_SAT_U, reg_value); + if (ret < 0) + return ret; + + return ov5645_write_reg(ov5645, OV5645_SDE_SAT_V, reg_value); +} + +static int ov5645_set_hflip(struct ov5645 *ov5645, s32 value) +{ + u8 val = ov5645->timing_tc_reg21; + int ret; + + if (value == 0) + val &= ~(OV5645_SENSOR_MIRROR); + else + val |= (OV5645_SENSOR_MIRROR); + + ret = ov5645_write_reg(ov5645, OV5645_TIMING_TC_REG21, val); + if (!ret) + ov5645->timing_tc_reg21 = val; + + return ret; +} + +static int ov5645_set_vflip(struct ov5645 *ov5645, s32 value) +{ + u8 val = ov5645->timing_tc_reg20; + int ret; + + if (value == 0) + val |= (OV5645_SENSOR_VFLIP | OV5645_ISP_VFLIP); + else + val &= ~(OV5645_SENSOR_VFLIP | OV5645_ISP_VFLIP); + + ret = ov5645_write_reg(ov5645, OV5645_TIMING_TC_REG20, val); + if (!ret) + 
ov5645->timing_tc_reg20 = val; + + return ret; +} + +static int ov5645_set_test_pattern(struct ov5645 *ov5645, s32 value) +{ + u8 val = 0; + + if (value) { + val = OV5645_SET_TEST_PATTERN(value - 1); + val |= OV5645_TEST_PATTERN_ENABLE; + } + + return ov5645_write_reg(ov5645, OV5645_PRE_ISP_TEST_SETTING_1, val); +} + +static const char * const ov5645_test_pattern_menu[] = { + "Disabled", + "Vertical Color Bars", + "Pseudo-Random Data", + "Color Square", + "Black Image", +}; + +static int ov5645_set_awb(struct ov5645 *ov5645, s32 enable_auto) +{ + u8 val = 0; + + if (!enable_auto) + val = OV5645_AWB_MANUAL_ENABLE; + + return ov5645_write_reg(ov5645, OV5645_AWB_MANUAL_CONTROL, val); +} + +static int ov5645_s_ctrl(struct v4l2_ctrl *ctrl) +{ + struct ov5645 *ov5645 = container_of(ctrl->handler, + struct ov5645, ctrls); + int ret; + + mutex_lock(&ov5645->power_lock); + if (!ov5645->power_count) { + mutex_unlock(&ov5645->power_lock); + return 0; + } + + switch (ctrl->id) { + case V4L2_CID_SATURATION: + ret = ov5645_set_saturation(ov5645, ctrl->val); + break; + case V4L2_CID_AUTO_WHITE_BALANCE: + ret = ov5645_set_awb(ov5645, ctrl->val); + break; + case V4L2_CID_AUTOGAIN: + ret = ov5645_set_agc_mode(ov5645, ctrl->val); + break; + case V4L2_CID_EXPOSURE_AUTO: + ret = ov5645_set_aec_mode(ov5645, ctrl->val); + break; + case V4L2_CID_TEST_PATTERN: + ret = ov5645_set_test_pattern(ov5645, ctrl->val); + break; + case V4L2_CID_HFLIP: + ret = ov5645_set_hflip(ov5645, ctrl->val); + break; + case V4L2_CID_VFLIP: + ret = ov5645_set_vflip(ov5645, ctrl->val); + break; + default: + ret = -EINVAL; + break; + } + + mutex_unlock(&ov5645->power_lock); + + return ret; +} + +static struct v4l2_ctrl_ops ov5645_ctrl_ops = { + .s_ctrl = ov5645_s_ctrl, +}; + +static int ov5645_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_mbus_code_enum *code) +{ + if (code->index > 0) + return -EINVAL; + + code->code = MEDIA_BUS_FMT_UYVY8_2X8; + + return 0; +} + 
+static int ov5645_enum_frame_size(struct v4l2_subdev *subdev, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_frame_size_enum *fse) +{ + if (fse->code != MEDIA_BUS_FMT_UYVY8_2X8) + return -EINVAL; + + if (fse->index >= ARRAY_SIZE(ov5645_mode_info_data)) + return -EINVAL; + + fse->min_width = ov5645_mode_info_data[fse->index].width; + fse->max_width = ov5645_mode_info_data[fse->index].width; + fse->min_height = ov5645_mode_info_data[fse->index].height; + fse->max_height = ov5645_mode_info_data[fse->index].height; + + return 0; +} + +static struct v4l2_mbus_framefmt * +__ov5645_get_pad_format(struct ov5645 *ov5645, + struct v4l2_subdev_pad_config *cfg, + unsigned int pad, + enum v4l2_subdev_format_whence which) +{ + switch (which) { + case V4L2_SUBDEV_FORMAT_TRY: + return v4l2_subdev_get_try_format(&ov5645->sd, cfg, pad); + case V4L2_SUBDEV_FORMAT_ACTIVE: + return &ov5645->fmt; + default: + return NULL; + } +} + +static int ov5645_get_format(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *format) +{ + struct ov5645 *ov5645 = to_ov5645(sd); + + format->format = *__ov5645_get_pad_format(ov5645, cfg, format->pad, + format->which); + return 0; +} + +static struct v4l2_rect * +__ov5645_get_pad_crop(struct ov5645 *ov5645, struct v4l2_subdev_pad_config *cfg, + unsigned int pad, enum v4l2_subdev_format_whence which) +{ + switch (which) { + case V4L2_SUBDEV_FORMAT_TRY: + return v4l2_subdev_get_try_crop(&ov5645->sd, cfg, pad); + case V4L2_SUBDEV_FORMAT_ACTIVE: + return &ov5645->crop; + default: + return NULL; + } +} + +static const struct ov5645_mode_info * +ov5645_find_nearest_mode(unsigned int width, unsigned int height) +{ + int i; + + for (i = ARRAY_SIZE(ov5645_mode_info_data) - 1; i >= 0; i--) { + if (ov5645_mode_info_data[i].width <= width && + ov5645_mode_info_data[i].height <= height) + break; + } + + if (i < 0) + i = 0; + + return &ov5645_mode_info_data[i]; +} + +static int ov5645_set_format(struct v4l2_subdev 
*sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *format) +{ + struct ov5645 *ov5645 = to_ov5645(sd); + struct v4l2_mbus_framefmt *__format; + struct v4l2_rect *__crop; + const struct ov5645_mode_info *new_mode; + int ret; + + __crop = __ov5645_get_pad_crop(ov5645, cfg, format->pad, + format->which); + + new_mode = ov5645_find_nearest_mode(format->format.width, + format->format.height); + __crop->width = new_mode->width; + __crop->height = new_mode->height; + + if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) { + ret = v4l2_ctrl_s_ctrl_int64(ov5645->pixel_clock, + new_mode->pixel_clock); + if (ret < 0) + return ret; + + ret = v4l2_ctrl_s_ctrl(ov5645->link_freq, + new_mode->link_freq); + if (ret < 0) + return ret; + + ov5645->current_mode = new_mode; + } + + __format = __ov5645_get_pad_format(ov5645, cfg, format->pad, + format->which); + __format->width = __crop->width; + __format->height = __crop->height; + __format->code = MEDIA_BUS_FMT_UYVY8_2X8; + __format->field = V4L2_FIELD_NONE; + __format->colorspace = V4L2_COLORSPACE_SRGB; + + format->format = *__format; + + return 0; +} + +static int ov5645_entity_init_cfg(struct v4l2_subdev *subdev, + struct v4l2_subdev_pad_config *cfg) +{ + struct v4l2_subdev_format fmt = { 0 }; + + fmt.which = cfg ? 
V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE; + fmt.format.width = 1920; + fmt.format.height = 1080; + + ov5645_set_format(subdev, cfg, &fmt); + + return 0; +} + +static int ov5645_get_selection(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_selection *sel) +{ + struct ov5645 *ov5645 = to_ov5645(sd); + + if (sel->target != V4L2_SEL_TGT_CROP) + return -EINVAL; + + sel->r = *__ov5645_get_pad_crop(ov5645, cfg, sel->pad, + sel->which); + return 0; +} + +static int ov5645_s_stream(struct v4l2_subdev *subdev, int enable) +{ + struct ov5645 *ov5645 = to_ov5645(subdev); + int ret; + + if (enable) { + ret = ov5645_set_register_array(ov5645, + ov5645->current_mode->data, + ov5645->current_mode->data_size); + if (ret < 0) { + dev_err(ov5645->dev, "could not set mode %dx%d\n", + ov5645->current_mode->width, + ov5645->current_mode->height); + return ret; + } + ret = v4l2_ctrl_handler_setup(&ov5645->ctrls); + if (ret < 0) { + dev_err(ov5645->dev, "could not sync v4l2 controls\n"); + return ret; + } + ret = ov5645_write_reg(ov5645, OV5645_SYSTEM_CTRL0, + OV5645_SYSTEM_CTRL0_START); + if (ret < 0) + return ret; + } else { + ret = ov5645_write_reg(ov5645, OV5645_SYSTEM_CTRL0, + OV5645_SYSTEM_CTRL0_STOP); + if (ret < 0) + return ret; + } + + return 0; +} + +static const struct v4l2_subdev_core_ops ov5645_core_ops = { + .s_power = ov5645_s_power, +}; + +static const struct v4l2_subdev_video_ops ov5645_video_ops = { + .s_stream = ov5645_s_stream, +}; + +static const struct v4l2_subdev_pad_ops ov5645_subdev_pad_ops = { + .init_cfg = ov5645_entity_init_cfg, + .enum_mbus_code = ov5645_enum_mbus_code, + .enum_frame_size = ov5645_enum_frame_size, + .get_fmt = ov5645_get_format, + .set_fmt = ov5645_set_format, + .get_selection = ov5645_get_selection, +}; + +static const struct v4l2_subdev_ops ov5645_subdev_ops = { + .core = &ov5645_core_ops, + .video = &ov5645_video_ops, + .pad = &ov5645_subdev_pad_ops, +}; + +static int ov5645_probe(struct 
i2c_client *client, + const struct i2c_device_id *id) +{ + struct device *dev = &client->dev; + struct device_node *endpoint; + struct ov5645 *ov5645; + u8 chip_id_high, chip_id_low; + u32 xclk_freq; + int ret; + + ov5645 = devm_kzalloc(dev, sizeof(struct ov5645), GFP_KERNEL); + if (!ov5645) + return -ENOMEM; + + ov5645->i2c_client = client; + ov5645->dev = dev; + + endpoint = of_graph_get_next_endpoint(dev->of_node, NULL); + if (!endpoint) { + dev_err(dev, "endpoint node not found\n"); + return -EINVAL; + } + + ret = v4l2_of_parse_endpoint(endpoint, &ov5645->ep); + if (ret < 0) { + dev_err(dev, "parsing endpoint node failed\n"); + return ret; + } + + of_node_put(endpoint); + + if (ov5645->ep.bus_type != V4L2_MBUS_CSI2) { + dev_err(dev, "invalid bus type, must be CSI2\n"); + return -EINVAL; + } + + /* get system clock (xclk) */ + ov5645->xclk = devm_clk_get(dev, "xclk"); + if (IS_ERR(ov5645->xclk)) { + dev_err(dev, "could not get xclk"); + return PTR_ERR(ov5645->xclk); + } + + ret = of_property_read_u32(dev->of_node, "clock-frequency", &xclk_freq); + if (ret) { + dev_err(dev, "could not get xclk frequency\n"); + return ret; + } + + if (xclk_freq != 23880000) { + dev_err(dev, "external clock frequency %u is not supported\n", + xclk_freq); + return -EINVAL; + } + + ret = clk_set_rate(ov5645->xclk, xclk_freq); + if (ret) { + dev_err(dev, "could not set xclk frequency\n"); + return ret; + } + + ov5645->io_regulator = devm_regulator_get(dev, "vdddo"); + if (IS_ERR(ov5645->io_regulator)) { + dev_err(dev, "cannot get io regulator\n"); + return PTR_ERR(ov5645->io_regulator); + } + + ret = regulator_set_voltage(ov5645->io_regulator, + OV5645_VOLTAGE_DIGITAL_IO, + OV5645_VOLTAGE_DIGITAL_IO); + if (ret < 0) { + dev_err(dev, "cannot set io voltage\n"); + return ret; + } + + ov5645->core_regulator = devm_regulator_get(dev, "vddd"); + if (IS_ERR(ov5645->core_regulator)) { + dev_err(dev, "cannot get core regulator\n"); + return PTR_ERR(ov5645->core_regulator); + } + + ret = 
regulator_set_voltage(ov5645->core_regulator, + OV5645_VOLTAGE_DIGITAL_CORE, + OV5645_VOLTAGE_DIGITAL_CORE); + if (ret < 0) { + dev_err(dev, "cannot set core voltage\n"); + return ret; + } + + ov5645->analog_regulator = devm_regulator_get(dev, "vdda"); + if (IS_ERR(ov5645->analog_regulator)) { + dev_err(dev, "cannot get analog regulator\n"); + return PTR_ERR(ov5645->analog_regulator); + } + + ret = regulator_set_voltage(ov5645->analog_regulator, + OV5645_VOLTAGE_ANALOG, + OV5645_VOLTAGE_ANALOG); + if (ret < 0) { + dev_err(dev, "cannot set analog voltage\n"); + return ret; + } + + ov5645->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_HIGH); + if (IS_ERR(ov5645->enable_gpio)) { + dev_err(dev, "cannot get enable gpio\n"); + return PTR_ERR(ov5645->enable_gpio); + } + + ov5645->rst_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(ov5645->rst_gpio)) { + dev_err(dev, "cannot get reset gpio\n"); + return PTR_ERR(ov5645->rst_gpio); + } + + mutex_init(&ov5645->power_lock); + + v4l2_ctrl_handler_init(&ov5645->ctrls, 9); + v4l2_ctrl_new_std(&ov5645->ctrls, &ov5645_ctrl_ops, + V4L2_CID_SATURATION, -4, 4, 1, 0); + v4l2_ctrl_new_std(&ov5645->ctrls, &ov5645_ctrl_ops, + V4L2_CID_HFLIP, 0, 1, 1, 0); + v4l2_ctrl_new_std(&ov5645->ctrls, &ov5645_ctrl_ops, + V4L2_CID_VFLIP, 0, 1, 1, 0); + v4l2_ctrl_new_std(&ov5645->ctrls, &ov5645_ctrl_ops, + V4L2_CID_AUTOGAIN, 0, 1, 1, 1); + v4l2_ctrl_new_std(&ov5645->ctrls, &ov5645_ctrl_ops, + V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 1); + v4l2_ctrl_new_std_menu(&ov5645->ctrls, &ov5645_ctrl_ops, + V4L2_CID_EXPOSURE_AUTO, V4L2_EXPOSURE_MANUAL, + 0, V4L2_EXPOSURE_AUTO); + v4l2_ctrl_new_std_menu_items(&ov5645->ctrls, &ov5645_ctrl_ops, + V4L2_CID_TEST_PATTERN, + ARRAY_SIZE(ov5645_test_pattern_menu) - 1, + 0, 0, ov5645_test_pattern_menu); + ov5645->pixel_clock = v4l2_ctrl_new_std(&ov5645->ctrls, + &ov5645_ctrl_ops, + V4L2_CID_PIXEL_RATE, + 1, INT_MAX, 1, 1); + ov5645->link_freq = v4l2_ctrl_new_int_menu(&ov5645->ctrls, + 
&ov5645_ctrl_ops, + V4L2_CID_LINK_FREQ, + ARRAY_SIZE(link_freq) - 1, + 0, link_freq); + if (ov5645->link_freq) + ov5645->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY; + + ov5645->sd.ctrl_handler = &ov5645->ctrls; + + if (ov5645->ctrls.error) { + dev_err(dev, "%s: control initialization error %d\n", + __func__, ov5645->ctrls.error); + ret = ov5645->ctrls.error; + goto free_ctrl; + } + + v4l2_i2c_subdev_init(&ov5645->sd, client, &ov5645_subdev_ops); + ov5645->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; + ov5645->pad.flags = MEDIA_PAD_FL_SOURCE; + ov5645->sd.dev = &client->dev; + ov5645->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR; + + ret = media_entity_pads_init(&ov5645->sd.entity, 1, &ov5645->pad); + if (ret < 0) { + dev_err(dev, "could not register media entity\n"); + goto free_ctrl; + } + + ret = ov5645_s_power(&ov5645->sd, true); + if (ret < 0) { + dev_err(dev, "could not power up OV5645\n"); + goto free_entity; + } + + ret = ov5645_read_reg(ov5645, OV5645_CHIP_ID_HIGH, &chip_id_high); + if (ret < 0 || chip_id_high != OV5645_CHIP_ID_HIGH_BYTE) { + dev_err(dev, "could not read ID high\n"); + ret = -ENODEV; + goto power_down; + } + ret = ov5645_read_reg(ov5645, OV5645_CHIP_ID_LOW, &chip_id_low); + if (ret < 0 || chip_id_low != OV5645_CHIP_ID_LOW_BYTE) { + dev_err(dev, "could not read ID low\n"); + ret = -ENODEV; + goto power_down; + } + + dev_info(dev, "OV5645 detected at address 0x%02x\n", client->addr); + + ret = ov5645_read_reg(ov5645, OV5645_AEC_PK_MANUAL, + &ov5645->aec_pk_manual); + if (ret < 0) { + dev_err(dev, "could not read AEC/AGC mode\n"); + ret = -ENODEV; + goto power_down; + } + + ret = ov5645_read_reg(ov5645, OV5645_TIMING_TC_REG20, + &ov5645->timing_tc_reg20); + if (ret < 0) { + dev_err(dev, "could not read vflip value\n"); + ret = -ENODEV; + goto power_down; + } + + ret = ov5645_read_reg(ov5645, OV5645_TIMING_TC_REG21, + &ov5645->timing_tc_reg21); + if (ret < 0) { + dev_err(dev, "could not read hflip value\n"); + ret = -ENODEV; + goto power_down; + } 
+ + ov5645_s_power(&ov5645->sd, false); + + ret = v4l2_async_register_subdev(&ov5645->sd); + if (ret < 0) { + dev_err(dev, "could not register v4l2 device\n"); + goto free_entity; + } + + ov5645_entity_init_cfg(&ov5645->sd, NULL); + + return 0; + +power_down: + ov5645_s_power(&ov5645->sd, false); +free_entity: + media_entity_cleanup(&ov5645->sd.entity); +free_ctrl: + v4l2_ctrl_handler_free(&ov5645->ctrls); + mutex_destroy(&ov5645->power_lock); + + return ret; +} + +static int ov5645_remove(struct i2c_client *client) +{ + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct ov5645 *ov5645 = to_ov5645(sd); + + v4l2_async_unregister_subdev(&ov5645->sd); + media_entity_cleanup(&ov5645->sd.entity); + v4l2_ctrl_handler_free(&ov5645->ctrls); + mutex_destroy(&ov5645->power_lock); + + return 0; +} + +static const struct i2c_device_id ov5645_id[] = { + { "ov5645", 0 }, + {} +}; +MODULE_DEVICE_TABLE(i2c, ov5645_id); + +static const struct of_device_id ov5645_of_match[] = { + { .compatible = "ovti,ov5645" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, ov5645_of_match); + +static struct i2c_driver ov5645_i2c_driver = { + .driver = { + .of_match_table = of_match_ptr(ov5645_of_match), + .name = "ov5645", + }, + .probe = ov5645_probe, + .remove = ov5645_remove, + .id_table = ov5645_id, +}; + +module_i2c_driver(ov5645_i2c_driver); + +MODULE_DESCRIPTION("Omnivision OV5645 Camera Driver"); +MODULE_AUTHOR("Todor Tomov <todor.tomov@linaro.org>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/media-entity.c b/drivers/media/media-entity.c index 98b067b712b3..fd7ddd441190 100644 --- a/drivers/media/media-entity.c +++ b/drivers/media/media-entity.c @@ -854,7 +854,7 @@ media_entity_find_link(struct media_pad *source, struct media_pad *sink) } EXPORT_SYMBOL_GPL(media_entity_find_link); -struct media_pad *media_entity_remote_pad(struct media_pad *pad) +struct media_pad *media_entity_remote_pad(const struct media_pad *pad) { struct media_link *link; diff --git 
a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig index 5ff803efdc03..ff094b13b6b3 100644 --- a/drivers/media/platform/Kconfig +++ b/drivers/media/platform/Kconfig @@ -100,6 +100,12 @@ config VIDEO_PXA27x ---help--- This is a v4l2 driver for the PXA27x Quick Capture Interface +config VIDEO_QCOM_CAMSS + tristate "Qualcomm 8x16 V4L2 Camera Subsystem driver" + depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API + depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST + select VIDEOBUF2_DMA_SG + config VIDEO_S3C_CAMIF tristate "Samsung S3C24XX/S3C64XX SoC Camera Interface driver" depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API @@ -345,6 +351,20 @@ config VIDEO_TI_VPE_DEBUG ---help--- Enable debug messages on VPE driver. +config VIDEO_QCOM_VENUS + tristate "Qualcomm Venus V4L2 encoder/decoder driver" + depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA + depends on ARCH_QCOM && OF + depends on IOMMU_DMA + select QCOM_MDT_LOADER + select VIDEOBUF2_DMA_SG + select V4L2_MEM2MEM_DEV + ---help--- + This is a V4L2 driver for Qualcomm Venus video accelerator + hardware. It accelerates encoding and decoding operations + on various Qualcomm SoCs. + To compile this driver as a module choose m here. 
+ endif # V4L_MEM2MEM_DRIVERS menuconfig V4L_TEST_DRIVERS diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile index 40b18d12726e..9689fcf476b7 100644 --- a/drivers/media/platform/Makefile +++ b/drivers/media/platform/Makefile @@ -66,3 +66,7 @@ ccflags-y += -I$(srctree)/drivers/media/i2c obj-$(CONFIG_VIDEO_MEDIATEK_VPU) += mtk-vpu/ obj-$(CONFIG_VIDEO_MEDIATEK_VCODEC) += mtk-vcodec/ + +obj-$(CONFIG_VIDEO_QCOM_CAMSS) += qcom/camss-8x16/ + +obj-$(CONFIG_VIDEO_QCOM_VENUS) += qcom/venus/ diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c index bbb5feef8308..2a6f64873856 100644 --- a/drivers/media/platform/exynos-gsc/gsc-core.c +++ b/drivers/media/platform/exynos-gsc/gsc-core.c @@ -530,9 +530,9 @@ int gsc_try_crop(struct gsc_ctx *ctx, struct v4l2_crop *cr) } pr_debug("user put w: %d, h: %d", cr->c.width, cr->c.height); - if (cr->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) + if (cr->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) f = &ctx->d_frame; - else if (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + else if (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) f = &ctx->s_frame; else return -EINVAL; diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c index 9f03b791b711..65ddf3075f89 100644 --- a/drivers/media/platform/exynos-gsc/gsc-m2m.c +++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c @@ -449,8 +449,8 @@ static int gsc_m2m_g_selection(struct file *file, void *fh, struct gsc_frame *frame; struct gsc_ctx *ctx = fh_to_ctx(fh); - if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) && - (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)) + if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) && + (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)) return -EINVAL; frame = ctx_get_frame(ctx, s->type); @@ -492,8 +492,8 @@ static int gsc_m2m_s_selection(struct file *file, void *fh, cr.type = s->type; cr.c = s->r; - if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) && - (s->type != 
V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)) + if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) && + (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)) return -EINVAL; ret = gsc_try_crop(ctx, &cr); diff --git a/drivers/media/platform/exynos4-is/fimc-capture.c b/drivers/media/platform/exynos4-is/fimc-capture.c index 964f4a681934..769167790802 100644 --- a/drivers/media/platform/exynos4-is/fimc-capture.c +++ b/drivers/media/platform/exynos4-is/fimc-capture.c @@ -1270,7 +1270,7 @@ static int fimc_cap_g_selection(struct file *file, void *fh, struct fimc_ctx *ctx = fimc->vid_cap.ctx; struct fimc_frame *f = &ctx->s_frame; - if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) + if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; switch (s->target) { @@ -1320,7 +1320,7 @@ static int fimc_cap_s_selection(struct file *file, void *fh, struct fimc_frame *f; unsigned long flags; - if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) + if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; if (s->target == V4L2_SEL_TGT_COMPOSE) diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c index b91abf1c4d43..0762659c7244 100644 --- a/drivers/media/platform/exynos4-is/fimc-lite.c +++ b/drivers/media/platform/exynos4-is/fimc-lite.c @@ -901,7 +901,7 @@ static int fimc_lite_g_selection(struct file *file, void *fh, struct fimc_lite *fimc = video_drvdata(file); struct flite_frame *f = &fimc->out_frame; - if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) + if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; switch (sel->target) { @@ -929,7 +929,7 @@ static int fimc_lite_s_selection(struct file *file, void *fh, struct v4l2_rect rect = sel->r; unsigned long flags; - if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE || + if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE || sel->target != V4L2_SEL_TGT_COMPOSE) return -EINVAL; diff --git a/drivers/media/platform/qcom/camss-8x16/Makefile b/drivers/media/platform/qcom/camss-8x16/Makefile new file mode 
100644 index 000000000000..3c4024fbb768 --- /dev/null +++ b/drivers/media/platform/qcom/camss-8x16/Makefile @@ -0,0 +1,11 @@ +# Makefile for Qualcomm CAMSS driver + +qcom-camss-objs += \ + camss.o \ + camss-csid.o \ + camss-csiphy.o \ + camss-ispif.o \ + camss-vfe.o \ + camss-video.o \ + +obj-$(CONFIG_VIDEO_QCOM_CAMSS) += qcom-camss.o diff --git a/drivers/media/platform/qcom/camss-8x16/camss-csid.c b/drivers/media/platform/qcom/camss-8x16/camss-csid.c new file mode 100644 index 000000000000..64df82817de3 --- /dev/null +++ b/drivers/media/platform/qcom/camss-8x16/camss-csid.c @@ -0,0 +1,1092 @@ +/* + * camss-csid.c + * + * Qualcomm MSM Camera Subsystem - CSID (CSI Decoder) Module + * + * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved. + * Copyright (C) 2015-2017 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#include <linux/clk.h> +#include <linux/completion.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/regulator/consumer.h> +#include <media/media-entity.h> +#include <media/v4l2-device.h> +#include <media/v4l2-subdev.h> + +#include "camss-csid.h" +#include "camss.h" + +#define MSM_CSID_NAME "msm_csid" + +#define CAMSS_CSID_HW_VERSION 0x0 +#define CAMSS_CSID_CORE_CTRL_0 0x004 +#define CAMSS_CSID_CORE_CTRL_1 0x008 +#define CAMSS_CSID_RST_CMD 0x00c +#define CAMSS_CSID_CID_LUT_VC_n(n) (0x010 + 0x4 * (n)) +#define CAMSS_CSID_CID_n_CFG(n) (0x020 + 0x4 * (n)) +#define CAMSS_CSID_IRQ_CLEAR_CMD 0x060 +#define CAMSS_CSID_IRQ_MASK 0x064 +#define CAMSS_CSID_IRQ_STATUS 0x068 +#define CAMSS_CSID_TG_CTRL 0x0a0 +#define CAMSS_CSID_TG_CTRL_DISABLE 0xa06436 +#define CAMSS_CSID_TG_CTRL_ENABLE 0xa06437 +#define CAMSS_CSID_TG_VC_CFG 0x0a4 +#define CAMSS_CSID_TG_VC_CFG_H_BLANKING 0x3ff +#define CAMSS_CSID_TG_VC_CFG_V_BLANKING 0x7f +#define CAMSS_CSID_TG_DT_n_CGG_0(n) (0x0ac + 0xc * (n)) +#define CAMSS_CSID_TG_DT_n_CGG_1(n) (0x0b0 + 0xc * (n)) +#define CAMSS_CSID_TG_DT_n_CGG_2(n) (0x0b4 + 0xc * (n)) + +#define DATA_TYPE_EMBEDDED_DATA_8BIT 0x12 +#define DATA_TYPE_YUV422_8BIT 0x1e +#define DATA_TYPE_RAW_6BIT 0x28 +#define DATA_TYPE_RAW_8BIT 0x2a +#define DATA_TYPE_RAW_10BIT 0x2b +#define DATA_TYPE_RAW_12BIT 0x2c + +#define DECODE_FORMAT_UNCOMPRESSED_6_BIT 0x0 +#define DECODE_FORMAT_UNCOMPRESSED_8_BIT 0x1 +#define DECODE_FORMAT_UNCOMPRESSED_10_BIT 0x2 +#define DECODE_FORMAT_UNCOMPRESSED_12_BIT 0x3 + +#define CSID_RESET_TIMEOUT_MS 500 + +struct csid_fmts { + u32 code; + u8 data_type; + u8 decode_format; + u8 bpp; + u8 spp; /* bus samples per pixel */ +}; + +static const struct csid_fmts csid_input_fmts[] = { + { + MEDIA_BUS_FMT_UYVY8_2X8, + DATA_TYPE_YUV422_8BIT, + DECODE_FORMAT_UNCOMPRESSED_8_BIT, + 8, + 2, + }, + { + MEDIA_BUS_FMT_VYUY8_2X8, + DATA_TYPE_YUV422_8BIT, + 
DECODE_FORMAT_UNCOMPRESSED_8_BIT, + 8, + 2, + }, + { + MEDIA_BUS_FMT_YUYV8_2X8, + DATA_TYPE_YUV422_8BIT, + DECODE_FORMAT_UNCOMPRESSED_8_BIT, + 8, + 2, + }, + { + MEDIA_BUS_FMT_YVYU8_2X8, + DATA_TYPE_YUV422_8BIT, + DECODE_FORMAT_UNCOMPRESSED_8_BIT, + 8, + 2, + }, + { + MEDIA_BUS_FMT_SBGGR8_1X8, + DATA_TYPE_RAW_8BIT, + DECODE_FORMAT_UNCOMPRESSED_8_BIT, + 8, + 1, + }, + { + MEDIA_BUS_FMT_SGBRG8_1X8, + DATA_TYPE_RAW_8BIT, + DECODE_FORMAT_UNCOMPRESSED_8_BIT, + 8, + 1, + }, + { + MEDIA_BUS_FMT_SGRBG8_1X8, + DATA_TYPE_RAW_8BIT, + DECODE_FORMAT_UNCOMPRESSED_8_BIT, + 8, + 1, + }, + { + MEDIA_BUS_FMT_SRGGB8_1X8, + DATA_TYPE_RAW_8BIT, + DECODE_FORMAT_UNCOMPRESSED_8_BIT, + 8, + 1, + }, + { + MEDIA_BUS_FMT_SBGGR10_1X10, + DATA_TYPE_RAW_10BIT, + DECODE_FORMAT_UNCOMPRESSED_10_BIT, + 10, + 1, + }, + { + MEDIA_BUS_FMT_SGBRG10_1X10, + DATA_TYPE_RAW_10BIT, + DECODE_FORMAT_UNCOMPRESSED_10_BIT, + 10, + 1, + }, + { + MEDIA_BUS_FMT_SGRBG10_1X10, + DATA_TYPE_RAW_10BIT, + DECODE_FORMAT_UNCOMPRESSED_10_BIT, + 10, + 1, + }, + { + MEDIA_BUS_FMT_SRGGB10_1X10, + DATA_TYPE_RAW_10BIT, + DECODE_FORMAT_UNCOMPRESSED_10_BIT, + 10, + 1, + }, + { + MEDIA_BUS_FMT_SBGGR12_1X12, + DATA_TYPE_RAW_12BIT, + DECODE_FORMAT_UNCOMPRESSED_12_BIT, + 12, + 1, + }, + { + MEDIA_BUS_FMT_SGBRG12_1X12, + DATA_TYPE_RAW_12BIT, + DECODE_FORMAT_UNCOMPRESSED_12_BIT, + 12, + 1, + }, + { + MEDIA_BUS_FMT_SGRBG12_1X12, + DATA_TYPE_RAW_12BIT, + DECODE_FORMAT_UNCOMPRESSED_12_BIT, + 12, + 1, + }, + { + MEDIA_BUS_FMT_SRGGB12_1X12, + DATA_TYPE_RAW_12BIT, + DECODE_FORMAT_UNCOMPRESSED_12_BIT, + 12, + 1, + } +}; + +static const struct csid_fmts *csid_get_fmt_entry(u32 code) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(csid_input_fmts); i++) + if (code == csid_input_fmts[i].code) + return &csid_input_fmts[i]; + + WARN(1, "Unknown format\n"); + + return &csid_input_fmts[0]; +} + +/* + * csid_isr - CSID module interrupt handler + * @irq: Interrupt line + * @dev: CSID device + * + * Return IRQ_HANDLED on success + */ +static 
irqreturn_t csid_isr(int irq, void *dev) +{ + struct csid_device *csid = dev; + u32 value; + + value = readl_relaxed(csid->base + CAMSS_CSID_IRQ_STATUS); + writel_relaxed(value, csid->base + CAMSS_CSID_IRQ_CLEAR_CMD); + + if ((value >> 11) & 0x1) + complete(&csid->reset_complete); + + return IRQ_HANDLED; +} + +/* + * csid_set_clock_rates - Calculate and set clock rates on CSID module + * @csiphy: CSID device + */ +static int csid_set_clock_rates(struct csid_device *csid) +{ + struct device *dev = to_device_index(csid, csid->id); + u32 pixel_clock; + int i, j; + int ret; + + ret = camss_get_pixel_clock(&csid->subdev.entity, &pixel_clock); + if (ret) + pixel_clock = 0; + + for (i = 0; i < csid->nclocks; i++) { + struct camss_clock *clock = &csid->clock[i]; + + if (!strcmp(clock->name, "csi0") || + !strcmp(clock->name, "csi1")) { + u8 bpp = csid_get_fmt_entry( + csid->fmt[MSM_CSIPHY_PAD_SINK].code)->bpp; + u8 num_lanes = csid->phy.lane_cnt; + u64 min_rate = pixel_clock * bpp / (2 * num_lanes * 4); + long rate; + + camss_add_clock_margin(&min_rate); + + for (j = 0; j < clock->nfreqs; j++) + if (min_rate < clock->freq[j]) + break; + + if (j == clock->nfreqs) { + dev_err(dev, + "Pixel clock is too high for CSID\n"); + return -EINVAL; + } + + /* if sensor pixel clock is not available */ + /* set highest possible CSID clock rate */ + if (min_rate == 0) + j = clock->nfreqs - 1; + + rate = clk_round_rate(clock->clk, clock->freq[j]); + if (rate < 0) { + dev_err(dev, "clk round rate failed: %ld\n", + rate); + return -EINVAL; + } + + ret = clk_set_rate(clock->clk, rate); + if (ret < 0) { + dev_err(dev, "clk set rate failed: %d\n", ret); + return ret; + } + } + } + + return 0; +} + +/* + * csid_reset - Trigger reset on CSID module and wait to complete + * @csid: CSID device + * + * Return 0 on success or a negative error code otherwise + */ +static int csid_reset(struct csid_device *csid) +{ + unsigned long time; + + reinit_completion(&csid->reset_complete); + + 
writel_relaxed(0x7fff, csid->base + CAMSS_CSID_RST_CMD); + + time = wait_for_completion_timeout(&csid->reset_complete, + msecs_to_jiffies(CSID_RESET_TIMEOUT_MS)); + if (!time) { + dev_err(to_device_index(csid, csid->id), + "CSID reset timeout\n"); + return -EIO; + } + + return 0; +} + +/* + * csid_set_power - Power on/off CSID module + * @sd: CSID V4L2 subdevice + * @on: Requested power state + * + * Return 0 on success or a negative error code otherwise + */ +static int csid_set_power(struct v4l2_subdev *sd, int on) +{ + struct csid_device *csid = v4l2_get_subdevdata(sd); + struct device *dev = to_device_index(csid, csid->id); + int ret; + + if (on) { + u32 hw_version; + + ret = regulator_enable(csid->vdda); + if (ret < 0) + return ret; + + ret = csid_set_clock_rates(csid); + if (ret < 0) { + regulator_disable(csid->vdda); + return ret; + } + + ret = camss_enable_clocks(csid->nclocks, csid->clock, dev); + if (ret < 0) { + regulator_disable(csid->vdda); + return ret; + } + + enable_irq(csid->irq); + + ret = csid_reset(csid); + if (ret < 0) { + disable_irq(csid->irq); + camss_disable_clocks(csid->nclocks, csid->clock); + regulator_disable(csid->vdda); + return ret; + } + + hw_version = readl_relaxed(csid->base + CAMSS_CSID_HW_VERSION); + dev_dbg(dev, "CSID HW Version = 0x%08x\n", hw_version); + } else { + disable_irq(csid->irq); + camss_disable_clocks(csid->nclocks, csid->clock); + ret = regulator_disable(csid->vdda); + } + + return ret; +} + +/* + * csid_set_stream - Enable/disable streaming on CSID module + * @sd: CSID V4L2 subdevice + * @enable: Requested streaming state + * + * Main configuration of CSID module is also done here. 
+ * + * Return 0 on success or a negative error code otherwise + */ +static int csid_set_stream(struct v4l2_subdev *sd, int enable) +{ + struct csid_device *csid = v4l2_get_subdevdata(sd); + struct csid_testgen_config *tg = &csid->testgen; + u32 val; + + if (enable) { + u8 vc = 0; /* Virtual Channel 0 */ + u8 cid = vc * 4; /* id of Virtual Channel and Data Type set */ + u8 dt, dt_shift, df; + int ret; + + ret = v4l2_ctrl_handler_setup(&csid->ctrls); + if (ret < 0) { + dev_err(to_device_index(csid, csid->id), + "could not sync v4l2 controls: %d\n", ret); + return ret; + } + + if (!tg->enabled && + !media_entity_remote_pad(&csid->pads[MSM_CSID_PAD_SINK])) + return -ENOLINK; + + dt = csid_get_fmt_entry(csid->fmt[MSM_CSID_PAD_SRC].code)-> + data_type; + + if (tg->enabled) { + /* Config Test Generator */ + struct v4l2_mbus_framefmt *f = + &csid->fmt[MSM_CSID_PAD_SRC]; + u8 bpp = csid_get_fmt_entry(f->code)->bpp; + u8 spp = csid_get_fmt_entry(f->code)->spp; + u32 num_bytes_per_line = f->width * bpp * spp / 8; + u32 num_lines = f->height; + + /* 31:24 V blank, 23:13 H blank, 3:2 num of active DT */ + /* 1:0 VC */ + val = ((CAMSS_CSID_TG_VC_CFG_V_BLANKING & 0xff) << 24) | + ((CAMSS_CSID_TG_VC_CFG_H_BLANKING & 0x7ff) << 13); + writel_relaxed(val, csid->base + CAMSS_CSID_TG_VC_CFG); + + /* 28:16 bytes per lines, 12:0 num of lines */ + val = ((num_bytes_per_line & 0x1fff) << 16) | + (num_lines & 0x1fff); + writel_relaxed(val, csid->base + + CAMSS_CSID_TG_DT_n_CGG_0(0)); + + /* 5:0 data type */ + val = dt; + writel_relaxed(val, csid->base + + CAMSS_CSID_TG_DT_n_CGG_1(0)); + + /* 2:0 output test pattern */ + val = tg->payload_mode; + writel_relaxed(val, csid->base + + CAMSS_CSID_TG_DT_n_CGG_2(0)); + } else { + struct csid_phy_config *phy = &csid->phy; + + val = phy->lane_cnt - 1; + val |= phy->lane_assign << 4; + + writel_relaxed(val, + csid->base + CAMSS_CSID_CORE_CTRL_0); + + val = phy->csiphy_id << 17; + val |= 0x9; + + writel_relaxed(val, + csid->base + 
CAMSS_CSID_CORE_CTRL_1); + } + + /* Config LUT */ + + dt_shift = (cid % 4) * 8; + df = csid_get_fmt_entry(csid->fmt[MSM_CSID_PAD_SINK].code)-> + decode_format; + + val = readl_relaxed(csid->base + CAMSS_CSID_CID_LUT_VC_n(vc)); + val &= ~(0xff << dt_shift); + val |= dt << dt_shift; + writel_relaxed(val, csid->base + CAMSS_CSID_CID_LUT_VC_n(vc)); + + val = (df << 4) | 0x3; + writel_relaxed(val, csid->base + CAMSS_CSID_CID_n_CFG(cid)); + + if (tg->enabled) { + val = CAMSS_CSID_TG_CTRL_ENABLE; + writel_relaxed(val, csid->base + CAMSS_CSID_TG_CTRL); + } + } else { + if (tg->enabled) { + val = CAMSS_CSID_TG_CTRL_DISABLE; + writel_relaxed(val, csid->base + CAMSS_CSID_TG_CTRL); + } + } + + return 0; +} + +/* + * __csid_get_format - Get pointer to format structure + * @csid: CSID device + * @cfg: V4L2 subdev pad configuration + * @pad: pad from which format is requested + * @which: TRY or ACTIVE format + * + * Return pointer to TRY or ACTIVE format structure + */ +static struct v4l2_mbus_framefmt * +__csid_get_format(struct csid_device *csid, + struct v4l2_subdev_pad_config *cfg, + unsigned int pad, + enum v4l2_subdev_format_whence which) +{ + if (which == V4L2_SUBDEV_FORMAT_TRY) + return v4l2_subdev_get_try_format(&csid->subdev, cfg, pad); + + return &csid->fmt[pad]; +} + +/* + * csid_try_format - Handle try format by pad subdev method + * @csid: CSID device + * @cfg: V4L2 subdev pad configuration + * @pad: pad on which format is requested + * @fmt: pointer to v4l2 format structure + * @which: wanted subdev format + */ +static void csid_try_format(struct csid_device *csid, + struct v4l2_subdev_pad_config *cfg, + unsigned int pad, + struct v4l2_mbus_framefmt *fmt, + enum v4l2_subdev_format_whence which) +{ + unsigned int i; + + switch (pad) { + case MSM_CSID_PAD_SINK: + /* Set format on sink pad */ + + for (i = 0; i < ARRAY_SIZE(csid_input_fmts); i++) + if (fmt->code == csid_input_fmts[i].code) + break; + + /* If not found, use UYVY as default */ + if (i >= 
ARRAY_SIZE(csid_input_fmts)) + fmt->code = MEDIA_BUS_FMT_UYVY8_2X8; + + fmt->width = clamp_t(u32, fmt->width, 1, 8191); + fmt->height = clamp_t(u32, fmt->height, 1, 8191); + + fmt->field = V4L2_FIELD_NONE; + fmt->colorspace = V4L2_COLORSPACE_SRGB; + + break; + + case MSM_CSID_PAD_SRC: + if (csid->testgen_mode->cur.val == 0) { + /* Test generator is disabled, keep pad formats */ + /* in sync - set and return a format same as sink pad */ + struct v4l2_mbus_framefmt format; + + format = *__csid_get_format(csid, cfg, + MSM_CSID_PAD_SINK, which); + *fmt = format; + } else { + /* Test generator is enabled, set format on source*/ + /* pad to allow test generator usage */ + + for (i = 0; i < ARRAY_SIZE(csid_input_fmts); i++) + if (csid_input_fmts[i].code == fmt->code) + break; + + /* If not found, use UYVY as default */ + if (i >= ARRAY_SIZE(csid_input_fmts)) + fmt->code = MEDIA_BUS_FMT_UYVY8_2X8; + + fmt->width = clamp_t(u32, fmt->width, 1, 8191); + fmt->height = clamp_t(u32, fmt->height, 1, 8191); + + fmt->field = V4L2_FIELD_NONE; + } + break; + } + + fmt->colorspace = V4L2_COLORSPACE_SRGB; +} + +/* + * csid_enum_mbus_code - Handle pixel format enumeration + * @sd: CSID V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @code: pointer to v4l2_subdev_mbus_code_enum structure + * return -EINVAL or zero on success + */ +static int csid_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_mbus_code_enum *code) +{ + struct csid_device *csid = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt *format; + + if (code->pad == MSM_CSID_PAD_SINK) { + if (code->index >= ARRAY_SIZE(csid_input_fmts)) + return -EINVAL; + + code->code = csid_input_fmts[code->index].code; + } else { + if (csid->testgen_mode->cur.val == 0) { + if (code->index > 0) + return -EINVAL; + + format = __csid_get_format(csid, cfg, MSM_CSID_PAD_SINK, + code->which); + + code->code = format->code; + } else { + if (code->index >= ARRAY_SIZE(csid_input_fmts)) + 
return -EINVAL; + + code->code = csid_input_fmts[code->index].code; + } + } + + return 0; +} + +/* + * csid_enum_frame_size - Handle frame size enumeration + * @sd: CSID V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @fse: pointer to v4l2_subdev_frame_size_enum structure + * return -EINVAL or zero on success + */ +static int csid_enum_frame_size(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_frame_size_enum *fse) +{ + struct csid_device *csid = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt format; + + if (fse->index != 0) + return -EINVAL; + + format.code = fse->code; + format.width = 1; + format.height = 1; + csid_try_format(csid, cfg, fse->pad, &format, fse->which); + fse->min_width = format.width; + fse->min_height = format.height; + + if (format.code != fse->code) + return -EINVAL; + + format.code = fse->code; + format.width = -1; + format.height = -1; + csid_try_format(csid, cfg, fse->pad, &format, fse->which); + fse->max_width = format.width; + fse->max_height = format.height; + + return 0; +} + +/* + * csid_get_format - Handle get format by pads subdev method + * @sd: CSID V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @fmt: pointer to v4l2 subdev format structure + * + * Return -EINVAL or zero on success + */ +static int csid_get_format(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct csid_device *csid = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt *format; + + format = __csid_get_format(csid, cfg, fmt->pad, fmt->which); + if (format == NULL) + return -EINVAL; + + fmt->format = *format; + + return 0; +} + +/* + * csid_set_format - Handle set format by pads subdev method + * @sd: CSID V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @fmt: pointer to v4l2 subdev format structure + * + * Return -EINVAL or zero on success + */ +static int csid_set_format(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + 
struct v4l2_subdev_format *fmt) +{ + struct csid_device *csid = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt *format; + + format = __csid_get_format(csid, cfg, fmt->pad, fmt->which); + if (format == NULL) + return -EINVAL; + + csid_try_format(csid, cfg, fmt->pad, &fmt->format, fmt->which); + *format = fmt->format; + + /* Propagate the format from sink to source */ + if (fmt->pad == MSM_CSID_PAD_SINK) { + format = __csid_get_format(csid, cfg, MSM_CSID_PAD_SRC, + fmt->which); + + *format = fmt->format; + csid_try_format(csid, cfg, MSM_CSID_PAD_SRC, format, + fmt->which); + } + + return 0; +} + +/* + * csid_init_formats - Initialize formats on all pads + * @sd: CSID V4L2 subdevice + * @fh: V4L2 subdev file handle + * + * Initialize all pad formats with default values. + * + * Return 0 on success or a negative error code otherwise + */ +static int csid_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) +{ + struct v4l2_subdev_format format = { + .pad = MSM_CSID_PAD_SINK, + .which = fh ? V4L2_SUBDEV_FORMAT_TRY : + V4L2_SUBDEV_FORMAT_ACTIVE, + .format = { + .code = MEDIA_BUS_FMT_UYVY8_2X8, + .width = 1920, + .height = 1080 + } + }; + + return csid_set_format(sd, fh ? 
fh->pad : NULL, &format); +} + +static const char * const csid_test_pattern_menu[] = { + "Disabled", + "Incrementing", + "Alternating 0x55/0xAA", + "All Zeros 0x00", + "All Ones 0xFF", + "Pseudo-random Data", +}; + +/* + * csid_set_test_pattern - Set test generator's pattern mode + * @csid: CSID device + * @value: desired test pattern mode + * + * Return 0 on success or a negative error code otherwise + */ +static int csid_set_test_pattern(struct csid_device *csid, s32 value) +{ + struct csid_testgen_config *tg = &csid->testgen; + + /* If CSID is linked to CSIPHY, do not allow to enable test generator */ + if (value && media_entity_remote_pad(&csid->pads[MSM_CSID_PAD_SINK])) + return -EBUSY; + + tg->enabled = !!value; + + switch (value) { + case 1: + tg->payload_mode = CSID_PAYLOAD_MODE_INCREMENTING; + break; + case 2: + tg->payload_mode = CSID_PAYLOAD_MODE_ALTERNATING_55_AA; + break; + case 3: + tg->payload_mode = CSID_PAYLOAD_MODE_ALL_ZEROES; + break; + case 4: + tg->payload_mode = CSID_PAYLOAD_MODE_ALL_ONES; + break; + case 5: + tg->payload_mode = CSID_PAYLOAD_MODE_RANDOM; + break; + } + + return 0; +} + +/* + * csid_s_ctrl - Handle set control subdev method + * @ctrl: pointer to v4l2 control structure + * + * Return 0 on success or a negative error code otherwise + */ +static int csid_s_ctrl(struct v4l2_ctrl *ctrl) +{ + struct csid_device *csid = container_of(ctrl->handler, + struct csid_device, ctrls); + int ret = -EINVAL; + + switch (ctrl->id) { + case V4L2_CID_TEST_PATTERN: + ret = csid_set_test_pattern(csid, ctrl->val); + break; + } + + return ret; +} + +static const struct v4l2_ctrl_ops csid_ctrl_ops = { + .s_ctrl = csid_s_ctrl, +}; + +/* + * msm_csid_subdev_init - Initialize CSID device structure and resources + * @csid: CSID device + * @res: CSID module resources table + * @id: CSID module id + * + * Return 0 on success or a negative error code otherwise + */ +int msm_csid_subdev_init(struct csid_device *csid, + const struct resources *res, u8 id) +{ + 
struct device *dev = to_device_index(csid, id); + struct platform_device *pdev = to_platform_device(dev); + struct resource *r; + int i, j; + int ret; + + csid->id = id; + + /* Memory */ + + r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res->reg[0]); + csid->base = devm_ioremap_resource(dev, r); + if (IS_ERR(csid->base)) { + dev_err(dev, "could not map memory\n"); + return PTR_ERR(csid->base); + } + + /* Interrupt */ + + r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, + res->interrupt[0]); + if (!r) { + dev_err(dev, "missing IRQ\n"); + return -EINVAL; + } + + csid->irq = r->start; + snprintf(csid->irq_name, sizeof(csid->irq_name), "%s_%s%d", + dev_name(dev), MSM_CSID_NAME, csid->id); + ret = devm_request_irq(dev, csid->irq, csid_isr, + IRQF_TRIGGER_RISING, csid->irq_name, csid); + if (ret < 0) { + dev_err(dev, "request_irq failed: %d\n", ret); + return ret; + } + + disable_irq(csid->irq); + + /* Clocks */ + + csid->nclocks = 0; + while (res->clock[csid->nclocks]) + csid->nclocks++; + + csid->clock = devm_kzalloc(dev, csid->nclocks * sizeof(*csid->clock), + GFP_KERNEL); + if (!csid->clock) + return -ENOMEM; + + for (i = 0; i < csid->nclocks; i++) { + struct camss_clock *clock = &csid->clock[i]; + + clock->clk = devm_clk_get(dev, res->clock[i]); + if (IS_ERR(clock->clk)) + return PTR_ERR(clock->clk); + + clock->name = res->clock[i]; + + clock->nfreqs = 0; + while (res->clock_rate[i][clock->nfreqs]) + clock->nfreqs++; + + if (!clock->nfreqs) { + clock->freq = NULL; + continue; + } + + clock->freq = devm_kzalloc(dev, clock->nfreqs * + sizeof(*clock->freq), GFP_KERNEL); + if (!clock->freq) + return -ENOMEM; + + for (j = 0; j < clock->nfreqs; j++) + clock->freq[j] = res->clock_rate[i][j]; + } + + /* Regulator */ + + csid->vdda = devm_regulator_get(dev, res->regulator[0]); + if (IS_ERR(csid->vdda)) { + dev_err(dev, "could not get regulator\n"); + return PTR_ERR(csid->vdda); + } + + init_completion(&csid->reset_complete); + + return 0; +} + +/* + * 
msm_csid_get_csid_id - Get CSID HW module id + * @entity: Pointer to CSID media entity structure + * @id: Return CSID HW module id here + */ +void msm_csid_get_csid_id(struct media_entity *entity, u8 *id) +{ + struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity); + struct csid_device *csid = v4l2_get_subdevdata(sd); + + *id = csid->id; +} + +/* + * csid_get_lane_assign - Calculate CSI2 lane assign configuration parameter + * @lane_cfg - CSI2 lane configuration + * + * Return lane assign + */ +static u32 csid_get_lane_assign(struct csiphy_lanes_cfg *lane_cfg) +{ + u32 lane_assign = 0; + int i; + + for (i = 0; i < lane_cfg->num_data; i++) + lane_assign |= lane_cfg->data[i].pos << (i * 4); + + return lane_assign; +} + +/* + * csid_link_setup - Setup CSID connections + * @entity: Pointer to media entity structure + * @local: Pointer to local pad + * @remote: Pointer to remote pad + * @flags: Link flags + * + * Return 0 on success + */ +static int csid_link_setup(struct media_entity *entity, + const struct media_pad *local, + const struct media_pad *remote, u32 flags) +{ + if (flags & MEDIA_LNK_FL_ENABLED) + if (media_entity_remote_pad(local)) + return -EBUSY; + + if ((local->flags & MEDIA_PAD_FL_SINK) && + (flags & MEDIA_LNK_FL_ENABLED)) { + struct v4l2_subdev *sd; + struct csid_device *csid; + struct csiphy_device *csiphy; + struct csiphy_lanes_cfg *lane_cfg; + struct v4l2_subdev_format format = { 0 }; + + sd = media_entity_to_v4l2_subdev(entity); + csid = v4l2_get_subdevdata(sd); + + /* If test generator is enabled */ + /* do not allow a link from CSIPHY to CSID */ + if (csid->testgen_mode->cur.val != 0) + return -EBUSY; + + sd = media_entity_to_v4l2_subdev(remote->entity); + csiphy = v4l2_get_subdevdata(sd); + + /* If a sensor is not linked to CSIPHY */ + /* do no allow a link from CSIPHY to CSID */ + if (!csiphy->cfg.csi2) + return -EPERM; + + csid->phy.csiphy_id = csiphy->id; + + lane_cfg = &csiphy->cfg.csi2->lane_cfg; + csid->phy.lane_cnt = 
lane_cfg->num_data; + csid->phy.lane_assign = csid_get_lane_assign(lane_cfg); + + /* Reset format on source pad to sink pad format */ + format.pad = MSM_CSID_PAD_SRC; + format.which = V4L2_SUBDEV_FORMAT_ACTIVE; + csid_set_format(&csid->subdev, NULL, &format); + } + + return 0; +} + +static const struct v4l2_subdev_core_ops csid_core_ops = { + .s_power = csid_set_power, +}; + +static const struct v4l2_subdev_video_ops csid_video_ops = { + .s_stream = csid_set_stream, +}; + +static const struct v4l2_subdev_pad_ops csid_pad_ops = { + .enum_mbus_code = csid_enum_mbus_code, + .enum_frame_size = csid_enum_frame_size, + .get_fmt = csid_get_format, + .set_fmt = csid_set_format, +}; + +static const struct v4l2_subdev_ops csid_v4l2_ops = { + .core = &csid_core_ops, + .video = &csid_video_ops, + .pad = &csid_pad_ops, +}; + +static const struct v4l2_subdev_internal_ops csid_v4l2_internal_ops = { + .open = csid_init_formats, +}; + +static const struct media_entity_operations csid_media_ops = { + .link_setup = csid_link_setup, + .link_validate = v4l2_subdev_link_validate, +}; + +/* + * msm_csid_register_entity - Register subdev node for CSID module + * @csid: CSID device + * @v4l2_dev: V4L2 device + * + * Return 0 on success or a negative error code otherwise + */ +int msm_csid_register_entity(struct csid_device *csid, + struct v4l2_device *v4l2_dev) +{ + struct v4l2_subdev *sd = &csid->subdev; + struct media_pad *pads = csid->pads; + struct device *dev = to_device_index(csid, csid->id); + int ret; + + v4l2_subdev_init(sd, &csid_v4l2_ops); + sd->internal_ops = &csid_v4l2_internal_ops; + sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; + snprintf(sd->name, ARRAY_SIZE(sd->name), "%s%d", + MSM_CSID_NAME, csid->id); + v4l2_set_subdevdata(sd, csid); + + ret = v4l2_ctrl_handler_init(&csid->ctrls, 1); + if (ret < 0) { + dev_err(dev, "Failed to init ctrl handler: %d\n", ret); + return ret; + } + + csid->testgen_mode = v4l2_ctrl_new_std_menu_items(&csid->ctrls, + &csid_ctrl_ops, 
V4L2_CID_TEST_PATTERN, + ARRAY_SIZE(csid_test_pattern_menu) - 1, 0, 0, + csid_test_pattern_menu); + + if (csid->ctrls.error) { + dev_err(dev, "Failed to init ctrl: %d\n", csid->ctrls.error); + ret = csid->ctrls.error; + goto free_ctrl; + } + + csid->subdev.ctrl_handler = &csid->ctrls; + + ret = csid_init_formats(sd, NULL); + if (ret < 0) { + dev_err(dev, "Failed to init format: %d\n", ret); + goto free_ctrl; + } + + pads[MSM_CSID_PAD_SINK].flags = MEDIA_PAD_FL_SINK; + pads[MSM_CSID_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE; + + sd->entity.function = MEDIA_ENT_F_IO_V4L; + sd->entity.ops = &csid_media_ops; + ret = media_entity_pads_init(&sd->entity, MSM_CSID_PADS_NUM, pads); + if (ret < 0) { + dev_err(dev, "Failed to init media entity: %d\n", ret); + goto free_ctrl; + } + + ret = v4l2_device_register_subdev(v4l2_dev, sd); + if (ret < 0) { + dev_err(dev, "Failed to register subdev: %d\n", ret); + goto media_cleanup; + } + + return 0; + +media_cleanup: + media_entity_cleanup(&sd->entity); +free_ctrl: + v4l2_ctrl_handler_free(&csid->ctrls); + + return ret; +} + +/* + * msm_csid_unregister_entity - Unregister CSID module subdev node + * @csid: CSID device + */ +void msm_csid_unregister_entity(struct csid_device *csid) +{ + v4l2_device_unregister_subdev(&csid->subdev); + media_entity_cleanup(&csid->subdev.entity); + v4l2_ctrl_handler_free(&csid->ctrls); +} diff --git a/drivers/media/platform/qcom/camss-8x16/camss-csid.h b/drivers/media/platform/qcom/camss-8x16/camss-csid.h new file mode 100644 index 000000000000..8682d3081bc3 --- /dev/null +++ b/drivers/media/platform/qcom/camss-8x16/camss-csid.h @@ -0,0 +1,82 @@ +/* + * camss-csid.h + * + * Qualcomm MSM Camera Subsystem - CSID (CSI Decoder) Module + * + * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved. + * Copyright (C) 2015-2017 Linaro Ltd. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef QC_MSM_CAMSS_CSID_H +#define QC_MSM_CAMSS_CSID_H + +#include <linux/clk.h> +#include <media/media-entity.h> +#include <media/v4l2-ctrls.h> +#include <media/v4l2-device.h> +#include <media/v4l2-mediabus.h> +#include <media/v4l2-subdev.h> + +#define MSM_CSID_PAD_SINK 0 +#define MSM_CSID_PAD_SRC 1 +#define MSM_CSID_PADS_NUM 2 + +enum csid_payload_mode { + CSID_PAYLOAD_MODE_INCREMENTING = 0, + CSID_PAYLOAD_MODE_ALTERNATING_55_AA = 1, + CSID_PAYLOAD_MODE_ALL_ZEROES = 2, + CSID_PAYLOAD_MODE_ALL_ONES = 3, + CSID_PAYLOAD_MODE_RANDOM = 4, + CSID_PAYLOAD_MODE_USER_SPECIFIED = 5, +}; + +struct csid_testgen_config { + u8 enabled; + enum csid_payload_mode payload_mode; +}; + +struct csid_phy_config { + u8 csiphy_id; + u8 lane_cnt; + u32 lane_assign; +}; + +struct csid_device { + u8 id; + struct v4l2_subdev subdev; + struct media_pad pads[MSM_CSID_PADS_NUM]; + void __iomem *base; + u32 irq; + char irq_name[30]; + struct camss_clock *clock; + int nclocks; + struct regulator *vdda; + struct completion reset_complete; + struct csid_testgen_config testgen; + struct csid_phy_config phy; + struct v4l2_mbus_framefmt fmt[MSM_CSID_PADS_NUM]; + struct v4l2_ctrl_handler ctrls; + struct v4l2_ctrl *testgen_mode; +}; + +struct resources; + +int msm_csid_subdev_init(struct csid_device *csid, + const struct resources *res, u8 id); + +int msm_csid_register_entity(struct csid_device *csid, + struct v4l2_device *v4l2_dev); + +void msm_csid_unregister_entity(struct csid_device *csid); + +void msm_csid_get_csid_id(struct 
media_entity *entity, u8 *id); + +#endif /* QC_MSM_CAMSS_CSID_H */ diff --git a/drivers/media/platform/qcom/camss-8x16/camss-csiphy.c b/drivers/media/platform/qcom/camss-8x16/camss-csiphy.c new file mode 100644 index 000000000000..072c6cf053f6 --- /dev/null +++ b/drivers/media/platform/qcom/camss-8x16/camss-csiphy.c @@ -0,0 +1,890 @@ +/* + * camss-csiphy.c + * + * Qualcomm MSM Camera Subsystem - CSIPHY Module + * + * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved. + * Copyright (C) 2016-2017 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <media/media-entity.h> +#include <media/v4l2-device.h> +#include <media/v4l2-subdev.h> + +#include "camss-csiphy.h" +#include "camss.h" + +#define MSM_CSIPHY_NAME "msm_csiphy" + +#define CAMSS_CSI_PHY_LNn_CFG2(n) (0x004 + 0x40 * (n)) +#define CAMSS_CSI_PHY_LNn_CFG3(n) (0x008 + 0x40 * (n)) +#define CAMSS_CSI_PHY_GLBL_RESET 0x140 +#define CAMSS_CSI_PHY_GLBL_PWR_CFG 0x144 +#define CAMSS_CSI_PHY_GLBL_IRQ_CMD 0x164 +#define CAMSS_CSI_PHY_HW_VERSION 0x188 +#define CAMSS_CSI_PHY_INTERRUPT_STATUSn(n) (0x18c + 0x4 * (n)) +#define CAMSS_CSI_PHY_INTERRUPT_MASKn(n) (0x1ac + 0x4 * (n)) +#define CAMSS_CSI_PHY_INTERRUPT_CLEARn(n) (0x1cc + 0x4 * (n)) +#define CAMSS_CSI_PHY_GLBL_T_INIT_CFG0 0x1ec +#define CAMSS_CSI_PHY_T_WAKEUP_CFG0 0x1f4 + +static const struct { + u32 code; + u8 bpp; +} csiphy_formats[] = { + { + 
MEDIA_BUS_FMT_UYVY8_2X8, + 8, + }, + { + MEDIA_BUS_FMT_VYUY8_2X8, + 8, + }, + { + MEDIA_BUS_FMT_YUYV8_2X8, + 8, + }, + { + MEDIA_BUS_FMT_YVYU8_2X8, + 8, + }, + { + MEDIA_BUS_FMT_SBGGR8_1X8, + 8, + }, + { + MEDIA_BUS_FMT_SGBRG8_1X8, + 8, + }, + { + MEDIA_BUS_FMT_SGRBG8_1X8, + 8, + }, + { + MEDIA_BUS_FMT_SRGGB8_1X8, + 8, + }, + { + MEDIA_BUS_FMT_SBGGR10_1X10, + 10, + }, + { + MEDIA_BUS_FMT_SGBRG10_1X10, + 10, + }, + { + MEDIA_BUS_FMT_SGRBG10_1X10, + 10, + }, + { + MEDIA_BUS_FMT_SRGGB10_1X10, + 10, + }, + { + MEDIA_BUS_FMT_SBGGR12_1X12, + 12, + }, + { + MEDIA_BUS_FMT_SGBRG12_1X12, + 12, + }, + { + MEDIA_BUS_FMT_SGRBG12_1X12, + 12, + }, + { + MEDIA_BUS_FMT_SRGGB12_1X12, + 12, + } +}; + +/* + * csiphy_get_bpp - map media bus format to bits per pixel + * @code: media bus format code + * + * Return number of bits per pixel + */ +static u8 csiphy_get_bpp(u32 code) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(csiphy_formats); i++) + if (code == csiphy_formats[i].code) + return csiphy_formats[i].bpp; + + WARN(1, "Unknown format\n"); + + return csiphy_formats[0].bpp; +} + +/* + * csiphy_isr - CSIPHY module interrupt handler + * @irq: Interrupt line + * @dev: CSIPHY device + * + * Return IRQ_HANDLED on success + */ +static irqreturn_t csiphy_isr(int irq, void *dev) +{ + struct csiphy_device *csiphy = dev; + u8 i; + + for (i = 0; i < 8; i++) { + u8 val = readl_relaxed(csiphy->base + + CAMSS_CSI_PHY_INTERRUPT_STATUSn(i)); + writel_relaxed(val, csiphy->base + + CAMSS_CSI_PHY_INTERRUPT_CLEARn(i)); + writel_relaxed(0x1, csiphy->base + CAMSS_CSI_PHY_GLBL_IRQ_CMD); + writel_relaxed(0x0, csiphy->base + CAMSS_CSI_PHY_GLBL_IRQ_CMD); + writel_relaxed(0x0, csiphy->base + + CAMSS_CSI_PHY_INTERRUPT_CLEARn(i)); + } + + return IRQ_HANDLED; +} + +/* + * csiphy_set_clock_rates - Calculate and set clock rates on CSIPHY module + * @csiphy: CSIPHY device + */ +static int csiphy_set_clock_rates(struct csiphy_device *csiphy) +{ + struct device *dev = to_device_index(csiphy, csiphy->id); + u32 
pixel_clock; + int i, j; + int ret; + + ret = camss_get_pixel_clock(&csiphy->subdev.entity, &pixel_clock); + if (ret) + pixel_clock = 0; + + for (i = 0; i < csiphy->nclocks; i++) { + struct camss_clock *clock = &csiphy->clock[i]; + + if (!strcmp(clock->name, "csiphy0_timer") || + !strcmp(clock->name, "csiphy1_timer")) { + u8 bpp = csiphy_get_bpp( + csiphy->fmt[MSM_CSIPHY_PAD_SINK].code); + u8 num_lanes = csiphy->cfg.csi2->lane_cfg.num_data; + u64 min_rate = pixel_clock * bpp / (2 * num_lanes * 4); + long round_rate; + + camss_add_clock_margin(&min_rate); + + for (j = 0; j < clock->nfreqs; j++) + if (min_rate < clock->freq[j]) + break; + + if (j == clock->nfreqs) { + dev_err(dev, + "Pixel clock is too high for CSIPHY\n"); + return -EINVAL; + } + + /* if sensor pixel clock is not available */ + /* set highest possible CSIPHY clock rate */ + if (min_rate == 0) + j = clock->nfreqs - 1; + + round_rate = clk_round_rate(clock->clk, clock->freq[j]); + if (round_rate < 0) { + dev_err(dev, "clk round rate failed: %ld\n", + round_rate); + return -EINVAL; + } + + csiphy->timer_clk_rate = round_rate; + + ret = clk_set_rate(clock->clk, csiphy->timer_clk_rate); + if (ret < 0) { + dev_err(dev, "clk set rate failed: %d\n", ret); + return ret; + } + } + } + + return 0; +} + +/* + * csiphy_reset - Perform software reset on CSIPHY module + * @csiphy: CSIPHY device + */ +static void csiphy_reset(struct csiphy_device *csiphy) +{ + writel_relaxed(0x1, csiphy->base + CAMSS_CSI_PHY_GLBL_RESET); + usleep_range(5000, 8000); + writel_relaxed(0x0, csiphy->base + CAMSS_CSI_PHY_GLBL_RESET); +} + +/* + * csiphy_set_power - Power on/off CSIPHY module + * @sd: CSIPHY V4L2 subdevice + * @on: Requested power state + * + * Return 0 on success or a negative error code otherwise + */ +static int csiphy_set_power(struct v4l2_subdev *sd, int on) +{ + struct csiphy_device *csiphy = v4l2_get_subdevdata(sd); + struct device *dev = to_device_index(csiphy, csiphy->id); + + if (on) { + u8 hw_version; + int ret; 
+ + ret = csiphy_set_clock_rates(csiphy); + if (ret < 0) + return ret; + + ret = camss_enable_clocks(csiphy->nclocks, csiphy->clock, dev); + if (ret < 0) + return ret; + + enable_irq(csiphy->irq); + + csiphy_reset(csiphy); + + hw_version = readl_relaxed(csiphy->base + + CAMSS_CSI_PHY_HW_VERSION); + dev_dbg(dev, "CSIPHY HW Version = 0x%02x\n", hw_version); + } else { + disable_irq(csiphy->irq); + + camss_disable_clocks(csiphy->nclocks, csiphy->clock); + } + + return 0; +} + +/* + * csiphy_get_lane_mask - Calculate CSI2 lane mask configuration parameter + * @lane_cfg - CSI2 lane configuration + * + * Return lane mask + */ +static u8 csiphy_get_lane_mask(struct csiphy_lanes_cfg *lane_cfg) +{ + u8 lane_mask; + int i; + + lane_mask = 1 << lane_cfg->clk.pos; + + for (i = 0; i < lane_cfg->num_data; i++) + lane_mask |= 1 << lane_cfg->data[i].pos; + + return lane_mask; +} + +/* + * csiphy_settle_cnt_calc - Calculate settle count value + * @csiphy: CSIPHY device + * + * Helper function to calculate settle count value. This is + * based on the CSI2 T_hs_settle parameter which in turn + * is calculated based on the CSI2 transmitter pixel clock + * frequency. 
+ * + * Return settle count value or 0 if the CSI2 pixel clock + * frequency is not available + */ +static u8 csiphy_settle_cnt_calc(struct csiphy_device *csiphy) +{ + u8 bpp = csiphy_get_bpp( + csiphy->fmt[MSM_CSIPHY_PAD_SINK].code); + u8 num_lanes = csiphy->cfg.csi2->lane_cfg.num_data; + u32 pixel_clock; /* Hz */ + u32 mipi_clock; /* Hz */ + u32 ui; /* ps */ + u32 timer_period; /* ps */ + u32 t_hs_prepare_max; /* ps */ + u32 t_hs_prepare_zero_min; /* ps */ + u32 t_hs_settle; /* ps */ + u8 settle_cnt; + int ret; + + ret = camss_get_pixel_clock(&csiphy->subdev.entity, &pixel_clock); + if (ret) { + dev_err(to_device_index(csiphy, csiphy->id), + "Cannot get CSI2 transmitter's pixel clock\n"); + return 0; + } + if (!pixel_clock) { + dev_err(to_device_index(csiphy, csiphy->id), + "Got pixel clock == 0, cannot continue\n"); + return 0; + } + + mipi_clock = pixel_clock * bpp / (2 * num_lanes); + ui = div_u64(1000000000000LL, mipi_clock); + ui /= 2; + t_hs_prepare_max = 85000 + 6 * ui; + t_hs_prepare_zero_min = 145000 + 10 * ui; + t_hs_settle = (t_hs_prepare_max + t_hs_prepare_zero_min) / 2; + + timer_period = div_u64(1000000000000LL, csiphy->timer_clk_rate); + settle_cnt = t_hs_settle / timer_period; + + return settle_cnt; +} + +/* + * csiphy_stream_on - Enable streaming on CSIPHY module + * @csiphy: CSIPHY device + * + * Helper function to enable streaming on CSIPHY module. + * Main configuration of CSIPHY module is also done here. 
+ * + * Return 0 on success or a negative error code otherwise + */ +static int csiphy_stream_on(struct csiphy_device *csiphy) +{ + struct csiphy_config *cfg = &csiphy->cfg; + u8 lane_mask = csiphy_get_lane_mask(&cfg->csi2->lane_cfg); + u8 settle_cnt; + u8 val; + int i = 0; + + settle_cnt = csiphy_settle_cnt_calc(csiphy); + if (!settle_cnt) + return -EINVAL; + + val = readl_relaxed(csiphy->base_clk_mux); + if (cfg->combo_mode && (lane_mask & 0x18) == 0x18) { + val &= ~0xf0; + val |= cfg->csid_id << 4; + } else { + val &= ~0xf; + val |= cfg->csid_id; + } + writel_relaxed(val, csiphy->base_clk_mux); + + writel_relaxed(0x1, csiphy->base + + CAMSS_CSI_PHY_GLBL_T_INIT_CFG0); + writel_relaxed(0x1, csiphy->base + + CAMSS_CSI_PHY_T_WAKEUP_CFG0); + + val = 0x1; + val |= lane_mask << 1; + writel_relaxed(val, csiphy->base + CAMSS_CSI_PHY_GLBL_PWR_CFG); + + val = cfg->combo_mode << 4; + writel_relaxed(val, csiphy->base + CAMSS_CSI_PHY_GLBL_RESET); + + while (lane_mask) { + if (lane_mask & 0x1) { + writel_relaxed(0x10, csiphy->base + + CAMSS_CSI_PHY_LNn_CFG2(i)); + writel_relaxed(settle_cnt, csiphy->base + + CAMSS_CSI_PHY_LNn_CFG3(i)); + writel_relaxed(0x3f, csiphy->base + + CAMSS_CSI_PHY_INTERRUPT_MASKn(i)); + writel_relaxed(0x3f, csiphy->base + + CAMSS_CSI_PHY_INTERRUPT_CLEARn(i)); + } + + lane_mask >>= 1; + i++; + } + + return 0; +} + +/* + * csiphy_stream_off - Disable streaming on CSIPHY module + * @csiphy: CSIPHY device + * + * Helper function to disable streaming on CSIPHY module + */ +static void csiphy_stream_off(struct csiphy_device *csiphy) +{ + u8 lane_mask = csiphy_get_lane_mask(&csiphy->cfg.csi2->lane_cfg); + int i = 0; + + while (lane_mask) { + if (lane_mask & 0x1) + writel_relaxed(0x0, csiphy->base + + CAMSS_CSI_PHY_LNn_CFG2(i)); + + lane_mask >>= 1; + i++; + } + + writel_relaxed(0x0, csiphy->base + CAMSS_CSI_PHY_GLBL_PWR_CFG); +} + + +/* + * csiphy_set_stream - Enable/disable streaming on CSIPHY module + * @sd: CSIPHY V4L2 subdevice + * @enable: Requested 
streaming state + * + * Return 0 on success or a negative error code otherwise + */ +static int csiphy_set_stream(struct v4l2_subdev *sd, int enable) +{ + struct csiphy_device *csiphy = v4l2_get_subdevdata(sd); + int ret = 0; + + if (enable) + ret = csiphy_stream_on(csiphy); + else + csiphy_stream_off(csiphy); + + return ret; +} + +/* + * __csiphy_get_format - Get pointer to format structure + * @csiphy: CSIPHY device + * @cfg: V4L2 subdev pad configuration + * @pad: pad from which format is requested + * @which: TRY or ACTIVE format + * + * Return pointer to TRY or ACTIVE format structure + */ +static struct v4l2_mbus_framefmt * +__csiphy_get_format(struct csiphy_device *csiphy, + struct v4l2_subdev_pad_config *cfg, + unsigned int pad, + enum v4l2_subdev_format_whence which) +{ + if (which == V4L2_SUBDEV_FORMAT_TRY) + return v4l2_subdev_get_try_format(&csiphy->subdev, cfg, pad); + + return &csiphy->fmt[pad]; +} + +/* + * csiphy_try_format - Handle try format by pad subdev method + * @csiphy: CSIPHY device + * @cfg: V4L2 subdev pad configuration + * @pad: pad on which format is requested + * @fmt: pointer to v4l2 format structure + * @which: wanted subdev format + */ +static void csiphy_try_format(struct csiphy_device *csiphy, + struct v4l2_subdev_pad_config *cfg, + unsigned int pad, + struct v4l2_mbus_framefmt *fmt, + enum v4l2_subdev_format_whence which) +{ + unsigned int i; + + switch (pad) { + case MSM_CSIPHY_PAD_SINK: + /* Set format on sink pad */ + + for (i = 0; i < ARRAY_SIZE(csiphy_formats); i++) + if (fmt->code == csiphy_formats[i].code) + break; + + /* If not found, use UYVY as default */ + if (i >= ARRAY_SIZE(csiphy_formats)) + fmt->code = MEDIA_BUS_FMT_UYVY8_2X8; + + fmt->width = clamp_t(u32, fmt->width, 1, 8191); + fmt->height = clamp_t(u32, fmt->height, 1, 8191); + + fmt->field = V4L2_FIELD_NONE; + fmt->colorspace = V4L2_COLORSPACE_SRGB; + + break; + + case MSM_CSIPHY_PAD_SRC: + /* Set and return a format same as sink pad */ + + *fmt = 
*__csiphy_get_format(csiphy, cfg, MSM_CSID_PAD_SINK, + which); + + break; + } +} + +/* + * csiphy_enum_mbus_code - Handle pixel format enumeration + * @sd: CSIPHY V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @code: pointer to v4l2_subdev_mbus_code_enum structure + * return -EINVAL or zero on success + */ +static int csiphy_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_mbus_code_enum *code) +{ + struct csiphy_device *csiphy = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt *format; + + if (code->pad == MSM_CSIPHY_PAD_SINK) { + if (code->index >= ARRAY_SIZE(csiphy_formats)) + return -EINVAL; + + code->code = csiphy_formats[code->index].code; + } else { + if (code->index > 0) + return -EINVAL; + + format = __csiphy_get_format(csiphy, cfg, MSM_CSIPHY_PAD_SINK, + code->which); + + code->code = format->code; + } + + return 0; +} + +/* + * csiphy_enum_frame_size - Handle frame size enumeration + * @sd: CSIPHY V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @fse: pointer to v4l2_subdev_frame_size_enum structure + * return -EINVAL or zero on success + */ +static int csiphy_enum_frame_size(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_frame_size_enum *fse) +{ + struct csiphy_device *csiphy = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt format; + + if (fse->index != 0) + return -EINVAL; + + format.code = fse->code; + format.width = 1; + format.height = 1; + csiphy_try_format(csiphy, cfg, fse->pad, &format, fse->which); + fse->min_width = format.width; + fse->min_height = format.height; + + if (format.code != fse->code) + return -EINVAL; + + format.code = fse->code; + format.width = -1; + format.height = -1; + csiphy_try_format(csiphy, cfg, fse->pad, &format, fse->which); + fse->max_width = format.width; + fse->max_height = format.height; + + return 0; +} + +/* + * csiphy_get_format - Handle get format by pads subdev method + * @sd: CSIPHY V4L2 
subdevice + * @cfg: V4L2 subdev pad configuration + * @fmt: pointer to v4l2 subdev format structure + * + * Return -EINVAL or zero on success + */ +static int csiphy_get_format(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct csiphy_device *csiphy = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt *format; + + format = __csiphy_get_format(csiphy, cfg, fmt->pad, fmt->which); + if (format == NULL) + return -EINVAL; + + fmt->format = *format; + + return 0; +} + +/* + * csiphy_set_format - Handle set format by pads subdev method + * @sd: CSIPHY V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @fmt: pointer to v4l2 subdev format structure + * + * Return -EINVAL or zero on success + */ +static int csiphy_set_format(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct csiphy_device *csiphy = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt *format; + + format = __csiphy_get_format(csiphy, cfg, fmt->pad, fmt->which); + if (format == NULL) + return -EINVAL; + + csiphy_try_format(csiphy, cfg, fmt->pad, &fmt->format, fmt->which); + *format = fmt->format; + + /* Propagate the format from sink to source */ + if (fmt->pad == MSM_CSIPHY_PAD_SINK) { + format = __csiphy_get_format(csiphy, cfg, MSM_CSIPHY_PAD_SRC, + fmt->which); + + *format = fmt->format; + csiphy_try_format(csiphy, cfg, MSM_CSIPHY_PAD_SRC, format, + fmt->which); + } + + return 0; +} + +/* + * csiphy_init_formats - Initialize formats on all pads + * @sd: CSIPHY V4L2 subdevice + * @fh: V4L2 subdev file handle + * + * Initialize all pad formats with default values. + * + * Return 0 on success or a negative error code otherwise + */ +static int csiphy_init_formats(struct v4l2_subdev *sd, + struct v4l2_subdev_fh *fh) +{ + struct v4l2_subdev_format format = { + .pad = MSM_CSIPHY_PAD_SINK, + .which = fh ? 
V4L2_SUBDEV_FORMAT_TRY : + V4L2_SUBDEV_FORMAT_ACTIVE, + .format = { + .code = MEDIA_BUS_FMT_UYVY8_2X8, + .width = 1920, + .height = 1080 + } + }; + + return csiphy_set_format(sd, fh ? fh->pad : NULL, &format); +} + +/* + * msm_csiphy_subdev_init - Initialize CSIPHY device structure and resources + * @csiphy: CSIPHY device + * @res: CSIPHY module resources table + * @id: CSIPHY module id + * + * Return 0 on success or a negative error code otherwise + */ +int msm_csiphy_subdev_init(struct csiphy_device *csiphy, + const struct resources *res, u8 id) +{ + struct device *dev = to_device_index(csiphy, id); + struct platform_device *pdev = to_platform_device(dev); + struct resource *r; + int i, j; + int ret; + + csiphy->id = id; + csiphy->cfg.combo_mode = 0; + + /* Memory */ + + r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res->reg[0]); + csiphy->base = devm_ioremap_resource(dev, r); + if (IS_ERR(csiphy->base)) { + dev_err(dev, "could not map memory\n"); + return PTR_ERR(csiphy->base); + } + + r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res->reg[1]); + csiphy->base_clk_mux = devm_ioremap_resource(dev, r); + if (IS_ERR(csiphy->base_clk_mux)) { + dev_err(dev, "could not map memory\n"); + return PTR_ERR(csiphy->base_clk_mux); + } + + /* Interrupt */ + + r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, + res->interrupt[0]); + if (!r) { + dev_err(dev, "missing IRQ\n"); + return -EINVAL; + } + + csiphy->irq = r->start; + snprintf(csiphy->irq_name, sizeof(csiphy->irq_name), "%s_%s%d", + dev_name(dev), MSM_CSIPHY_NAME, csiphy->id); + ret = devm_request_irq(dev, csiphy->irq, csiphy_isr, + IRQF_TRIGGER_RISING, csiphy->irq_name, csiphy); + if (ret < 0) { + dev_err(dev, "request_irq failed: %d\n", ret); + return ret; + } + + disable_irq(csiphy->irq); + + /* Clocks */ + + csiphy->nclocks = 0; + while (res->clock[csiphy->nclocks]) + csiphy->nclocks++; + + csiphy->clock = devm_kzalloc(dev, csiphy->nclocks * + sizeof(*csiphy->clock), GFP_KERNEL); + if 
(!csiphy->clock) + return -ENOMEM; + + for (i = 0; i < csiphy->nclocks; i++) { + struct camss_clock *clock = &csiphy->clock[i]; + + clock->clk = devm_clk_get(dev, res->clock[i]); + if (IS_ERR(clock->clk)) + return PTR_ERR(clock->clk); + + clock->name = res->clock[i]; + + clock->nfreqs = 0; + while (res->clock_rate[i][clock->nfreqs]) + clock->nfreqs++; + + if (!clock->nfreqs) { + clock->freq = NULL; + continue; + } + + clock->freq = devm_kzalloc(dev, clock->nfreqs * + sizeof(*clock->freq), GFP_KERNEL); + if (!clock->freq) + return -ENOMEM; + + for (j = 0; j < clock->nfreqs; j++) + clock->freq[j] = res->clock_rate[i][j]; + } + + return 0; +} + +/* + * csiphy_link_setup - Setup CSIPHY connections + * @entity: Pointer to media entity structure + * @local: Pointer to local pad + * @remote: Pointer to remote pad + * @flags: Link flags + * + * Rreturn 0 on success + */ +static int csiphy_link_setup(struct media_entity *entity, + const struct media_pad *local, + const struct media_pad *remote, u32 flags) +{ + if ((local->flags & MEDIA_PAD_FL_SOURCE) && + (flags & MEDIA_LNK_FL_ENABLED)) { + struct v4l2_subdev *sd; + struct csiphy_device *csiphy; + struct csid_device *csid; + + if (media_entity_remote_pad(local)) + return -EBUSY; + + sd = media_entity_to_v4l2_subdev(entity); + csiphy = v4l2_get_subdevdata(sd); + + sd = media_entity_to_v4l2_subdev(remote->entity); + csid = v4l2_get_subdevdata(sd); + + csiphy->cfg.csid_id = csid->id; + } + + return 0; +} + +static const struct v4l2_subdev_core_ops csiphy_core_ops = { + .s_power = csiphy_set_power, +}; + +static const struct v4l2_subdev_video_ops csiphy_video_ops = { + .s_stream = csiphy_set_stream, +}; + +static const struct v4l2_subdev_pad_ops csiphy_pad_ops = { + .enum_mbus_code = csiphy_enum_mbus_code, + .enum_frame_size = csiphy_enum_frame_size, + .get_fmt = csiphy_get_format, + .set_fmt = csiphy_set_format, +}; + +static const struct v4l2_subdev_ops csiphy_v4l2_ops = { + .core = &csiphy_core_ops, + .video = 
&csiphy_video_ops, + .pad = &csiphy_pad_ops, +}; + +static const struct v4l2_subdev_internal_ops csiphy_v4l2_internal_ops = { + .open = csiphy_init_formats, +}; + +static const struct media_entity_operations csiphy_media_ops = { + .link_setup = csiphy_link_setup, + .link_validate = v4l2_subdev_link_validate, +}; + +/* + * msm_csiphy_register_entity - Register subdev node for CSIPHY module + * @csiphy: CSIPHY device + * @v4l2_dev: V4L2 device + * + * Return 0 on success or a negative error code otherwise + */ +int msm_csiphy_register_entity(struct csiphy_device *csiphy, + struct v4l2_device *v4l2_dev) +{ + struct v4l2_subdev *sd = &csiphy->subdev; + struct media_pad *pads = csiphy->pads; + struct device *dev = to_device_index(csiphy, csiphy->id); + int ret; + + v4l2_subdev_init(sd, &csiphy_v4l2_ops); + sd->internal_ops = &csiphy_v4l2_internal_ops; + sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; + snprintf(sd->name, ARRAY_SIZE(sd->name), "%s%d", + MSM_CSIPHY_NAME, csiphy->id); + v4l2_set_subdevdata(sd, csiphy); + + ret = csiphy_init_formats(sd, NULL); + if (ret < 0) { + dev_err(dev, "Failed to init format: %d\n", ret); + return ret; + } + + pads[MSM_CSIPHY_PAD_SINK].flags = MEDIA_PAD_FL_SINK; + pads[MSM_CSIPHY_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE; + + sd->entity.function = MEDIA_ENT_F_IO_V4L; + sd->entity.ops = &csiphy_media_ops; + ret = media_entity_pads_init(&sd->entity, MSM_CSIPHY_PADS_NUM, pads); + if (ret < 0) { + dev_err(dev, "Failed to init media entity: %d\n", ret); + return ret; + } + + ret = v4l2_device_register_subdev(v4l2_dev, sd); + if (ret < 0) { + dev_err(dev, "Failed to register subdev: %d\n", ret); + media_entity_cleanup(&sd->entity); + } + + return ret; +} + +/* + * msm_csiphy_unregister_entity - Unregister CSIPHY module subdev node + * @csiphy: CSIPHY device + */ +void msm_csiphy_unregister_entity(struct csiphy_device *csiphy) +{ + v4l2_device_unregister_subdev(&csiphy->subdev); + media_entity_cleanup(&csiphy->subdev.entity); +} diff --git 
a/drivers/media/platform/qcom/camss-8x16/camss-csiphy.h b/drivers/media/platform/qcom/camss-8x16/camss-csiphy.h new file mode 100644 index 000000000000..ba8781122065 --- /dev/null +++ b/drivers/media/platform/qcom/camss-8x16/camss-csiphy.h @@ -0,0 +1,77 @@ +/* + * camss-csiphy.h + * + * Qualcomm MSM Camera Subsystem - CSIPHY Module + * + * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved. + * Copyright (C) 2016-2017 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef QC_MSM_CAMSS_CSIPHY_H +#define QC_MSM_CAMSS_CSIPHY_H + +#include <linux/clk.h> +#include <media/media-entity.h> +#include <media/v4l2-device.h> +#include <media/v4l2-mediabus.h> +#include <media/v4l2-subdev.h> + +#define MSM_CSIPHY_PAD_SINK 0 +#define MSM_CSIPHY_PAD_SRC 1 +#define MSM_CSIPHY_PADS_NUM 2 + +struct csiphy_lane { + u8 pos; + u8 pol; +}; + +struct csiphy_lanes_cfg { + int num_data; + struct csiphy_lane *data; + struct csiphy_lane clk; +}; + +struct csiphy_csi2_cfg { + struct csiphy_lanes_cfg lane_cfg; +}; + +struct csiphy_config { + u8 combo_mode; + u8 csid_id; + struct csiphy_csi2_cfg *csi2; +}; + +struct csiphy_device { + u8 id; + struct v4l2_subdev subdev; + struct media_pad pads[MSM_CSIPHY_PADS_NUM]; + void __iomem *base; + void __iomem *base_clk_mux; + u32 irq; + char irq_name[30]; + struct camss_clock *clock; + int nclocks; + u32 timer_clk_rate; + struct csiphy_config cfg; + struct v4l2_mbus_framefmt fmt[MSM_CSIPHY_PADS_NUM]; +}; + +struct resources; + +int msm_csiphy_subdev_init(struct csiphy_device *csiphy, + const struct resources 
*res, u8 id); + +int msm_csiphy_register_entity(struct csiphy_device *csiphy, + struct v4l2_device *v4l2_dev); + +void msm_csiphy_unregister_entity(struct csiphy_device *csiphy); + +#endif /* QC_MSM_CAMSS_CSIPHY_H */ diff --git a/drivers/media/platform/qcom/camss-8x16/camss-ispif.c b/drivers/media/platform/qcom/camss-8x16/camss-ispif.c new file mode 100644 index 000000000000..24da529397b5 --- /dev/null +++ b/drivers/media/platform/qcom/camss-8x16/camss-ispif.c @@ -0,0 +1,1175 @@ +/* + * camss-ispif.c + * + * Qualcomm MSM Camera Subsystem - ISPIF (ISP Interface) Module + * + * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. + * Copyright (C) 2015-2017 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#include <linux/clk.h> +#include <linux/completion.h> +#include <linux/interrupt.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <media/media-entity.h> +#include <media/v4l2-device.h> +#include <media/v4l2-subdev.h> + +#include "camss-ispif.h" +#include "camss.h" + +#define MSM_ISPIF_NAME "msm_ispif" + +#define ispif_line_array(ptr_line) \ + ((const struct ispif_line (*)[]) &(ptr_line[-(ptr_line->id)])) + +#define to_ispif(ptr_line) \ + container_of(ispif_line_array(ptr_line), struct ispif_device, ptr_line) + +#define ISPIF_RST_CMD_0 0x008 +#define ISPIF_RST_CMD_0_STROBED_RST_EN (1 << 0) +#define ISPIF_RST_CMD_0_MISC_LOGIC_RST (1 << 1) +#define ISPIF_RST_CMD_0_SW_REG_RST (1 << 2) +#define ISPIF_RST_CMD_0_PIX_INTF_0_CSID_RST (1 << 3) +#define ISPIF_RST_CMD_0_PIX_INTF_0_VFE_RST (1 << 4) +#define ISPIF_RST_CMD_0_PIX_INTF_1_CSID_RST (1 << 5) +#define ISPIF_RST_CMD_0_PIX_INTF_1_VFE_RST (1 << 6) +#define ISPIF_RST_CMD_0_RDI_INTF_0_CSID_RST (1 << 7) +#define ISPIF_RST_CMD_0_RDI_INTF_0_VFE_RST (1 << 8) +#define ISPIF_RST_CMD_0_RDI_INTF_1_CSID_RST (1 << 9) +#define ISPIF_RST_CMD_0_RDI_INTF_1_VFE_RST (1 << 10) +#define ISPIF_RST_CMD_0_RDI_INTF_2_CSID_RST (1 << 11) +#define ISPIF_RST_CMD_0_RDI_INTF_2_VFE_RST (1 << 12) +#define ISPIF_RST_CMD_0_PIX_OUTPUT_0_MISR_RST (1 << 16) +#define ISPIF_RST_CMD_0_RDI_OUTPUT_0_MISR_RST (1 << 17) +#define ISPIF_RST_CMD_0_RDI_OUTPUT_1_MISR_RST (1 << 18) +#define ISPIF_RST_CMD_0_RDI_OUTPUT_2_MISR_RST (1 << 19) +#define ISPIF_IRQ_GLOBAL_CLEAR_CMD 0x01c +#define ISPIF_VFE_m_CTRL_0(m) (0x200 + 0x200 * (m)) +#define ISPIF_VFE_m_CTRL_0_PIX0_LINE_BUF_EN (1 << 6) +#define ISPIF_VFE_m_IRQ_MASK_0(m) (0x208 + 0x200 * (m)) +#define ISPIF_VFE_m_IRQ_MASK_0_PIX0_ENABLE 0x00001249 +#define ISPIF_VFE_m_IRQ_MASK_0_PIX0_MASK 0x00001fff +#define ISPIF_VFE_m_IRQ_MASK_0_RDI0_ENABLE 0x02492000 +#define ISPIF_VFE_m_IRQ_MASK_0_RDI0_MASK 0x03ffe000 +#define ISPIF_VFE_m_IRQ_MASK_1(m) 
(0x20c + 0x200 * (m)) +#define ISPIF_VFE_m_IRQ_MASK_1_PIX1_ENABLE 0x00001249 +#define ISPIF_VFE_m_IRQ_MASK_1_PIX1_MASK 0x00001fff +#define ISPIF_VFE_m_IRQ_MASK_1_RDI1_ENABLE 0x02492000 +#define ISPIF_VFE_m_IRQ_MASK_1_RDI1_MASK 0x03ffe000 +#define ISPIF_VFE_m_IRQ_MASK_2(m) (0x210 + 0x200 * (m)) +#define ISPIF_VFE_m_IRQ_MASK_2_RDI2_ENABLE 0x00001249 +#define ISPIF_VFE_m_IRQ_MASK_2_RDI2_MASK 0x00001fff +#define ISPIF_VFE_m_IRQ_STATUS_0(m) (0x21c + 0x200 * (m)) +#define ISPIF_VFE_m_IRQ_STATUS_0_PIX0_OVERFLOW (1 << 12) +#define ISPIF_VFE_m_IRQ_STATUS_0_RDI0_OVERFLOW (1 << 25) +#define ISPIF_VFE_m_IRQ_STATUS_1(m) (0x220 + 0x200 * (m)) +#define ISPIF_VFE_m_IRQ_STATUS_1_PIX1_OVERFLOW (1 << 12) +#define ISPIF_VFE_m_IRQ_STATUS_1_RDI1_OVERFLOW (1 << 25) +#define ISPIF_VFE_m_IRQ_STATUS_2(m) (0x224 + 0x200 * (m)) +#define ISPIF_VFE_m_IRQ_STATUS_2_RDI2_OVERFLOW (1 << 12) +#define ISPIF_VFE_m_IRQ_CLEAR_0(m) (0x230 + 0x200 * (m)) +#define ISPIF_VFE_m_IRQ_CLEAR_1(m) (0x234 + 0x200 * (m)) +#define ISPIF_VFE_m_IRQ_CLEAR_2(m) (0x238 + 0x200 * (m)) +#define ISPIF_VFE_m_INTF_INPUT_SEL(m) (0x244 + 0x200 * (m)) +#define ISPIF_VFE_m_INTF_CMD_0(m) (0x248 + 0x200 * (m)) +#define ISPIF_VFE_m_INTF_CMD_1(m) (0x24c + 0x200 * (m)) +#define ISPIF_VFE_m_PIX_INTF_n_CID_MASK(m, n) \ + (0x254 + 0x200 * (m) + 0x4 * (n)) +#define ISPIF_VFE_m_RDI_INTF_n_CID_MASK(m, n) \ + (0x264 + 0x200 * (m) + 0x4 * (n)) +#define ISPIF_VFE_m_PIX_INTF_n_STATUS(m, n) \ + (0x2c0 + 0x200 * (m) + 0x4 * (n)) +#define ISPIF_VFE_m_RDI_INTF_n_STATUS(m, n) \ + (0x2d0 + 0x200 * (m) + 0x4 * (n)) + +#define CSI_PIX_CLK_MUX_SEL 0x000 +#define CSI_RDI_CLK_MUX_SEL 0x008 + +#define ISPIF_TIMEOUT_SLEEP_US 1000 +#define ISPIF_TIMEOUT_ALL_US 1000000 +#define ISPIF_RESET_TIMEOUT_MS 500 + +enum ispif_intf_cmd { + CMD_DISABLE_FRAME_BOUNDARY = 0x0, + CMD_ENABLE_FRAME_BOUNDARY = 0x1, + CMD_DISABLE_IMMEDIATELY = 0x2, + CMD_ALL_DISABLE_IMMEDIATELY = 0xaaaaaaaa, + CMD_ALL_NO_CHANGE = 0xffffffff, +}; + +static const u32 ispif_formats[] = { + 
MEDIA_BUS_FMT_UYVY8_2X8, + MEDIA_BUS_FMT_VYUY8_2X8, + MEDIA_BUS_FMT_YUYV8_2X8, + MEDIA_BUS_FMT_YVYU8_2X8, + MEDIA_BUS_FMT_SBGGR8_1X8, + MEDIA_BUS_FMT_SGBRG8_1X8, + MEDIA_BUS_FMT_SGRBG8_1X8, + MEDIA_BUS_FMT_SRGGB8_1X8, + MEDIA_BUS_FMT_SBGGR10_1X10, + MEDIA_BUS_FMT_SGBRG10_1X10, + MEDIA_BUS_FMT_SGRBG10_1X10, + MEDIA_BUS_FMT_SRGGB10_1X10, + MEDIA_BUS_FMT_SBGGR12_1X12, + MEDIA_BUS_FMT_SGBRG12_1X12, + MEDIA_BUS_FMT_SGRBG12_1X12, + MEDIA_BUS_FMT_SRGGB12_1X12, +}; + +/* + * ispif_isr - ISPIF module interrupt handler + * @irq: Interrupt line + * @dev: ISPIF device + * + * Return IRQ_HANDLED on success + */ +static irqreturn_t ispif_isr(int irq, void *dev) +{ + struct ispif_device *ispif = dev; + u32 value0, value1, value2; + + value0 = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_STATUS_0(0)); + value1 = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_STATUS_1(0)); + value2 = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_STATUS_2(0)); + + writel_relaxed(value0, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(0)); + writel_relaxed(value1, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(0)); + writel_relaxed(value2, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_2(0)); + + writel(0x1, ispif->base + ISPIF_IRQ_GLOBAL_CLEAR_CMD); + + if ((value0 >> 27) & 0x1) + complete(&ispif->reset_complete); + + if (unlikely(value0 & ISPIF_VFE_m_IRQ_STATUS_0_PIX0_OVERFLOW)) + dev_err_ratelimited(to_device(ispif), "VFE0 pix0 overflow\n"); + + if (unlikely(value0 & ISPIF_VFE_m_IRQ_STATUS_0_RDI0_OVERFLOW)) + dev_err_ratelimited(to_device(ispif), "VFE0 rdi0 overflow\n"); + + if (unlikely(value1 & ISPIF_VFE_m_IRQ_STATUS_1_PIX1_OVERFLOW)) + dev_err_ratelimited(to_device(ispif), "VFE0 pix1 overflow\n"); + + if (unlikely(value1 & ISPIF_VFE_m_IRQ_STATUS_1_RDI1_OVERFLOW)) + dev_err_ratelimited(to_device(ispif), "VFE0 rdi1 overflow\n"); + + if (unlikely(value2 & ISPIF_VFE_m_IRQ_STATUS_2_RDI2_OVERFLOW)) + dev_err_ratelimited(to_device(ispif), "VFE0 rdi2 overflow\n"); + + return IRQ_HANDLED; +} + +/* + * ispif_reset - Trigger reset on ISPIF 
module and wait to complete + * @ispif: ISPIF device + * + * Return 0 on success or a negative error code otherwise + */ +static int ispif_reset(struct ispif_device *ispif) +{ + unsigned long time; + u32 val; + int ret; + + ret = camss_enable_clocks(ispif->nclocks_for_reset, + ispif->clock_for_reset, + to_device(ispif)); + if (ret < 0) + return ret; + + reinit_completion(&ispif->reset_complete); + + val = ISPIF_RST_CMD_0_STROBED_RST_EN | + ISPIF_RST_CMD_0_MISC_LOGIC_RST | + ISPIF_RST_CMD_0_SW_REG_RST | + ISPIF_RST_CMD_0_PIX_INTF_0_CSID_RST | + ISPIF_RST_CMD_0_PIX_INTF_0_VFE_RST | + ISPIF_RST_CMD_0_PIX_INTF_1_CSID_RST | + ISPIF_RST_CMD_0_PIX_INTF_1_VFE_RST | + ISPIF_RST_CMD_0_RDI_INTF_0_CSID_RST | + ISPIF_RST_CMD_0_RDI_INTF_0_VFE_RST | + ISPIF_RST_CMD_0_RDI_INTF_1_CSID_RST | + ISPIF_RST_CMD_0_RDI_INTF_1_VFE_RST | + ISPIF_RST_CMD_0_RDI_INTF_2_CSID_RST | + ISPIF_RST_CMD_0_RDI_INTF_2_VFE_RST | + ISPIF_RST_CMD_0_PIX_OUTPUT_0_MISR_RST | + ISPIF_RST_CMD_0_RDI_OUTPUT_0_MISR_RST | + ISPIF_RST_CMD_0_RDI_OUTPUT_1_MISR_RST | + ISPIF_RST_CMD_0_RDI_OUTPUT_2_MISR_RST; + + writel_relaxed(val, ispif->base + ISPIF_RST_CMD_0); + + time = wait_for_completion_timeout(&ispif->reset_complete, + msecs_to_jiffies(ISPIF_RESET_TIMEOUT_MS)); + if (!time) { + dev_err(to_device(ispif), "ISPIF reset timeout\n"); + return -EIO; + } + + camss_disable_clocks(ispif->nclocks_for_reset, ispif->clock_for_reset); + + return 0; +} + +/* + * ispif_set_power - Power on/off ISPIF module + * @sd: ISPIF V4L2 subdevice + * @on: Requested power state + * + * Return 0 on success or a negative error code otherwise + */ +static int ispif_set_power(struct v4l2_subdev *sd, int on) +{ + struct ispif_line *line = v4l2_get_subdevdata(sd); + struct ispif_device *ispif = to_ispif(line); + struct device *dev = to_device(ispif); + int ret = 0; + + mutex_lock(&ispif->power_lock); + + if (on) { + if (ispif->power_count) { + /* Power is already on */ + ispif->power_count++; + goto exit; + } + + ret = 
camss_enable_clocks(ispif->nclocks, ispif->clock, dev); + if (ret < 0) + goto exit; + + ret = ispif_reset(ispif); + if (ret < 0) { + camss_disable_clocks(ispif->nclocks, ispif->clock); + goto exit; + } + + ispif->intf_cmd[line->vfe_id].cmd_0 = CMD_ALL_NO_CHANGE; + ispif->intf_cmd[line->vfe_id].cmd_1 = CMD_ALL_NO_CHANGE; + + ispif->power_count++; + } else { + if (ispif->power_count == 0) { + dev_err(dev, "ispif power off on power_count == 0\n"); + goto exit; + } else if (ispif->power_count == 1) { + camss_disable_clocks(ispif->nclocks, ispif->clock); + } + + ispif->power_count--; + } + +exit: + mutex_unlock(&ispif->power_lock); + + return ret; +} + +/* + * ispif_select_clk_mux - Select clock for PIX/RDI interface + * @ispif: ISPIF device + * @intf: VFE interface + * @csid: CSID HW module id + * @vfe: VFE HW module id + * @enable: enable or disable the selected clock + */ +static void ispif_select_clk_mux(struct ispif_device *ispif, + enum ispif_intf intf, u8 csid, + u8 vfe, u8 enable) +{ + u32 val; + + switch (intf) { + case PIX0: + val = readl_relaxed(ispif->base_clk_mux + CSI_PIX_CLK_MUX_SEL); + val &= ~(0xf << (vfe * 8)); + if (enable) + val |= (csid << (vfe * 8)); + writel_relaxed(val, ispif->base_clk_mux + CSI_PIX_CLK_MUX_SEL); + break; + + case RDI0: + val = readl_relaxed(ispif->base_clk_mux + CSI_RDI_CLK_MUX_SEL); + val &= ~(0xf << (vfe * 12)); + if (enable) + val |= (csid << (vfe * 12)); + writel_relaxed(val, ispif->base_clk_mux + CSI_RDI_CLK_MUX_SEL); + break; + + case PIX1: + val = readl_relaxed(ispif->base_clk_mux + CSI_PIX_CLK_MUX_SEL); + val &= ~(0xf << (4 + (vfe * 8))); + if (enable) + val |= (csid << (4 + (vfe * 8))); + writel_relaxed(val, ispif->base_clk_mux + CSI_PIX_CLK_MUX_SEL); + break; + + case RDI1: + val = readl_relaxed(ispif->base_clk_mux + CSI_RDI_CLK_MUX_SEL); + val &= ~(0xf << (4 + (vfe * 12))); + if (enable) + val |= (csid << (4 + (vfe * 12))); + writel_relaxed(val, ispif->base_clk_mux + CSI_RDI_CLK_MUX_SEL); + break; + + case RDI2: + val 
= readl_relaxed(ispif->base_clk_mux + CSI_RDI_CLK_MUX_SEL); + val &= ~(0xf << (8 + (vfe * 12))); + if (enable) + val |= (csid << (8 + (vfe * 12))); + writel_relaxed(val, ispif->base_clk_mux + CSI_RDI_CLK_MUX_SEL); + break; + } + + mb(); +} + +/* + * ispif_validate_intf_status - Validate current status of PIX/RDI interface + * @ispif: ISPIF device + * @intf: VFE interface + * @vfe: VFE HW module id + * + * Return 0 when interface is idle or -EBUSY otherwise + */ +static int ispif_validate_intf_status(struct ispif_device *ispif, + enum ispif_intf intf, u8 vfe) +{ + int ret = 0; + u32 val = 0; + + switch (intf) { + case PIX0: + val = readl_relaxed(ispif->base + + ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe, 0)); + break; + case RDI0: + val = readl_relaxed(ispif->base + + ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe, 0)); + break; + case PIX1: + val = readl_relaxed(ispif->base + + ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe, 1)); + break; + case RDI1: + val = readl_relaxed(ispif->base + + ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe, 1)); + break; + case RDI2: + val = readl_relaxed(ispif->base + + ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe, 2)); + break; + } + + if ((val & 0xf) != 0xf) { + dev_err(to_device(ispif), "%s: ispif is busy: 0x%x\n", + __func__, val); + ret = -EBUSY; + } + + return ret; +} + +/* + * ispif_wait_for_stop - Wait for PIX/RDI interface to stop + * @ispif: ISPIF device + * @intf: VFE interface + * @vfe: VFE HW module id + * + * Return 0 on success or a negative error code otherwise + */ +static int ispif_wait_for_stop(struct ispif_device *ispif, + enum ispif_intf intf, u8 vfe) +{ + u32 addr = 0; + u32 stop_flag = 0; + int ret; + + switch (intf) { + case PIX0: + addr = ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe, 0); + break; + case RDI0: + addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe, 0); + break; + case PIX1: + addr = ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe, 1); + break; + case RDI1: + addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe, 1); + break; + case RDI2: + addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe, 2); + break; + } + + 
ret = readl_poll_timeout(ispif->base + addr, + stop_flag, + (stop_flag & 0xf) == 0xf, + ISPIF_TIMEOUT_SLEEP_US, + ISPIF_TIMEOUT_ALL_US); + if (ret < 0) + dev_err(to_device(ispif), "%s: ispif stop timeout\n", + __func__); + + return ret; +} + +/* + * ispif_select_csid - Select CSID HW module for input from + * @ispif: ISPIF device + * @intf: VFE interface + * @csid: CSID HW module id + * @vfe: VFE HW module id + * @enable: enable or disable the selected input + */ +static void ispif_select_csid(struct ispif_device *ispif, enum ispif_intf intf, + u8 csid, u8 vfe, u8 enable) +{ + u32 val; + + val = readl_relaxed(ispif->base + ISPIF_VFE_m_INTF_INPUT_SEL(vfe)); + switch (intf) { + case PIX0: + val &= ~(BIT(1) | BIT(0)); + if (enable) + val |= csid; + break; + case RDI0: + val &= ~(BIT(5) | BIT(4)); + if (enable) + val |= (csid << 4); + break; + case PIX1: + val &= ~(BIT(9) | BIT(8)); + if (enable) + val |= (csid << 8); + break; + case RDI1: + val &= ~(BIT(13) | BIT(12)); + if (enable) + val |= (csid << 12); + break; + case RDI2: + val &= ~(BIT(21) | BIT(20)); + if (enable) + val |= (csid << 20); + break; + } + + writel(val, ispif->base + ISPIF_VFE_m_INTF_INPUT_SEL(vfe)); +} + +/* + * ispif_select_cid - Enable/disable desired CID + * @ispif: ISPIF device + * @intf: VFE interface + * @cid: desired CID to enable/disable + * @vfe: VFE HW module id + * @enable: enable or disable the desired CID + */ +static void ispif_select_cid(struct ispif_device *ispif, enum ispif_intf intf, + u8 cid, u8 vfe, u8 enable) +{ + u32 cid_mask = 1 << cid; + u32 addr = 0; + u32 val; + + switch (intf) { + case PIX0: + addr = ISPIF_VFE_m_PIX_INTF_n_CID_MASK(vfe, 0); + break; + case RDI0: + addr = ISPIF_VFE_m_RDI_INTF_n_CID_MASK(vfe, 0); + break; + case PIX1: + addr = ISPIF_VFE_m_PIX_INTF_n_CID_MASK(vfe, 1); + break; + case RDI1: + addr = ISPIF_VFE_m_RDI_INTF_n_CID_MASK(vfe, 1); + break; + case RDI2: + addr = ISPIF_VFE_m_RDI_INTF_n_CID_MASK(vfe, 2); + break; + } + + val = readl_relaxed(ispif->base 
+ addr); + if (enable) + val |= cid_mask; + else + val &= ~cid_mask; + + writel(val, ispif->base + addr); +} + +/* + * ispif_config_irq - Enable/disable interrupts for PIX/RDI interface + * @ispif: ISPIF device + * @intf: VFE interface + * @vfe: VFE HW module id + * @enable: enable or disable + */ +static void ispif_config_irq(struct ispif_device *ispif, enum ispif_intf intf, + u8 vfe, u8 enable) +{ + u32 val; + + switch (intf) { + case PIX0: + val = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_MASK_0(vfe)); + val &= ~ISPIF_VFE_m_IRQ_MASK_0_PIX0_MASK; + if (enable) + val |= ISPIF_VFE_m_IRQ_MASK_0_PIX0_ENABLE; + writel_relaxed(val, ispif->base + ISPIF_VFE_m_IRQ_MASK_0(vfe)); + writel_relaxed(ISPIF_VFE_m_IRQ_MASK_0_PIX0_ENABLE, + ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(vfe)); + break; + case RDI0: + val = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_MASK_0(vfe)); + val &= ~ISPIF_VFE_m_IRQ_MASK_0_RDI0_MASK; + if (enable) + val |= ISPIF_VFE_m_IRQ_MASK_0_RDI0_ENABLE; + writel_relaxed(val, ispif->base + ISPIF_VFE_m_IRQ_MASK_0(vfe)); + writel_relaxed(ISPIF_VFE_m_IRQ_MASK_0_RDI0_ENABLE, + ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(vfe)); + break; + case PIX1: + val = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_MASK_1(vfe)); + val &= ~ISPIF_VFE_m_IRQ_MASK_1_PIX1_MASK; + if (enable) + val |= ISPIF_VFE_m_IRQ_MASK_1_PIX1_ENABLE; + writel_relaxed(val, ispif->base + ISPIF_VFE_m_IRQ_MASK_1(vfe)); + writel_relaxed(ISPIF_VFE_m_IRQ_MASK_1_PIX1_ENABLE, + ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(vfe)); + break; + case RDI1: + val = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_MASK_1(vfe)); + val &= ~ISPIF_VFE_m_IRQ_MASK_1_RDI1_MASK; + if (enable) + val |= ISPIF_VFE_m_IRQ_MASK_1_RDI1_ENABLE; + writel_relaxed(val, ispif->base + ISPIF_VFE_m_IRQ_MASK_1(vfe)); + writel_relaxed(ISPIF_VFE_m_IRQ_MASK_1_RDI1_ENABLE, + ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(vfe)); + break; + case RDI2: + val = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_MASK_2(vfe)); + val &= ~ISPIF_VFE_m_IRQ_MASK_2_RDI2_MASK; + if (enable) 
+ val |= ISPIF_VFE_m_IRQ_MASK_2_RDI2_ENABLE; + writel_relaxed(val, ispif->base + ISPIF_VFE_m_IRQ_MASK_2(vfe)); + writel_relaxed(ISPIF_VFE_m_IRQ_MASK_2_RDI2_ENABLE, + ispif->base + ISPIF_VFE_m_IRQ_CLEAR_2(vfe)); + break; + } + + writel(0x1, ispif->base + ISPIF_IRQ_GLOBAL_CLEAR_CMD); +} + +/* + * ispif_set_intf_cmd - Set command to enable/disable interface + * @ispif: ISPIF device + * @cmd: interface command + * @intf: VFE interface + * @vfe: VFE HW module id + * @vc: virtual channel + */ +static void ispif_set_intf_cmd(struct ispif_device *ispif, u8 cmd, + enum ispif_intf intf, u8 vfe, u8 vc) +{ + u32 *val; + + if (intf == RDI2) { + val = &ispif->intf_cmd[vfe].cmd_1; + *val &= ~(0x3 << (vc * 2 + 8)); + *val |= (cmd << (vc * 2 + 8)); + wmb(); + writel_relaxed(*val, ispif->base + ISPIF_VFE_m_INTF_CMD_1(vfe)); + wmb(); + } else { + val = &ispif->intf_cmd[vfe].cmd_0; + *val &= ~(0x3 << (vc * 2 + intf * 8)); + *val |= (cmd << (vc * 2 + intf * 8)); + wmb(); + writel_relaxed(*val, ispif->base + ISPIF_VFE_m_INTF_CMD_0(vfe)); + wmb(); + } +} + +/* + * ispif_set_stream - Enable/disable streaming on ISPIF module + * @sd: ISPIF V4L2 subdevice + * @enable: Requested streaming state + * + * Main configuration of ISPIF module is also done here. 
+ * + * Return 0 on success or a negative error code otherwise + */ +static int ispif_set_stream(struct v4l2_subdev *sd, int enable) +{ + struct ispif_line *line = v4l2_get_subdevdata(sd); + struct ispif_device *ispif = to_ispif(line); + enum ispif_intf intf = line->interface; + u8 csid = line->csid_id; + u8 vfe = line->vfe_id; + u8 vc = 0; /* Virtual Channel 0 */ + u8 cid = vc * 4; /* id of Virtual Channel and Data Type set */ + int ret; + + if (enable) { + if (!media_entity_remote_pad(&line->pads[MSM_ISPIF_PAD_SINK])) + return -ENOLINK; + + /* Config */ + + mutex_lock(&ispif->config_lock); + ispif_select_clk_mux(ispif, intf, csid, vfe, 1); + + ret = ispif_validate_intf_status(ispif, intf, vfe); + if (ret < 0) { + mutex_unlock(&ispif->config_lock); + return ret; + } + + ispif_select_csid(ispif, intf, csid, vfe, 1); + ispif_select_cid(ispif, intf, cid, vfe, 1); + ispif_config_irq(ispif, intf, vfe, 1); + ispif_set_intf_cmd(ispif, CMD_ENABLE_FRAME_BOUNDARY, + intf, vfe, vc); + } else { + mutex_lock(&ispif->config_lock); + ispif_set_intf_cmd(ispif, CMD_DISABLE_FRAME_BOUNDARY, + intf, vfe, vc); + mutex_unlock(&ispif->config_lock); + + ret = ispif_wait_for_stop(ispif, intf, vfe); + if (ret < 0) + return ret; + + mutex_lock(&ispif->config_lock); + ispif_config_irq(ispif, intf, vfe, 0); + ispif_select_cid(ispif, intf, cid, vfe, 0); + ispif_select_csid(ispif, intf, csid, vfe, 0); + ispif_select_clk_mux(ispif, intf, csid, vfe, 0); + } + + mutex_unlock(&ispif->config_lock); + + return 0; +} + +/* + * __ispif_get_format - Get pointer to format structure + * @ispif: ISPIF line + * @cfg: V4L2 subdev pad configuration + * @pad: pad from which format is requested + * @which: TRY or ACTIVE format + * + * Return pointer to TRY or ACTIVE format structure + */ +static struct v4l2_mbus_framefmt * +__ispif_get_format(struct ispif_line *line, + struct v4l2_subdev_pad_config *cfg, + unsigned int pad, + enum v4l2_subdev_format_whence which) +{ + if (which == V4L2_SUBDEV_FORMAT_TRY) + 
return v4l2_subdev_get_try_format(&line->subdev, cfg, pad); + + return &line->fmt[pad]; +} + +/* + * ispif_try_format - Handle try format by pad subdev method + * @ispif: ISPIF line + * @cfg: V4L2 subdev pad configuration + * @pad: pad on which format is requested + * @fmt: pointer to v4l2 format structure + * @which: wanted subdev format + */ +static void ispif_try_format(struct ispif_line *line, + struct v4l2_subdev_pad_config *cfg, + unsigned int pad, + struct v4l2_mbus_framefmt *fmt, + enum v4l2_subdev_format_whence which) +{ + unsigned int i; + + switch (pad) { + case MSM_ISPIF_PAD_SINK: + /* Set format on sink pad */ + + for (i = 0; i < ARRAY_SIZE(ispif_formats); i++) + if (fmt->code == ispif_formats[i]) + break; + + /* If not found, use UYVY as default */ + if (i >= ARRAY_SIZE(ispif_formats)) + fmt->code = MEDIA_BUS_FMT_UYVY8_2X8; + + fmt->width = clamp_t(u32, fmt->width, 1, 8191); + fmt->height = clamp_t(u32, fmt->height, 1, 8191); + + fmt->field = V4L2_FIELD_NONE; + fmt->colorspace = V4L2_COLORSPACE_SRGB; + + break; + + case MSM_ISPIF_PAD_SRC: + /* Set and return a format same as sink pad */ + + *fmt = *__ispif_get_format(line, cfg, MSM_ISPIF_PAD_SINK, + which); + + break; + } + + fmt->colorspace = V4L2_COLORSPACE_SRGB; +} + +/* + * ispif_enum_mbus_code - Handle pixel format enumeration + * @sd: ISPIF V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @code: pointer to v4l2_subdev_mbus_code_enum structure + * return -EINVAL or zero on success + */ +static int ispif_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_mbus_code_enum *code) +{ + struct ispif_line *line = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt *format; + + if (code->pad == MSM_ISPIF_PAD_SINK) { + if (code->index >= ARRAY_SIZE(ispif_formats)) + return -EINVAL; + + code->code = ispif_formats[code->index]; + } else { + if (code->index > 0) + return -EINVAL; + + format = __ispif_get_format(line, cfg, MSM_ISPIF_PAD_SINK, + 
code->which); + + code->code = format->code; + } + + return 0; +} + +/* + * ispif_enum_frame_size - Handle frame size enumeration + * @sd: ISPIF V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @fse: pointer to v4l2_subdev_frame_size_enum structure + * return -EINVAL or zero on success + */ +static int ispif_enum_frame_size(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_frame_size_enum *fse) +{ + struct ispif_line *line = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt format; + + if (fse->index != 0) + return -EINVAL; + + format.code = fse->code; + format.width = 1; + format.height = 1; + ispif_try_format(line, cfg, fse->pad, &format, fse->which); + fse->min_width = format.width; + fse->min_height = format.height; + + if (format.code != fse->code) + return -EINVAL; + + format.code = fse->code; + format.width = -1; + format.height = -1; + ispif_try_format(line, cfg, fse->pad, &format, fse->which); + fse->max_width = format.width; + fse->max_height = format.height; + + return 0; +} + +/* + * ispif_get_format - Handle get format by pads subdev method + * @sd: ISPIF V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @fmt: pointer to v4l2 subdev format structure + * + * Return -EINVAL or zero on success + */ +static int ispif_get_format(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct ispif_line *line = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt *format; + + format = __ispif_get_format(line, cfg, fmt->pad, fmt->which); + if (format == NULL) + return -EINVAL; + + fmt->format = *format; + + return 0; +} + +/* + * ispif_set_format - Handle set format by pads subdev method + * @sd: ISPIF V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @fmt: pointer to v4l2 subdev format structure + * + * Return -EINVAL or zero on success + */ +static int ispif_set_format(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct 
v4l2_subdev_format *fmt) +{ + struct ispif_line *line = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt *format; + + format = __ispif_get_format(line, cfg, fmt->pad, fmt->which); + if (format == NULL) + return -EINVAL; + + ispif_try_format(line, cfg, fmt->pad, &fmt->format, fmt->which); + *format = fmt->format; + + /* Propagate the format from sink to source */ + if (fmt->pad == MSM_ISPIF_PAD_SINK) { + format = __ispif_get_format(line, cfg, MSM_ISPIF_PAD_SRC, + fmt->which); + + *format = fmt->format; + ispif_try_format(line, cfg, MSM_ISPIF_PAD_SRC, format, + fmt->which); + } + + return 0; +} + +/* + * ispif_init_formats - Initialize formats on all pads + * @sd: ISPIF V4L2 subdevice + * @fh: V4L2 subdev file handle + * + * Initialize all pad formats with default values. + * + * Return 0 on success or a negative error code otherwise + */ +static int ispif_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) +{ + struct v4l2_subdev_format format = { + .pad = MSM_ISPIF_PAD_SINK, + .which = fh ? V4L2_SUBDEV_FORMAT_TRY : + V4L2_SUBDEV_FORMAT_ACTIVE, + .format = { + .code = MEDIA_BUS_FMT_UYVY8_2X8, + .width = 1920, + .height = 1080 + } + }; + + return ispif_set_format(sd, fh ? 
fh->pad : NULL, &format); +} + +/* + * msm_ispif_subdev_init - Initialize ISPIF device structure and resources + * @ispif: ISPIF device + * @res: ISPIF module resources table + * + * Return 0 on success or a negative error code otherwise + */ +int msm_ispif_subdev_init(struct ispif_device *ispif, + const struct resources_ispif *res) +{ + struct device *dev = to_device(ispif); + struct platform_device *pdev = to_platform_device(dev); + struct resource *r; + int i; + int ret; + + /* Memory */ + + r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res->reg[0]); + ispif->base = devm_ioremap_resource(dev, r); + if (IS_ERR(ispif->base)) { + dev_err(dev, "could not map memory\n"); + return PTR_ERR(ispif->base); + } + + r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res->reg[1]); + ispif->base_clk_mux = devm_ioremap_resource(dev, r); + if (IS_ERR(ispif->base_clk_mux)) { + dev_err(dev, "could not map memory\n"); + return PTR_ERR(ispif->base_clk_mux); + } + + /* Interrupt */ + + r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res->interrupt); + + if (!r) { + dev_err(dev, "missing IRQ\n"); + return -EINVAL; + } + + ispif->irq = r->start; + snprintf(ispif->irq_name, sizeof(ispif->irq_name), "%s_%s", + dev_name(dev), MSM_ISPIF_NAME); + ret = devm_request_irq(dev, ispif->irq, ispif_isr, + IRQF_TRIGGER_RISING, ispif->irq_name, ispif); + if (ret < 0) { + dev_err(dev, "request_irq failed: %d\n", ret); + return ret; + } + + /* Clocks */ + + ispif->nclocks = 0; + while (res->clock[ispif->nclocks]) + ispif->nclocks++; + + ispif->clock = devm_kzalloc(dev, ispif->nclocks * sizeof(*ispif->clock), + GFP_KERNEL); + if (!ispif->clock) + return -ENOMEM; + + for (i = 0; i < ispif->nclocks; i++) { + struct camss_clock *clock = &ispif->clock[i]; + + clock->clk = devm_clk_get(dev, res->clock[i]); + if (IS_ERR(clock->clk)) + return PTR_ERR(clock->clk); + + clock->freq = NULL; + clock->nfreqs = 0; + } + + ispif->nclocks_for_reset = 0; + while 
(res->clock_for_reset[ispif->nclocks_for_reset]) + ispif->nclocks_for_reset++; + + ispif->clock_for_reset = devm_kzalloc(dev, ispif->nclocks_for_reset * + sizeof(*ispif->clock_for_reset), GFP_KERNEL); + if (!ispif->clock_for_reset) + return -ENOMEM; + + for (i = 0; i < ispif->nclocks_for_reset; i++) { + struct camss_clock *clock = &ispif->clock_for_reset[i]; + + clock->clk = devm_clk_get(dev, res->clock_for_reset[i]); + if (IS_ERR(clock->clk)) + return PTR_ERR(clock->clk); + + clock->freq = NULL; + clock->nfreqs = 0; + } + + for (i = 0; i < ARRAY_SIZE(ispif->line); i++) + ispif->line[i].id = i; + + mutex_init(&ispif->power_lock); + ispif->power_count = 0; + + mutex_init(&ispif->config_lock); + + init_completion(&ispif->reset_complete); + + return 0; +} + +/* + * ispif_get_intf - Get ISPIF interface to use by VFE line id + * @line_id: VFE line id that the ISPIF line is connected to + * + * Return ISPIF interface to use + */ +static enum ispif_intf ispif_get_intf(enum vfe_line_id line_id) +{ + switch (line_id) { + case (VFE_LINE_RDI0): + return RDI0; + case (VFE_LINE_RDI1): + return RDI1; + case (VFE_LINE_RDI2): + return RDI2; + case (VFE_LINE_PIX): + return PIX0; + default: + return RDI0; + } +} + +/* + * ispif_link_setup - Setup ISPIF connections + * @entity: Pointer to media entity structure + * @local: Pointer to local pad + * @remote: Pointer to remote pad + * @flags: Link flags + * + * Return 0 on success + */ +static int ispif_link_setup(struct media_entity *entity, + const struct media_pad *local, + const struct media_pad *remote, u32 flags) +{ + if (flags & MEDIA_LNK_FL_ENABLED) { + if (media_entity_remote_pad(local)) + return -EBUSY; + + if (local->flags & MEDIA_PAD_FL_SINK) { + struct v4l2_subdev *sd; + struct ispif_line *line; + + sd = media_entity_to_v4l2_subdev(entity); + line = v4l2_get_subdevdata(sd); + + msm_csid_get_csid_id(remote->entity, &line->csid_id); + } else { /* MEDIA_PAD_FL_SOURCE */ + struct v4l2_subdev *sd; + struct ispif_line *line; + 
enum vfe_line_id id; + + sd = media_entity_to_v4l2_subdev(entity); + line = v4l2_get_subdevdata(sd); + + msm_vfe_get_vfe_id(remote->entity, &line->vfe_id); + msm_vfe_get_vfe_line_id(remote->entity, &id); + line->interface = ispif_get_intf(id); + } + } + + return 0; +} + +static const struct v4l2_subdev_core_ops ispif_core_ops = { + .s_power = ispif_set_power, +}; + +static const struct v4l2_subdev_video_ops ispif_video_ops = { + .s_stream = ispif_set_stream, +}; + +static const struct v4l2_subdev_pad_ops ispif_pad_ops = { + .enum_mbus_code = ispif_enum_mbus_code, + .enum_frame_size = ispif_enum_frame_size, + .get_fmt = ispif_get_format, + .set_fmt = ispif_set_format, +}; + +static const struct v4l2_subdev_ops ispif_v4l2_ops = { + .core = &ispif_core_ops, + .video = &ispif_video_ops, + .pad = &ispif_pad_ops, +}; + +static const struct v4l2_subdev_internal_ops ispif_v4l2_internal_ops = { + .open = ispif_init_formats, +}; + +static const struct media_entity_operations ispif_media_ops = { + .link_setup = ispif_link_setup, + .link_validate = v4l2_subdev_link_validate, +}; + +/* + * msm_ispif_register_entities - Register subdev node for ISPIF module + * @ispif: ISPIF device + * @v4l2_dev: V4L2 device + * + * Return 0 on success or a negative error code otherwise + */ +int msm_ispif_register_entities(struct ispif_device *ispif, + struct v4l2_device *v4l2_dev) +{ + struct device *dev = to_device(ispif); + int ret; + int i; + + for (i = 0; i < ARRAY_SIZE(ispif->line); i++) { + struct v4l2_subdev *sd = &ispif->line[i].subdev; + struct media_pad *pads = ispif->line[i].pads; + + v4l2_subdev_init(sd, &ispif_v4l2_ops); + sd->internal_ops = &ispif_v4l2_internal_ops; + sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; + snprintf(sd->name, ARRAY_SIZE(sd->name), "%s%d", + MSM_ISPIF_NAME, i); + v4l2_set_subdevdata(sd, &ispif->line[i]); + + ret = ispif_init_formats(sd, NULL); + if (ret < 0) { + dev_err(dev, "Failed to init format: %d\n", ret); + goto error; + } + + 
pads[MSM_ISPIF_PAD_SINK].flags = MEDIA_PAD_FL_SINK; + pads[MSM_ISPIF_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE; + + sd->entity.function = MEDIA_ENT_F_IO_V4L; + sd->entity.ops = &ispif_media_ops; + ret = media_entity_pads_init(&sd->entity, MSM_ISPIF_PADS_NUM, + pads); + if (ret < 0) { + dev_err(dev, "Failed to init media entity: %d\n", ret); + goto error; + } + + ret = v4l2_device_register_subdev(v4l2_dev, sd); + if (ret < 0) { + dev_err(dev, "Failed to register subdev: %d\n", ret); + media_entity_cleanup(&sd->entity); + goto error; + } + } + + return 0; + +error: + for (i--; i >= 0; i--) { + struct v4l2_subdev *sd = &ispif->line[i].subdev; + + v4l2_device_unregister_subdev(sd); + media_entity_cleanup(&sd->entity); + } + + return ret; +} + +/* + * msm_ispif_unregister_entities - Unregister ISPIF module subdev node + * @ispif: ISPIF device + */ +void msm_ispif_unregister_entities(struct ispif_device *ispif) +{ + int i; + + mutex_destroy(&ispif->power_lock); + mutex_destroy(&ispif->config_lock); + + for (i = 0; i < ARRAY_SIZE(ispif->line); i++) { + struct v4l2_subdev *sd = &ispif->line[i].subdev; + + v4l2_device_unregister_subdev(sd); + media_entity_cleanup(&sd->entity); + } +} diff --git a/drivers/media/platform/qcom/camss-8x16/camss-ispif.h b/drivers/media/platform/qcom/camss-8x16/camss-ispif.h new file mode 100644 index 000000000000..f668306020c3 --- /dev/null +++ b/drivers/media/platform/qcom/camss-8x16/camss-ispif.h @@ -0,0 +1,85 @@ +/* + * camss-ispif.h + * + * Qualcomm MSM Camera Subsystem - ISPIF (ISP Interface) Module + * + * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. + * Copyright (C) 2015-2017 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef QC_MSM_CAMSS_ISPIF_H +#define QC_MSM_CAMSS_ISPIF_H + +#include <linux/clk.h> +#include <media/media-entity.h> +#include <media/v4l2-device.h> +#include <media/v4l2-subdev.h> + +/* Number of ISPIF lines - same as number of CSID hardware modules */ +#define MSM_ISPIF_LINE_NUM 2 + +#define MSM_ISPIF_PAD_SINK 0 +#define MSM_ISPIF_PAD_SRC 1 +#define MSM_ISPIF_PADS_NUM 2 + +#define MSM_ISPIF_VFE_NUM 1 + +enum ispif_intf { + PIX0, + RDI0, + PIX1, + RDI1, + RDI2 +}; + +struct ispif_intf_cmd_reg { + u32 cmd_0; + u32 cmd_1; +}; + +struct ispif_line { + u8 id; + u8 csid_id; + u8 vfe_id; + enum ispif_intf interface; + struct v4l2_subdev subdev; + struct media_pad pads[MSM_ISPIF_PADS_NUM]; + struct v4l2_mbus_framefmt fmt[MSM_ISPIF_PADS_NUM]; +}; + +struct ispif_device { + void __iomem *base; + void __iomem *base_clk_mux; + u32 irq; + char irq_name[30]; + struct camss_clock *clock; + int nclocks; + struct camss_clock *clock_for_reset; + int nclocks_for_reset; + struct completion reset_complete; + int power_count; + struct mutex power_lock; + struct ispif_intf_cmd_reg intf_cmd[MSM_ISPIF_VFE_NUM]; + struct mutex config_lock; + struct ispif_line line[MSM_ISPIF_LINE_NUM]; +}; + +struct resources_ispif; + +int msm_ispif_subdev_init(struct ispif_device *ispif, + const struct resources_ispif *res); + +int msm_ispif_register_entities(struct ispif_device *ispif, + struct v4l2_device *v4l2_dev); + +void msm_ispif_unregister_entities(struct ispif_device *ispif); + +#endif /* QC_MSM_CAMSS_ISPIF_H */ diff --git a/drivers/media/platform/qcom/camss-8x16/camss-vfe.c b/drivers/media/platform/qcom/camss-8x16/camss-vfe.c new file mode 100644 index 000000000000..dcc2b397f713 --- /dev/null +++ 
b/drivers/media/platform/qcom/camss-8x16/camss-vfe.c @@ -0,0 +1,3090 @@ +/* + * camss-vfe.c + * + * Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module + * + * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. + * Copyright (C) 2015-2017 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include <linux/clk.h> +#include <linux/completion.h> +#include <linux/interrupt.h> +#include <linux/iommu.h> +#include <linux/iopoll.h> +#include <linux/mutex.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/spinlock_types.h> +#include <linux/spinlock.h> +#include <media/media-entity.h> +#include <media/v4l2-device.h> +#include <media/v4l2-subdev.h> + +#include "camss-vfe.h" +#include "camss.h" + +#define MSM_VFE_NAME "msm_vfe" + +#define vfe_line_array(ptr_line) \ + ((const struct vfe_line (*)[]) &(ptr_line[-(ptr_line->id)])) + +#define to_vfe(ptr_line) \ + container_of(vfe_line_array(ptr_line), struct vfe_device, ptr_line) + +#define VFE_0_HW_VERSION 0x000 + +#define VFE_0_GLOBAL_RESET_CMD 0x00c +#define VFE_0_GLOBAL_RESET_CMD_CORE (1 << 0) +#define VFE_0_GLOBAL_RESET_CMD_CAMIF (1 << 1) +#define VFE_0_GLOBAL_RESET_CMD_BUS (1 << 2) +#define VFE_0_GLOBAL_RESET_CMD_BUS_BDG (1 << 3) +#define VFE_0_GLOBAL_RESET_CMD_REGISTER (1 << 4) +#define VFE_0_GLOBAL_RESET_CMD_TIMER (1 << 5) +#define VFE_0_GLOBAL_RESET_CMD_PM (1 << 6) +#define VFE_0_GLOBAL_RESET_CMD_BUS_MISR (1 << 7) +#define VFE_0_GLOBAL_RESET_CMD_TESTGEN (1 << 8) + +#define VFE_0_MODULE_CFG 0x018 +#define VFE_0_MODULE_CFG_DEMUX (1 << 2) +#define 
VFE_0_MODULE_CFG_CHROMA_UPSAMPLE (1 << 3) +#define VFE_0_MODULE_CFG_SCALE_ENC (1 << 23) +#define VFE_0_MODULE_CFG_CROP_ENC (1 << 27) + +#define VFE_0_CORE_CFG 0x01c +#define VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR 0x4 +#define VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB 0x5 +#define VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY 0x6 +#define VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY 0x7 + +#define VFE_0_IRQ_CMD 0x024 +#define VFE_0_IRQ_CMD_GLOBAL_CLEAR (1 << 0) + +#define VFE_0_IRQ_MASK_0 0x028 +#define VFE_0_IRQ_MASK_0_CAMIF_SOF (1 << 0) +#define VFE_0_IRQ_MASK_0_CAMIF_EOF (1 << 1) +#define VFE_0_IRQ_MASK_0_RDIn_REG_UPDATE(n) (1 << ((n) + 5)) +#define VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(n) \ + ((n) == VFE_LINE_PIX ? (1 << 4) : VFE_0_IRQ_MASK_0_RDIn_REG_UPDATE(n)) +#define VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(n) (1 << ((n) + 8)) +#define VFE_0_IRQ_MASK_0_IMAGE_COMPOSITE_DONE_n(n) (1 << ((n) + 25)) +#define VFE_0_IRQ_MASK_0_RESET_ACK (1 << 31) +#define VFE_0_IRQ_MASK_1 0x02c +#define VFE_0_IRQ_MASK_1_CAMIF_ERROR (1 << 0) +#define VFE_0_IRQ_MASK_1_VIOLATION (1 << 7) +#define VFE_0_IRQ_MASK_1_BUS_BDG_HALT_ACK (1 << 8) +#define VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(n) (1 << ((n) + 9)) +#define VFE_0_IRQ_MASK_1_RDIn_SOF(n) (1 << ((n) + 29)) + +#define VFE_0_IRQ_CLEAR_0 0x030 +#define VFE_0_IRQ_CLEAR_1 0x034 + +#define VFE_0_IRQ_STATUS_0 0x038 +#define VFE_0_IRQ_STATUS_0_CAMIF_SOF (1 << 0) +#define VFE_0_IRQ_STATUS_0_RDIn_REG_UPDATE(n) (1 << ((n) + 5)) +#define VFE_0_IRQ_STATUS_0_line_n_REG_UPDATE(n) \ + ((n) == VFE_LINE_PIX ? 
(1 << 4) : VFE_0_IRQ_STATUS_0_RDIn_REG_UPDATE(n))
/*
 * NOTE(review): masks that touch bit 31 (and 0xf << 28) are written with
 * unsigned constants below. A signed "1 << 31" shifts into the sign bit of
 * int, which is undefined behaviour in C (CERT INT32-C); the register values
 * are unchanged.
 */
#define VFE_0_IRQ_STATUS_0_IMAGE_MASTER_n_PING_PONG(n)	(1 << ((n) + 8))
#define VFE_0_IRQ_STATUS_0_IMAGE_COMPOSITE_DONE_n(n)	(1 << ((n) + 25))
#define VFE_0_IRQ_STATUS_0_RESET_ACK		(1U << 31)
#define VFE_0_IRQ_STATUS_1			0x03c
#define VFE_0_IRQ_STATUS_1_VIOLATION		(1 << 7)
#define VFE_0_IRQ_STATUS_1_BUS_BDG_HALT_ACK	(1 << 8)
/* n may be 2 (RDI2), which lands on bit 31 - keep the constant unsigned */
#define VFE_0_IRQ_STATUS_1_RDIn_SOF(n)		(1U << ((n) + 29))

#define VFE_0_IRQ_COMPOSITE_MASK_0	0x40
#define VFE_0_VIOLATION_STATUS		0x48

#define VFE_0_BUS_CMD			0x4c
#define VFE_0_BUS_CMD_Mx_RLD_CMD(x)	(1 << (x))

#define VFE_0_BUS_CFG			0x050

/* Two masters share one XBAR config register (low/high 16 bits) */
#define VFE_0_BUS_XBAR_CFG_x(x)		(0x58 + 0x4 * ((x) / 2))
#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN			(1 << 1)
#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA	(0x3 << 4)
#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT		8
#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_LUMA		0
#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0	5
#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1	6
#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2	7

/* Per write master register bank, stride 0x24 */
#define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(n)		(0x06c + 0x24 * (n))
#define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT	0
#define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_FRM_BASED_SHIFT	1
#define VFE_0_BUS_IMAGE_MASTER_n_WR_PING_ADDR(n)	(0x070 + 0x24 * (n))
#define VFE_0_BUS_IMAGE_MASTER_n_WR_PONG_ADDR(n)	(0x074 + 0x24 * (n))
#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(n)		(0x078 + 0x24 * (n))
#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_SHIFT	2
#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK	(0x1F << 2)

#define VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG(n)		(0x07c + 0x24 * (n))
#define VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG_OFFSET_SHIFT	16
#define VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(n)	(0x080 + 0x24 * (n))
#define VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(n)	(0x084 + 0x24 * (n))
#define VFE_0_BUS_IMAGE_MASTER_n_WR_FRAMEDROP_PATTERN(n) \
	(0x088 + 0x24 * (n))
#define VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN(n) \
	(0x08c + 0x24 * (n))
#define VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN_DEF	0xffffffff

#define VFE_0_BUS_PING_PONG_STATUS	0x268

#define VFE_0_BUS_BDG_CMD		0x2c0
#define VFE_0_BUS_BDG_CMD_HALT_REQ	1

#define VFE_0_BUS_BDG_QOS_CFG_0		0x2c4
#define VFE_0_BUS_BDG_QOS_CFG_0_CFG	0xaaa5aaa5
#define VFE_0_BUS_BDG_QOS_CFG_1		0x2c8
#define VFE_0_BUS_BDG_QOS_CFG_2		0x2cc
#define VFE_0_BUS_BDG_QOS_CFG_3		0x2d0
#define VFE_0_BUS_BDG_QOS_CFG_4		0x2d4
#define VFE_0_BUS_BDG_QOS_CFG_5		0x2d8
#define VFE_0_BUS_BDG_QOS_CFG_6		0x2dc
#define VFE_0_BUS_BDG_QOS_CFG_7		0x2e0
#define VFE_0_BUS_BDG_QOS_CFG_7_CFG	0x0001aaa5

#define VFE_0_RDI_CFG_x(x)		(0x2e8 + (0x4 * (x)))
#define VFE_0_RDI_CFG_x_RDI_STREAM_SEL_SHIFT	28
/* unsigned: the mask occupies bits 28..31 */
#define VFE_0_RDI_CFG_x_RDI_STREAM_SEL_MASK	(0xfU << 28)
#define VFE_0_RDI_CFG_x_RDI_M0_SEL_SHIFT	4
#define VFE_0_RDI_CFG_x_RDI_M0_SEL_MASK		(0xf << 4)
#define VFE_0_RDI_CFG_x_RDI_EN_BIT		(1 << 2)
#define VFE_0_RDI_CFG_x_MIPI_EN_BITS		0x3
#define VFE_0_RDI_CFG_x_RDI_Mr_FRAME_BASED_EN(r)	(1 << (16 + (r)))

#define VFE_0_CAMIF_CMD				0x2f4
#define VFE_0_CAMIF_CMD_DISABLE_FRAME_BOUNDARY	0
#define VFE_0_CAMIF_CMD_ENABLE_FRAME_BOUNDARY	1
#define VFE_0_CAMIF_CMD_CLEAR_CAMIF_STATUS	(1 << 2)
#define VFE_0_CAMIF_CFG				0x2f8
#define VFE_0_CAMIF_CFG_VFE_OUTPUT_EN		(1 << 6)
#define VFE_0_CAMIF_FRAME_CFG			0x300
#define VFE_0_CAMIF_WINDOW_WIDTH_CFG		0x304
#define VFE_0_CAMIF_WINDOW_HEIGHT_CFG		0x308
#define VFE_0_CAMIF_SUBSAMPLE_CFG_0		0x30c
#define VFE_0_CAMIF_IRQ_SUBSAMPLE_PATTERN	0x314
#define VFE_0_CAMIF_STATUS			0x31c
/* unsigned: bit 31 (see note above) */
#define VFE_0_CAMIF_STATUS_HALT			(1U << 31)

#define VFE_0_REG_UPDATE		0x378
#define VFE_0_REG_UPDATE_RDIn(n)	(1 << (1 + (n)))
#define VFE_0_REG_UPDATE_line_n(n) \
	((n) == VFE_LINE_PIX ?
1 : VFE_0_REG_UPDATE_RDIn(n)) + +#define VFE_0_DEMUX_CFG 0x424 +#define VFE_0_DEMUX_CFG_PERIOD 0x3 +#define VFE_0_DEMUX_GAIN_0 0x428 +#define VFE_0_DEMUX_GAIN_0_CH0_EVEN (0x80 << 0) +#define VFE_0_DEMUX_GAIN_0_CH0_ODD (0x80 << 16) +#define VFE_0_DEMUX_GAIN_1 0x42c +#define VFE_0_DEMUX_GAIN_1_CH1 (0x80 << 0) +#define VFE_0_DEMUX_GAIN_1_CH2 (0x80 << 16) +#define VFE_0_DEMUX_EVEN_CFG 0x438 +#define VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV 0x9cac +#define VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU 0xac9c +#define VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY 0xc9ca +#define VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY 0xcac9 +#define VFE_0_DEMUX_ODD_CFG 0x43c +#define VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV 0x9cac +#define VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU 0xac9c +#define VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY 0xc9ca +#define VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY 0xcac9 + +#define VFE_0_SCALE_ENC_Y_CFG 0x75c +#define VFE_0_SCALE_ENC_Y_H_IMAGE_SIZE 0x760 +#define VFE_0_SCALE_ENC_Y_H_PHASE 0x764 +#define VFE_0_SCALE_ENC_Y_V_IMAGE_SIZE 0x76c +#define VFE_0_SCALE_ENC_Y_V_PHASE 0x770 +#define VFE_0_SCALE_ENC_CBCR_CFG 0x778 +#define VFE_0_SCALE_ENC_CBCR_H_IMAGE_SIZE 0x77c +#define VFE_0_SCALE_ENC_CBCR_H_PHASE 0x780 +#define VFE_0_SCALE_ENC_CBCR_V_IMAGE_SIZE 0x790 +#define VFE_0_SCALE_ENC_CBCR_V_PHASE 0x794 + +#define VFE_0_CROP_ENC_Y_WIDTH 0x854 +#define VFE_0_CROP_ENC_Y_HEIGHT 0x858 +#define VFE_0_CROP_ENC_CBCR_WIDTH 0x85c +#define VFE_0_CROP_ENC_CBCR_HEIGHT 0x860 + +#define VFE_0_CLAMP_ENC_MAX_CFG 0x874 +#define VFE_0_CLAMP_ENC_MAX_CFG_CH0 (0xff << 0) +#define VFE_0_CLAMP_ENC_MAX_CFG_CH1 (0xff << 8) +#define VFE_0_CLAMP_ENC_MAX_CFG_CH2 (0xff << 16) +#define VFE_0_CLAMP_ENC_MIN_CFG 0x878 +#define VFE_0_CLAMP_ENC_MIN_CFG_CH0 (0x0 << 0) +#define VFE_0_CLAMP_ENC_MIN_CFG_CH1 (0x0 << 8) +#define VFE_0_CLAMP_ENC_MIN_CFG_CH2 (0x0 << 16) + +#define VFE_0_CGC_OVERRIDE_1 0x974 +#define VFE_0_CGC_OVERRIDE_1_IMAGE_Mx_CGC_OVERRIDE(x) (1 << (x)) + +/* VFE reset timeout */ +#define VFE_RESET_TIMEOUT_MS 50 +/* VFE halt timeout */ +#define 
VFE_HALT_TIMEOUT_MS 100 +/* Max number of frame drop updates per frame */ +#define VFE_FRAME_DROP_UPDATES 5 +/* Frame drop value. NOTE: VAL + UPDATES should not exceed 31 */ +#define VFE_FRAME_DROP_VAL 20 + +#define VFE_NEXT_SOF_MS 500 + +#define CAMIF_TIMEOUT_SLEEP_US 1000 +#define CAMIF_TIMEOUT_ALL_US 1000000 + +#define SCALER_RATIO_MAX 16 + +static const struct { + u32 code; + u8 bpp; +} vfe_formats[] = { + { + MEDIA_BUS_FMT_UYVY8_2X8, + 8, + }, + { + MEDIA_BUS_FMT_VYUY8_2X8, + 8, + }, + { + MEDIA_BUS_FMT_YUYV8_2X8, + 8, + }, + { + MEDIA_BUS_FMT_YVYU8_2X8, + 8, + }, + { + MEDIA_BUS_FMT_SBGGR8_1X8, + 8, + }, + { + MEDIA_BUS_FMT_SGBRG8_1X8, + 8, + }, + { + MEDIA_BUS_FMT_SGRBG8_1X8, + 8, + }, + { + MEDIA_BUS_FMT_SRGGB8_1X8, + 8, + }, + { + MEDIA_BUS_FMT_SBGGR10_1X10, + 10, + }, + { + MEDIA_BUS_FMT_SGBRG10_1X10, + 10, + }, + { + MEDIA_BUS_FMT_SGRBG10_1X10, + 10, + }, + { + MEDIA_BUS_FMT_SRGGB10_1X10, + 10, + }, + { + MEDIA_BUS_FMT_SBGGR12_1X12, + 12, + }, + { + MEDIA_BUS_FMT_SGBRG12_1X12, + 12, + }, + { + MEDIA_BUS_FMT_SGRBG12_1X12, + 12, + }, + { + MEDIA_BUS_FMT_SRGGB12_1X12, + 12, + } +}; + +/* + * vfe_get_bpp - map media bus format to bits per pixel + * @code: media bus format code + * + * Return number of bits per pixel + */ +static u8 vfe_get_bpp(u32 code) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(vfe_formats); i++) + if (code == vfe_formats[i].code) + return vfe_formats[i].bpp; + + WARN(1, "Unknown format\n"); + + return vfe_formats[0].bpp; +} + +static inline void vfe_reg_clr(struct vfe_device *vfe, u32 reg, u32 clr_bits) +{ + u32 bits = readl_relaxed(vfe->base + reg); + + writel_relaxed(bits & ~clr_bits, vfe->base + reg); +} + +static inline void vfe_reg_set(struct vfe_device *vfe, u32 reg, u32 set_bits) +{ + u32 bits = readl_relaxed(vfe->base + reg); + + writel_relaxed(bits | set_bits, vfe->base + reg); +} + +static void vfe_global_reset(struct vfe_device *vfe) +{ + u32 reset_bits = VFE_0_GLOBAL_RESET_CMD_TESTGEN | + 
VFE_0_GLOBAL_RESET_CMD_BUS_MISR | + VFE_0_GLOBAL_RESET_CMD_PM | + VFE_0_GLOBAL_RESET_CMD_TIMER | + VFE_0_GLOBAL_RESET_CMD_REGISTER | + VFE_0_GLOBAL_RESET_CMD_BUS_BDG | + VFE_0_GLOBAL_RESET_CMD_BUS | + VFE_0_GLOBAL_RESET_CMD_CAMIF | + VFE_0_GLOBAL_RESET_CMD_CORE; + + writel_relaxed(reset_bits, vfe->base + VFE_0_GLOBAL_RESET_CMD); +} + +static void vfe_wm_enable(struct vfe_device *vfe, u8 wm, u8 enable) +{ + if (enable) + vfe_reg_set(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm), + 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT); + else + vfe_reg_clr(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm), + 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT); +} + +static void vfe_wm_frame_based(struct vfe_device *vfe, u8 wm, u8 enable) +{ + if (enable) + vfe_reg_set(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm), + 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_FRM_BASED_SHIFT); + else + vfe_reg_clr(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm), + 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_FRM_BASED_SHIFT); +} + +#define CALC_WORD(width, M, N) (((width) * (M) + (N) - 1) / (N)) + +static int vfe_word_per_line(uint32_t format, uint32_t pixel_per_line) +{ + int val = 0; + + switch (format) { + case V4L2_PIX_FMT_NV12: + case V4L2_PIX_FMT_NV21: + case V4L2_PIX_FMT_NV16: + case V4L2_PIX_FMT_NV61: + val = CALC_WORD(pixel_per_line, 1, 8); + break; + case V4L2_PIX_FMT_YUYV: + case V4L2_PIX_FMT_YVYU: + case V4L2_PIX_FMT_UYVY: + case V4L2_PIX_FMT_VYUY: + val = CALC_WORD(pixel_per_line, 2, 8); + break; + } + + return val; +} + +static void vfe_get_wm_sizes(struct v4l2_pix_format_mplane *pix, u8 plane, + u16 *width, u16 *height, u16 *bytesperline) +{ + switch (pix->pixelformat) { + case V4L2_PIX_FMT_NV12: + case V4L2_PIX_FMT_NV21: + *width = pix->width; + *height = pix->height; + *bytesperline = pix->plane_fmt[0].bytesperline; + if (plane == 1) + *height /= 2; + break; + case V4L2_PIX_FMT_NV16: + case V4L2_PIX_FMT_NV61: + *width = pix->width; + *height = pix->height; + *bytesperline = pix->plane_fmt[0].bytesperline; + 
break; + } +} + +static void vfe_wm_line_based(struct vfe_device *vfe, u32 wm, + struct v4l2_pix_format_mplane *pix, + u8 plane, u32 enable) +{ + u32 reg; + + if (enable) { + u16 width = 0, height = 0, bytesperline = 0, wpl; + + vfe_get_wm_sizes(pix, plane, &width, &height, &bytesperline); + + wpl = vfe_word_per_line(pix->pixelformat, width); + + reg = height - 1; + reg |= ((wpl + 1) / 2 - 1) << 16; + + writel_relaxed(reg, vfe->base + + VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(wm)); + + wpl = vfe_word_per_line(pix->pixelformat, bytesperline); + + reg = 0x3; + reg |= (height - 1) << 4; + reg |= wpl << 16; + + writel_relaxed(reg, vfe->base + + VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(wm)); + } else { + writel_relaxed(0, vfe->base + + VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(wm)); + writel_relaxed(0, vfe->base + + VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(wm)); + } +} + +static void vfe_wm_set_framedrop_period(struct vfe_device *vfe, u8 wm, u8 per) +{ + u32 reg; + + reg = readl_relaxed(vfe->base + + VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm)); + + reg &= ~(VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK); + + reg |= (per << VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_SHIFT) + & VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK; + + writel_relaxed(reg, + vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm)); +} + +static void vfe_wm_set_framedrop_pattern(struct vfe_device *vfe, u8 wm, + u32 pattern) +{ + writel_relaxed(pattern, + vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_FRAMEDROP_PATTERN(wm)); +} + +static void vfe_wm_set_ub_cfg(struct vfe_device *vfe, u8 wm, u16 offset, + u16 depth) +{ + u32 reg; + + reg = (offset << VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG_OFFSET_SHIFT) | + depth; + writel_relaxed(reg, vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG(wm)); +} + +static void vfe_bus_reload_wm(struct vfe_device *vfe, u8 wm) +{ + wmb(); + writel_relaxed(VFE_0_BUS_CMD_Mx_RLD_CMD(wm), vfe->base + VFE_0_BUS_CMD); + wmb(); +} + +static void vfe_wm_set_ping_addr(struct 
vfe_device *vfe, u8 wm, u32 addr) +{ + writel_relaxed(addr, + vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_PING_ADDR(wm)); +} + +static void vfe_wm_set_pong_addr(struct vfe_device *vfe, u8 wm, u32 addr) +{ + writel_relaxed(addr, + vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_PONG_ADDR(wm)); +} + +static int vfe_wm_get_ping_pong_status(struct vfe_device *vfe, u8 wm) +{ + u32 reg; + + reg = readl_relaxed(vfe->base + VFE_0_BUS_PING_PONG_STATUS); + + return (reg >> wm) & 0x1; +} + +static void vfe_bus_enable_wr_if(struct vfe_device *vfe, u8 enable) +{ + if (enable) + writel_relaxed(0x10000009, vfe->base + VFE_0_BUS_CFG); + else + writel_relaxed(0, vfe->base + VFE_0_BUS_CFG); +} + +static void vfe_bus_connect_wm_to_rdi(struct vfe_device *vfe, u8 wm, + enum vfe_line_id id) +{ + u32 reg; + + reg = VFE_0_RDI_CFG_x_MIPI_EN_BITS; + reg |= VFE_0_RDI_CFG_x_RDI_Mr_FRAME_BASED_EN(id); + vfe_reg_set(vfe, VFE_0_RDI_CFG_x(0), reg); + + reg = VFE_0_RDI_CFG_x_RDI_EN_BIT; + reg |= ((3 * id) << VFE_0_RDI_CFG_x_RDI_STREAM_SEL_SHIFT) & + VFE_0_RDI_CFG_x_RDI_STREAM_SEL_MASK; + vfe_reg_set(vfe, VFE_0_RDI_CFG_x(id), reg); + + switch (id) { + case VFE_LINE_RDI0: + default: + reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 << + VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT; + break; + case VFE_LINE_RDI1: + reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 << + VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT; + break; + case VFE_LINE_RDI2: + reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 << + VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT; + break; + } + + if (wm % 2 == 1) + reg <<= 16; + + vfe_reg_set(vfe, VFE_0_BUS_XBAR_CFG_x(wm), reg); +} + +static void vfe_wm_set_subsample(struct vfe_device *vfe, u8 wm) +{ + writel_relaxed(VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN_DEF, + vfe->base + + VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN(wm)); +} + +static void vfe_bus_disconnect_wm_from_rdi(struct vfe_device *vfe, u8 wm, + enum vfe_line_id id) +{ + u32 reg; + + reg = 
VFE_0_RDI_CFG_x_RDI_Mr_FRAME_BASED_EN(id); + vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(0), reg); + + reg = VFE_0_RDI_CFG_x_RDI_EN_BIT; + vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(id), reg); + + switch (id) { + case VFE_LINE_RDI0: + default: + reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 << + VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT; + break; + case VFE_LINE_RDI1: + reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 << + VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT; + break; + case VFE_LINE_RDI2: + reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 << + VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT; + break; + } + + if (wm % 2 == 1) + reg <<= 16; + + vfe_reg_clr(vfe, VFE_0_BUS_XBAR_CFG_x(wm), reg); +} + +static void vfe_set_xbar_cfg(struct vfe_device *vfe, struct vfe_output *output, + u8 enable) +{ + struct vfe_line *line = container_of(output, struct vfe_line, output); + u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat; + u32 reg; + unsigned int i; + + for (i = 0; i < output->wm_num; i++) { + if (i == 0) { + reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_LUMA << + VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT; + } else if (i == 1) { + reg = VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN; + if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV16) + reg |= VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA; + } + + if (output->wm_idx[i] % 2 == 1) + reg <<= 16; + + if (enable) + vfe_reg_set(vfe, + VFE_0_BUS_XBAR_CFG_x(output->wm_idx[i]), + reg); + else + vfe_reg_clr(vfe, + VFE_0_BUS_XBAR_CFG_x(output->wm_idx[i]), + reg); + } +} + +static void vfe_set_rdi_cid(struct vfe_device *vfe, enum vfe_line_id id, u8 cid) +{ + vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(id), + VFE_0_RDI_CFG_x_RDI_M0_SEL_MASK); + + vfe_reg_set(vfe, VFE_0_RDI_CFG_x(id), + cid << VFE_0_RDI_CFG_x_RDI_M0_SEL_SHIFT); +} + +static void vfe_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id) +{ + vfe->reg_update |= VFE_0_REG_UPDATE_line_n(line_id); + wmb(); + writel_relaxed(vfe->reg_update, 
vfe->base + VFE_0_REG_UPDATE); + wmb(); +} + +static void vfe_enable_irq_wm_line(struct vfe_device *vfe, u8 wm, + enum vfe_line_id line_id, u8 enable) +{ + u32 irq_en0 = VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(wm) | + VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(line_id); + u32 irq_en1 = VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(wm) | + VFE_0_IRQ_MASK_1_RDIn_SOF(line_id); + + if (enable) { + vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0); + vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1); + } else { + vfe_reg_clr(vfe, VFE_0_IRQ_MASK_0, irq_en0); + vfe_reg_clr(vfe, VFE_0_IRQ_MASK_1, irq_en1); + } +} + +static void vfe_enable_irq_pix_line(struct vfe_device *vfe, u8 comp, + enum vfe_line_id line_id, u8 enable) +{ + struct vfe_output *output = &vfe->line[line_id].output; + unsigned int i; + u32 irq_en0; + u32 irq_en1; + u32 comp_mask = 0; + + irq_en0 = VFE_0_IRQ_MASK_0_CAMIF_SOF; + irq_en0 |= VFE_0_IRQ_MASK_0_CAMIF_EOF; + irq_en0 |= VFE_0_IRQ_MASK_0_IMAGE_COMPOSITE_DONE_n(comp); + irq_en0 |= VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(line_id); + irq_en1 = VFE_0_IRQ_MASK_1_CAMIF_ERROR; + for (i = 0; i < output->wm_num; i++) { + irq_en1 |= VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW( + output->wm_idx[i]); + comp_mask |= (1 << output->wm_idx[i]) << comp * 8; + } + + if (enable) { + vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0); + vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1); + vfe_reg_set(vfe, VFE_0_IRQ_COMPOSITE_MASK_0, comp_mask); + } else { + vfe_reg_clr(vfe, VFE_0_IRQ_MASK_0, irq_en0); + vfe_reg_clr(vfe, VFE_0_IRQ_MASK_1, irq_en1); + vfe_reg_clr(vfe, VFE_0_IRQ_COMPOSITE_MASK_0, comp_mask); + } +} + +static void vfe_enable_irq_common(struct vfe_device *vfe) +{ + u32 irq_en0 = VFE_0_IRQ_MASK_0_RESET_ACK; + u32 irq_en1 = VFE_0_IRQ_MASK_1_VIOLATION | + VFE_0_IRQ_MASK_1_BUS_BDG_HALT_ACK; + + vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0); + vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1); +} + +static void vfe_set_demux_cfg(struct vfe_device *vfe, struct vfe_line *line) +{ + u32 val, even_cfg, 
odd_cfg; + + writel_relaxed(VFE_0_DEMUX_CFG_PERIOD, vfe->base + VFE_0_DEMUX_CFG); + + val = VFE_0_DEMUX_GAIN_0_CH0_EVEN | VFE_0_DEMUX_GAIN_0_CH0_ODD; + writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_0); + + val = VFE_0_DEMUX_GAIN_1_CH1 | VFE_0_DEMUX_GAIN_1_CH2; + writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_1); + + switch (line->fmt[MSM_VFE_PAD_SINK].code) { + case MEDIA_BUS_FMT_YUYV8_2X8: + even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV; + odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV; + break; + case MEDIA_BUS_FMT_YVYU8_2X8: + even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU; + odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU; + break; + case MEDIA_BUS_FMT_UYVY8_2X8: + default: + even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY; + odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY; + break; + case MEDIA_BUS_FMT_VYUY8_2X8: + even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY; + odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY; + break; + } + + writel_relaxed(even_cfg, vfe->base + VFE_0_DEMUX_EVEN_CFG); + writel_relaxed(odd_cfg, vfe->base + VFE_0_DEMUX_ODD_CFG); +} + +static inline u8 vfe_calc_interp_reso(u16 input, u16 output) +{ + if (input / output >= 16) + return 0; + + if (input / output >= 8) + return 1; + + if (input / output >= 4) + return 2; + + return 3; +} + +static void vfe_set_scale_cfg(struct vfe_device *vfe, struct vfe_line *line) +{ + u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat; + u32 reg; + u16 input, output; + u8 interp_reso; + u32 phase_mult; + + writel_relaxed(0x3, vfe->base + VFE_0_SCALE_ENC_Y_CFG); + + input = line->fmt[MSM_VFE_PAD_SINK].width; + output = line->compose.width; + reg = (output << 16) | input; + writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_H_IMAGE_SIZE); + + interp_reso = vfe_calc_interp_reso(input, output); + phase_mult = input * (1 << (13 + interp_reso)) / output; + reg = (interp_reso << 20) | phase_mult; + writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_H_PHASE); + + input = line->fmt[MSM_VFE_PAD_SINK].height; + output = 
line->compose.height; + reg = (output << 16) | input; + writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_V_IMAGE_SIZE); + + interp_reso = vfe_calc_interp_reso(input, output); + phase_mult = input * (1 << (13 + interp_reso)) / output; + reg = (interp_reso << 20) | phase_mult; + writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_V_PHASE); + + writel_relaxed(0x3, vfe->base + VFE_0_SCALE_ENC_CBCR_CFG); + + input = line->fmt[MSM_VFE_PAD_SINK].width; + output = line->compose.width / 2; + reg = (output << 16) | input; + writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_H_IMAGE_SIZE); + + interp_reso = vfe_calc_interp_reso(input, output); + phase_mult = input * (1 << (13 + interp_reso)) / output; + reg = (interp_reso << 20) | phase_mult; + writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_H_PHASE); + + input = line->fmt[MSM_VFE_PAD_SINK].height; + output = line->compose.height; + if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV21) + output = line->compose.height / 2; + reg = (output << 16) | input; + writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_V_IMAGE_SIZE); + + interp_reso = vfe_calc_interp_reso(input, output); + phase_mult = input * (1 << (13 + interp_reso)) / output; + reg = (interp_reso << 20) | phase_mult; + writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_V_PHASE); +} + +static void vfe_set_crop_cfg(struct vfe_device *vfe, struct vfe_line *line) +{ + u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat; + u32 reg; + u16 first, last; + + first = line->crop.left; + last = line->crop.left + line->crop.width - 1; + reg = (first << 16) | last; + writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_Y_WIDTH); + + first = line->crop.top; + last = line->crop.top + line->crop.height - 1; + reg = (first << 16) | last; + writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_Y_HEIGHT); + + first = line->crop.left / 2; + last = line->crop.left / 2 + line->crop.width / 2 - 1; + reg = (first << 16) | last; + writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_CBCR_WIDTH); + 
+ first = line->crop.top; + last = line->crop.top + line->crop.height - 1; + if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV21) { + first = line->crop.top / 2; + last = line->crop.top / 2 + line->crop.height / 2 - 1; + } + reg = (first << 16) | last; + writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_CBCR_HEIGHT); +} + +static void vfe_set_clamp_cfg(struct vfe_device *vfe) +{ + u32 val = VFE_0_CLAMP_ENC_MAX_CFG_CH0 | + VFE_0_CLAMP_ENC_MAX_CFG_CH1 | + VFE_0_CLAMP_ENC_MAX_CFG_CH2; + + writel_relaxed(val, vfe->base + VFE_0_CLAMP_ENC_MAX_CFG); + + val = VFE_0_CLAMP_ENC_MIN_CFG_CH0 | + VFE_0_CLAMP_ENC_MIN_CFG_CH1 | + VFE_0_CLAMP_ENC_MIN_CFG_CH2; + + writel_relaxed(val, vfe->base + VFE_0_CLAMP_ENC_MIN_CFG); +} + +/* + * vfe_reset - Trigger reset on VFE module and wait to complete + * @vfe: VFE device + * + * Return 0 on success or a negative error code otherwise + */ +static int vfe_reset(struct vfe_device *vfe) +{ + unsigned long time; + + reinit_completion(&vfe->reset_complete); + + vfe_global_reset(vfe); + + time = wait_for_completion_timeout(&vfe->reset_complete, + msecs_to_jiffies(VFE_RESET_TIMEOUT_MS)); + if (!time) { + dev_err(to_device(vfe), "VFE reset timeout\n"); + return -EIO; + } + + return 0; +} + +/* + * vfe_halt - Trigger halt on VFE module and wait to complete + * @vfe: VFE device + * + * Return 0 on success or a negative error code otherwise + */ +static int vfe_halt(struct vfe_device *vfe) +{ + unsigned long time; + + reinit_completion(&vfe->halt_complete); + + writel_relaxed(VFE_0_BUS_BDG_CMD_HALT_REQ, + vfe->base + VFE_0_BUS_BDG_CMD); + + time = wait_for_completion_timeout(&vfe->halt_complete, + msecs_to_jiffies(VFE_HALT_TIMEOUT_MS)); + if (!time) { + dev_err(to_device(vfe), "VFE halt timeout\n"); + return -EIO; + } + + return 0; +} + +static void vfe_init_outputs(struct vfe_device *vfe) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(vfe->line); i++) { + struct vfe_output *output = &vfe->line[i].output; + + output->state = VFE_OUTPUT_OFF; + 
output->buf[0] = NULL; + output->buf[1] = NULL; + INIT_LIST_HEAD(&output->pending_bufs); + + output->wm_num = 1; + if (vfe->line[i].id == VFE_LINE_PIX) + output->wm_num = 2; + } +} + +static void vfe_reset_output_maps(struct vfe_device *vfe) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(vfe->wm_output_map); i++) + vfe->wm_output_map[i] = VFE_LINE_NONE; +} + +static void vfe_set_qos(struct vfe_device *vfe) +{ + u32 val = VFE_0_BUS_BDG_QOS_CFG_0_CFG; + u32 val7 = VFE_0_BUS_BDG_QOS_CFG_7_CFG; + + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_0); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_1); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_2); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_3); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_4); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_5); + writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_6); + writel_relaxed(val7, vfe->base + VFE_0_BUS_BDG_QOS_CFG_7); +} + +static void vfe_set_cgc_override(struct vfe_device *vfe, u8 wm, u8 enable) +{ + u32 val = VFE_0_CGC_OVERRIDE_1_IMAGE_Mx_CGC_OVERRIDE(wm); + + if (enable) + vfe_reg_set(vfe, VFE_0_CGC_OVERRIDE_1, val); + else + vfe_reg_clr(vfe, VFE_0_CGC_OVERRIDE_1, val); + + wmb(); +} + +static void vfe_set_module_cfg(struct vfe_device *vfe, u8 enable) +{ + u32 val = VFE_0_MODULE_CFG_DEMUX | + VFE_0_MODULE_CFG_CHROMA_UPSAMPLE | + VFE_0_MODULE_CFG_SCALE_ENC | + VFE_0_MODULE_CFG_CROP_ENC; + + if (enable) + writel_relaxed(val, vfe->base + VFE_0_MODULE_CFG); + else + writel_relaxed(0x0, vfe->base + VFE_0_MODULE_CFG); +} + +static void vfe_set_camif_cfg(struct vfe_device *vfe, struct vfe_line *line) +{ + u32 val; + + switch (line->fmt[MSM_VFE_PAD_SINK].code) { + case MEDIA_BUS_FMT_YUYV8_2X8: + val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR; + break; + case MEDIA_BUS_FMT_YVYU8_2X8: + val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB; + break; + case MEDIA_BUS_FMT_UYVY8_2X8: + default: + val = VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY; + break; 
+ case MEDIA_BUS_FMT_VYUY8_2X8: + val = VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY; + break; + } + + writel_relaxed(val, vfe->base + VFE_0_CORE_CFG); + + val = line->fmt[MSM_VFE_PAD_SINK].width * 2; + val |= line->fmt[MSM_VFE_PAD_SINK].height << 16; + writel_relaxed(val, vfe->base + VFE_0_CAMIF_FRAME_CFG); + + val = line->fmt[MSM_VFE_PAD_SINK].width * 2 - 1; + writel_relaxed(val, vfe->base + VFE_0_CAMIF_WINDOW_WIDTH_CFG); + + val = line->fmt[MSM_VFE_PAD_SINK].height - 1; + writel_relaxed(val, vfe->base + VFE_0_CAMIF_WINDOW_HEIGHT_CFG); + + val = 0xffffffff; + writel_relaxed(val, vfe->base + VFE_0_CAMIF_SUBSAMPLE_CFG_0); + + val = 0xffffffff; + writel_relaxed(val, vfe->base + VFE_0_CAMIF_IRQ_SUBSAMPLE_PATTERN); + + val = VFE_0_RDI_CFG_x_MIPI_EN_BITS; + vfe_reg_set(vfe, VFE_0_RDI_CFG_x(0), val); + + val = VFE_0_CAMIF_CFG_VFE_OUTPUT_EN; + writel_relaxed(val, vfe->base + VFE_0_CAMIF_CFG); +} + +static void vfe_set_camif_cmd(struct vfe_device *vfe, u32 cmd) +{ + writel_relaxed(VFE_0_CAMIF_CMD_CLEAR_CAMIF_STATUS, + vfe->base + VFE_0_CAMIF_CMD); + + writel_relaxed(cmd, vfe->base + VFE_0_CAMIF_CMD); +} + +static int vfe_camif_wait_for_stop(struct vfe_device *vfe) +{ + u32 val; + int ret; + + ret = readl_poll_timeout(vfe->base + VFE_0_CAMIF_STATUS, + val, + (val & VFE_0_CAMIF_STATUS_HALT), + CAMIF_TIMEOUT_SLEEP_US, + CAMIF_TIMEOUT_ALL_US); + if (ret < 0) + dev_err(to_device(vfe), "%s: camif stop timeout\n", __func__); + + return ret; +} + +static void vfe_output_init_addrs(struct vfe_device *vfe, + struct vfe_output *output, u8 sync) +{ + u32 ping_addr; + u32 pong_addr; + unsigned int i; + + output->active_buf = 0; + + for (i = 0; i < output->wm_num; i++) { + if (output->buf[0]) + ping_addr = output->buf[0]->addr[i]; + else + ping_addr = 0; + + if (output->buf[1]) + pong_addr = output->buf[1]->addr[i]; + else + pong_addr = ping_addr; + + vfe_wm_set_ping_addr(vfe, output->wm_idx[i], ping_addr); + vfe_wm_set_pong_addr(vfe, output->wm_idx[i], pong_addr); + if (sync) + 
vfe_bus_reload_wm(vfe, output->wm_idx[i]); + } +} + +static void vfe_output_update_ping_addr(struct vfe_device *vfe, + struct vfe_output *output, u8 sync) +{ + u32 addr; + unsigned int i; + + for (i = 0; i < output->wm_num; i++) { + if (output->buf[0]) + addr = output->buf[0]->addr[i]; + else + addr = 0; + + vfe_wm_set_ping_addr(vfe, output->wm_idx[i], addr); + if (sync) + vfe_bus_reload_wm(vfe, output->wm_idx[i]); + } +} + +static void vfe_output_update_pong_addr(struct vfe_device *vfe, + struct vfe_output *output, u8 sync) +{ + u32 addr; + unsigned int i; + + for (i = 0; i < output->wm_num; i++) { + if (output->buf[1]) + addr = output->buf[1]->addr[i]; + else + addr = 0; + + vfe_wm_set_pong_addr(vfe, output->wm_idx[i], addr); + if (sync) + vfe_bus_reload_wm(vfe, output->wm_idx[i]); + } + +} + +static int vfe_reserve_wm(struct vfe_device *vfe, enum vfe_line_id line_id) +{ + int ret = -EBUSY; + int i; + + for (i = 0; i < ARRAY_SIZE(vfe->wm_output_map); i++) { + if (vfe->wm_output_map[i] == VFE_LINE_NONE) { + vfe->wm_output_map[i] = line_id; + ret = i; + break; + } + } + + return ret; +} + +static int vfe_release_wm(struct vfe_device *vfe, u8 wm) +{ + if (wm >= ARRAY_SIZE(vfe->wm_output_map)) + return -EINVAL; + + vfe->wm_output_map[wm] = VFE_LINE_NONE; + + return 0; +} + +static void vfe_output_frame_drop(struct vfe_device *vfe, + struct vfe_output *output, + u32 drop_pattern) +{ + u8 drop_period; + unsigned int i; + + /* We need to toggle update period to be valid on next frame */ + output->drop_update_idx++; + output->drop_update_idx %= VFE_FRAME_DROP_UPDATES; + drop_period = VFE_FRAME_DROP_VAL + output->drop_update_idx; + + for (i = 0; i < output->wm_num; i++) { + vfe_wm_set_framedrop_period(vfe, output->wm_idx[i], + drop_period); + vfe_wm_set_framedrop_pattern(vfe, output->wm_idx[i], + drop_pattern); + } + vfe_reg_update(vfe, container_of(output, struct vfe_line, output)->id); +} + +static struct camss_buffer *vfe_buf_get_pending(struct vfe_output *output) +{ + 
struct camss_buffer *buffer = NULL; + + if (!list_empty(&output->pending_bufs)) { + buffer = list_first_entry(&output->pending_bufs, + struct camss_buffer, + queue); + list_del(&buffer->queue); + } + + return buffer; +} + +/* + * vfe_buf_add_pending - Add output buffer to list of pending + * @output: VFE output + * @buffer: Video buffer + */ +static void vfe_buf_add_pending(struct vfe_output *output, + struct camss_buffer *buffer) +{ + INIT_LIST_HEAD(&buffer->queue); + list_add_tail(&buffer->queue, &output->pending_bufs); +} + +/* + * vfe_buf_flush_pending - Flush all pending buffers. + * @output: VFE output + * @state: vb2 buffer state + */ +static void vfe_buf_flush_pending(struct vfe_output *output, + enum vb2_buffer_state state) +{ + struct camss_buffer *buf; + struct camss_buffer *t; + + list_for_each_entry_safe(buf, t, &output->pending_bufs, queue) { + vb2_buffer_done(&buf->vb.vb2_buf, state); + list_del(&buf->queue); + } +} + +static void vfe_buf_update_wm_on_next(struct vfe_device *vfe, + struct vfe_output *output) +{ + switch (output->state) { + case VFE_OUTPUT_CONTINUOUS: + vfe_output_frame_drop(vfe, output, 3); + break; + case VFE_OUTPUT_SINGLE: + default: + dev_err_ratelimited(to_device(vfe), + "Next buf in wrong state! %d\n", + output->state); + break; + } +} + +static void vfe_buf_update_wm_on_last(struct vfe_device *vfe, + struct vfe_output *output) +{ + switch (output->state) { + case VFE_OUTPUT_CONTINUOUS: + output->state = VFE_OUTPUT_SINGLE; + vfe_output_frame_drop(vfe, output, 1); + break; + case VFE_OUTPUT_SINGLE: + output->state = VFE_OUTPUT_STOPPING; + vfe_output_frame_drop(vfe, output, 0); + break; + default: + dev_err_ratelimited(to_device(vfe), + "Last buff in wrong state! 
%d\n", + output->state); + break; + } +} + +static void vfe_buf_update_wm_on_new(struct vfe_device *vfe, + struct vfe_output *output, + struct camss_buffer *new_buf) +{ + int inactive_idx; + + switch (output->state) { + case VFE_OUTPUT_SINGLE: + inactive_idx = !output->active_buf; + + if (!output->buf[inactive_idx]) { + output->buf[inactive_idx] = new_buf; + + if (inactive_idx) + vfe_output_update_pong_addr(vfe, output, 0); + else + vfe_output_update_ping_addr(vfe, output, 0); + + vfe_output_frame_drop(vfe, output, 3); + output->state = VFE_OUTPUT_CONTINUOUS; + } else { + vfe_buf_add_pending(output, new_buf); + dev_err_ratelimited(to_device(vfe), + "Inactive buffer is busy\n"); + } + break; + + case VFE_OUTPUT_IDLE: + if (!output->buf[0]) { + output->buf[0] = new_buf; + + vfe_output_init_addrs(vfe, output, 1); + + vfe_output_frame_drop(vfe, output, 1); + output->state = VFE_OUTPUT_SINGLE; + } else { + vfe_buf_add_pending(output, new_buf); + dev_err_ratelimited(to_device(vfe), + "Output idle with buffer set!\n"); + } + break; + + case VFE_OUTPUT_CONTINUOUS: + default: + vfe_buf_add_pending(output, new_buf); + break; + } +} + +static int vfe_get_output(struct vfe_line *line) +{ + struct vfe_device *vfe = to_vfe(line); + struct vfe_output *output; + unsigned long flags; + int i; + int wm_idx; + + spin_lock_irqsave(&vfe->output_lock, flags); + + output = &line->output; + if (output->state != VFE_OUTPUT_OFF) { + dev_err(to_device(vfe), "Output is running\n"); + goto error; + } + output->state = VFE_OUTPUT_RESERVED; + + output->active_buf = 0; + + for (i = 0; i < output->wm_num; i++) { + wm_idx = vfe_reserve_wm(vfe, line->id); + if (wm_idx < 0) { + dev_err(to_device(vfe), "Can not reserve wm\n"); + goto error_get_wm; + } + output->wm_idx[i] = wm_idx; + } + + output->drop_update_idx = 0; + + spin_unlock_irqrestore(&vfe->output_lock, flags); + + return 0; + +error_get_wm: + for (i--; i >= 0; i--) + vfe_release_wm(vfe, output->wm_idx[i]); + output->state = VFE_OUTPUT_OFF; 
+error: + spin_unlock_irqrestore(&vfe->output_lock, flags); + + return -EINVAL; +} + +static int vfe_put_output(struct vfe_line *line) +{ + struct vfe_device *vfe = to_vfe(line); + struct vfe_output *output = &line->output; + unsigned long flags; + unsigned int i; + + spin_lock_irqsave(&vfe->output_lock, flags); + + for (i = 0; i < output->wm_num; i++) + vfe_release_wm(vfe, output->wm_idx[i]); + + output->state = VFE_OUTPUT_OFF; + + spin_unlock_irqrestore(&vfe->output_lock, flags); + return 0; +} + +static int vfe_enable_output(struct vfe_line *line) +{ + struct vfe_device *vfe = to_vfe(line); + struct vfe_output *output = &line->output; + unsigned long flags; + unsigned int i; + u16 ub_size; + + switch (vfe->id) { + case 0: + ub_size = MSM_VFE_VFE0_UB_SIZE_RDI; + break; + case 1: + ub_size = MSM_VFE_VFE1_UB_SIZE_RDI; + break; + default: + return -EINVAL; + } + + spin_lock_irqsave(&vfe->output_lock, flags); + + vfe->reg_update &= ~VFE_0_REG_UPDATE_line_n(line->id); + + if (output->state != VFE_OUTPUT_RESERVED) { + dev_err(to_device(vfe), "Output is not in reserved state %d\n", + output->state); + spin_unlock_irqrestore(&vfe->output_lock, flags); + return -EINVAL; + } + output->state = VFE_OUTPUT_IDLE; + + output->buf[0] = vfe_buf_get_pending(output); + output->buf[1] = vfe_buf_get_pending(output); + + if (!output->buf[0] && output->buf[1]) { + output->buf[0] = output->buf[1]; + output->buf[1] = NULL; + } + + if (output->buf[0]) + output->state = VFE_OUTPUT_SINGLE; + + if (output->buf[1]) + output->state = VFE_OUTPUT_CONTINUOUS; + + switch (output->state) { + case VFE_OUTPUT_SINGLE: + /* Skip 4 bad frames from sensor */ + vfe_output_frame_drop(vfe, output, 1 << 4); + break; + case VFE_OUTPUT_CONTINUOUS: + /* Skip 4 bad frames from sensor */ + vfe_output_frame_drop(vfe, output, 3 << 4); + break; + default: + vfe_output_frame_drop(vfe, output, 0); + break; + } + + output->sequence = 0; + output->wait_sof = 0; + output->wait_reg_update = 0; + 
reinit_completion(&output->sof); + reinit_completion(&output->reg_update); + + vfe_output_init_addrs(vfe, output, 0); + + if (line->id != VFE_LINE_PIX) { + vfe_set_cgc_override(vfe, output->wm_idx[0], 1); + vfe_enable_irq_wm_line(vfe, output->wm_idx[0], line->id, 1); + vfe_bus_connect_wm_to_rdi(vfe, output->wm_idx[0], line->id); + vfe_wm_set_subsample(vfe, output->wm_idx[0]); + vfe_set_rdi_cid(vfe, line->id, 0); + vfe_wm_set_ub_cfg(vfe, output->wm_idx[0], + (ub_size + 1) * output->wm_idx[0], ub_size); + vfe_wm_frame_based(vfe, output->wm_idx[0], 1); + vfe_wm_enable(vfe, output->wm_idx[0], 1); + vfe_bus_reload_wm(vfe, output->wm_idx[0]); + } else { + ub_size /= output->wm_num; + for (i = 0; i < output->wm_num; i++) { + vfe_set_cgc_override(vfe, output->wm_idx[i], 1); + vfe_wm_set_subsample(vfe, output->wm_idx[i]); + vfe_wm_set_ub_cfg(vfe, output->wm_idx[i], + (ub_size + 1) * output->wm_idx[i], + ub_size); + vfe_wm_line_based(vfe, output->wm_idx[i], + &line->video_out.active_fmt.fmt.pix_mp, + i, 1); + vfe_wm_enable(vfe, output->wm_idx[i], 1); + vfe_bus_reload_wm(vfe, output->wm_idx[i]); + } + vfe_enable_irq_pix_line(vfe, 0, line->id, 1); + vfe_set_module_cfg(vfe, 1); + vfe_set_camif_cfg(vfe, line); + vfe_set_xbar_cfg(vfe, output, 1); + vfe_set_demux_cfg(vfe, line); + vfe_set_scale_cfg(vfe, line); + vfe_set_crop_cfg(vfe, line); + vfe_set_clamp_cfg(vfe); + vfe_set_camif_cmd(vfe, VFE_0_CAMIF_CMD_ENABLE_FRAME_BOUNDARY); + } + + vfe_reg_update(vfe, line->id); + + spin_unlock_irqrestore(&vfe->output_lock, flags); + + return 0; +} + +static int vfe_disable_output(struct vfe_line *line) +{ + struct vfe_device *vfe = to_vfe(line); + struct vfe_output *output = &line->output; + unsigned long flags; + unsigned long time; + unsigned int i; + + spin_lock_irqsave(&vfe->output_lock, flags); + + output->wait_sof = 1; + spin_unlock_irqrestore(&vfe->output_lock, flags); + + time = wait_for_completion_timeout(&output->sof, + msecs_to_jiffies(VFE_NEXT_SOF_MS)); + if (!time) + 
dev_err(to_device(vfe), "VFE sof timeout\n"); + + spin_lock_irqsave(&vfe->output_lock, flags); + for (i = 0; i < output->wm_num; i++) + vfe_wm_enable(vfe, output->wm_idx[i], 0); + + vfe_reg_update(vfe, line->id); + output->wait_reg_update = 1; + spin_unlock_irqrestore(&vfe->output_lock, flags); + + time = wait_for_completion_timeout(&output->reg_update, + msecs_to_jiffies(VFE_NEXT_SOF_MS)); + if (!time) + dev_err(to_device(vfe), "VFE reg update timeout\n"); + + spin_lock_irqsave(&vfe->output_lock, flags); + + if (line->id != VFE_LINE_PIX) { + vfe_wm_frame_based(vfe, output->wm_idx[0], 0); + vfe_bus_disconnect_wm_from_rdi(vfe, output->wm_idx[0], line->id); + vfe_enable_irq_wm_line(vfe, output->wm_idx[0], line->id, 0); + vfe_set_cgc_override(vfe, output->wm_idx[0], 0); + spin_unlock_irqrestore(&vfe->output_lock, flags); + } else { + for (i = 0; i < output->wm_num; i++) { + vfe_wm_line_based(vfe, output->wm_idx[i], NULL, i, 0); + vfe_set_cgc_override(vfe, output->wm_idx[i], 0); + } + + vfe_enable_irq_pix_line(vfe, 0, line->id, 0); + vfe_set_module_cfg(vfe, 0); + vfe_set_xbar_cfg(vfe, output, 0); + + vfe_set_camif_cmd(vfe, VFE_0_CAMIF_CMD_DISABLE_FRAME_BOUNDARY); + spin_unlock_irqrestore(&vfe->output_lock, flags); + + vfe_camif_wait_for_stop(vfe); + } + + return 0; +} + +/* + * vfe_enable - Enable streaming on VFE line + * @line: VFE line + * + * Return 0 on success or a negative error code otherwise + */ +static int vfe_enable(struct vfe_line *line) +{ + struct vfe_device *vfe = to_vfe(line); + int ret; + + mutex_lock(&vfe->stream_lock); + + if (!vfe->stream_count) { + vfe_enable_irq_common(vfe); + + vfe_bus_enable_wr_if(vfe, 1); + + vfe_set_qos(vfe); + } + + vfe->stream_count++; + + mutex_unlock(&vfe->stream_lock); + + ret = vfe_get_output(line); + if (ret < 0) + goto error_get_output; + + ret = vfe_enable_output(line); + if (ret < 0) + goto error_enable_output; + + vfe->was_streaming = 1; + + return 0; + + +error_enable_output: + vfe_put_output(line); + 
+error_get_output: + mutex_lock(&vfe->stream_lock); + + if (vfe->stream_count == 1) + vfe_bus_enable_wr_if(vfe, 0); + + vfe->stream_count--; + + mutex_unlock(&vfe->stream_lock); + + return ret; +} + +/* + * vfe_disable - Disable streaming on VFE line + * @line: VFE line + * + * Return 0 on success or a negative error code otherwise + */ +static int vfe_disable(struct vfe_line *line) +{ + struct vfe_device *vfe = to_vfe(line); + + vfe_disable_output(line); + + vfe_put_output(line); + + mutex_lock(&vfe->stream_lock); + + if (vfe->stream_count == 1) + vfe_bus_enable_wr_if(vfe, 0); + + vfe->stream_count--; + + mutex_unlock(&vfe->stream_lock); + + return 0; +} + +/* + * vfe_isr_sof - Process start of frame interrupt + * @vfe: VFE Device + * @line_id: VFE line + */ +static void vfe_isr_sof(struct vfe_device *vfe, enum vfe_line_id line_id) +{ + struct vfe_output *output; + unsigned long flags; + + spin_lock_irqsave(&vfe->output_lock, flags); + output = &vfe->line[line_id].output; + if (output->wait_sof) { + output->wait_sof = 0; + complete(&output->sof); + } + spin_unlock_irqrestore(&vfe->output_lock, flags); +} + +/* + * vfe_isr_reg_update - Process reg update interrupt + * @vfe: VFE Device + * @line_id: VFE line + */ +static void vfe_isr_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id) +{ + struct vfe_output *output; + unsigned long flags; + + spin_lock_irqsave(&vfe->output_lock, flags); + vfe->reg_update &= ~VFE_0_REG_UPDATE_line_n(line_id); + + output = &vfe->line[line_id].output; + + if (output->wait_reg_update) { + output->wait_reg_update = 0; + complete(&output->reg_update); + spin_unlock_irqrestore(&vfe->output_lock, flags); + return; + } + + if (output->state == VFE_OUTPUT_STOPPING) { + /* Release last buffer when hw is idle */ + if (output->last_buffer) { + vb2_buffer_done(&output->last_buffer->vb.vb2_buf, + VB2_BUF_STATE_DONE); + output->last_buffer = NULL; + } + output->state = VFE_OUTPUT_IDLE; + + /* Buffers received in stopping state are queued 
in */ + /* dma pending queue, start next capture here */ + + output->buf[0] = vfe_buf_get_pending(output); + output->buf[1] = vfe_buf_get_pending(output); + + if (!output->buf[0] && output->buf[1]) { + output->buf[0] = output->buf[1]; + output->buf[1] = NULL; + } + + if (output->buf[0]) + output->state = VFE_OUTPUT_SINGLE; + + if (output->buf[1]) + output->state = VFE_OUTPUT_CONTINUOUS; + + switch (output->state) { + case VFE_OUTPUT_SINGLE: + vfe_output_frame_drop(vfe, output, 2); + break; + case VFE_OUTPUT_CONTINUOUS: + vfe_output_frame_drop(vfe, output, 3); + break; + default: + vfe_output_frame_drop(vfe, output, 0); + break; + } + + vfe_output_init_addrs(vfe, output, 1); + } + + spin_unlock_irqrestore(&vfe->output_lock, flags); +} + +/* + * vfe_isr_wm_done - Process write master done interrupt + * @vfe: VFE Device + * @wm: Write master id + */ +static void vfe_isr_wm_done(struct vfe_device *vfe, u8 wm) +{ + struct camss_buffer *ready_buf; + struct vfe_output *output; + dma_addr_t *new_addr; + unsigned long flags; + u32 active_index; + u64 ts = ktime_get_ns(); + unsigned int i; + + active_index = vfe_wm_get_ping_pong_status(vfe, wm); + + spin_lock_irqsave(&vfe->output_lock, flags); + + if (vfe->wm_output_map[wm] == VFE_LINE_NONE) { + dev_err_ratelimited(to_device(vfe), + "Received wm done for unmapped index\n"); + goto out_unlock; + } + output = &vfe->line[vfe->wm_output_map[wm]].output; + + if (output->active_buf == active_index) { + dev_err_ratelimited(to_device(vfe), + "Active buffer mismatch!\n"); + goto out_unlock; + } + output->active_buf = active_index; + + ready_buf = output->buf[!active_index]; + if (!ready_buf) { + dev_err_ratelimited(to_device(vfe), + "Missing ready buf %d %d!\n", + !active_index, output->state); + goto out_unlock; + } + + ready_buf->vb.vb2_buf.timestamp = ts; + ready_buf->vb.sequence = output->sequence++; + + /* Get next buffer */ + output->buf[!active_index] = vfe_buf_get_pending(output); + if (!output->buf[!active_index]) { + /* No 
next buffer - set same address */ + new_addr = ready_buf->addr; + vfe_buf_update_wm_on_last(vfe, output); + } else { + new_addr = output->buf[!active_index]->addr; + vfe_buf_update_wm_on_next(vfe, output); + } + + if (active_index) + for (i = 0; i < output->wm_num; i++) + vfe_wm_set_ping_addr(vfe, output->wm_idx[i], + new_addr[i]); + else + for (i = 0; i < output->wm_num; i++) + vfe_wm_set_pong_addr(vfe, output->wm_idx[i], + new_addr[i]); + + spin_unlock_irqrestore(&vfe->output_lock, flags); + + if (output->state == VFE_OUTPUT_STOPPING) + output->last_buffer = ready_buf; + else + vb2_buffer_done(&ready_buf->vb.vb2_buf, VB2_BUF_STATE_DONE); + + return; + +out_unlock: + spin_unlock_irqrestore(&vfe->output_lock, flags); +} + +/* + * vfe_isr_wm_done - Process composite image done interrupt + * @vfe: VFE Device + * @comp: Composite image id + */ +static void vfe_isr_comp_done(struct vfe_device *vfe, u8 comp) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(vfe->wm_output_map); i++) + if (vfe->wm_output_map[i] == VFE_LINE_PIX) { + vfe_isr_wm_done(vfe, i); + break; + } +} + +/* + * vfe_isr - ISPIF module interrupt handler + * @irq: Interrupt line + * @dev: VFE device + * + * Return IRQ_HANDLED on success + */ +static irqreturn_t vfe_isr(int irq, void *dev) +{ + struct vfe_device *vfe = dev; + u32 value0, value1; + u32 violation; + int i, j; + + value0 = readl_relaxed(vfe->base + VFE_0_IRQ_STATUS_0); + value1 = readl_relaxed(vfe->base + VFE_0_IRQ_STATUS_1); + + writel_relaxed(value0, vfe->base + VFE_0_IRQ_CLEAR_0); + writel_relaxed(value1, vfe->base + VFE_0_IRQ_CLEAR_1); + + wmb(); + writel_relaxed(VFE_0_IRQ_CMD_GLOBAL_CLEAR, vfe->base + VFE_0_IRQ_CMD); + + if (value0 & VFE_0_IRQ_STATUS_0_RESET_ACK) + complete(&vfe->reset_complete); + + if (value1 & VFE_0_IRQ_STATUS_1_VIOLATION) { + violation = readl_relaxed(vfe->base + VFE_0_VIOLATION_STATUS); + dev_err_ratelimited(to_device(vfe), + "VFE: violation = 0x%08x\n", violation); + } + + if (value1 & 
VFE_0_IRQ_STATUS_1_BUS_BDG_HALT_ACK) { + complete(&vfe->halt_complete); + writel_relaxed(0x0, vfe->base + VFE_0_BUS_BDG_CMD); + } + + for (i = VFE_LINE_RDI0; i <= VFE_LINE_PIX; i++) + if (value0 & VFE_0_IRQ_STATUS_0_line_n_REG_UPDATE(i)) + vfe_isr_reg_update(vfe, i); + + if (value0 & VFE_0_IRQ_STATUS_0_CAMIF_SOF) + vfe_isr_sof(vfe, VFE_LINE_PIX); + + for (i = VFE_LINE_RDI0; i <= VFE_LINE_RDI2; i++) + if (value1 & VFE_0_IRQ_STATUS_1_RDIn_SOF(i)) + vfe_isr_sof(vfe, i); + + for (i = 0; i < MSM_VFE_COMPOSITE_IRQ_NUM; i++) + if (value0 & VFE_0_IRQ_STATUS_0_IMAGE_COMPOSITE_DONE_n(i)) { + vfe_isr_comp_done(vfe, i); + for (j = 0; j < ARRAY_SIZE(vfe->wm_output_map); j++) + if (vfe->wm_output_map[j] == VFE_LINE_PIX) + value0 &= ~VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(j); + } + + for (i = 0; i < MSM_VFE_IMAGE_MASTERS_NUM; i++) + if (value0 & VFE_0_IRQ_STATUS_0_IMAGE_MASTER_n_PING_PONG(i)) + vfe_isr_wm_done(vfe, i); + + return IRQ_HANDLED; +} + +/* + * vfe_set_clock_rates - Calculate and set clock rates on VFE module + * @vfe: VFE device + * + * Return 0 on success or a negative error code otherwise + */ +static int vfe_set_clock_rates(struct vfe_device *vfe) +{ + struct device *dev = to_device(vfe); + u32 pixel_clock[MSM_VFE_LINE_NUM]; + int i, j; + int ret; + + for (i = VFE_LINE_RDI0; i <= VFE_LINE_PIX; i++) { + ret = camss_get_pixel_clock(&vfe->line[i].subdev.entity, + &pixel_clock[i]); + if (ret) + pixel_clock[i] = 0; + } + + for (i = 0; i < vfe->nclocks; i++) { + struct camss_clock *clock = &vfe->clock[i]; + + if (!strcmp(clock->name, "camss_vfe_vfe")) { + u64 min_rate = 0; + long rate; + + for (j = VFE_LINE_RDI0; j <= VFE_LINE_PIX; j++) { + u32 tmp; + u8 bpp; + + if (j == VFE_LINE_PIX) { + tmp = pixel_clock[j]; + } else { + bpp = vfe_get_bpp(vfe->line[j]. 
+ fmt[MSM_VFE_PAD_SINK].code); + tmp = pixel_clock[j] * bpp / 64; + } + + if (min_rate < tmp) + min_rate = tmp; + } + + camss_add_clock_margin(&min_rate); + + for (j = 0; j < clock->nfreqs; j++) + if (min_rate < clock->freq[j]) + break; + + if (j == clock->nfreqs) { + dev_err(dev, + "Pixel clock is too high for VFE"); + return -EINVAL; + } + + /* if sensor pixel clock is not available */ + /* set highest possible VFE clock rate */ + if (min_rate == 0) + j = clock->nfreqs - 1; + + rate = clk_round_rate(clock->clk, clock->freq[j]); + if (rate < 0) { + dev_err(dev, "clk round rate failed: %ld\n", + rate); + return -EINVAL; + } + + ret = clk_set_rate(clock->clk, rate); + if (ret < 0) { + dev_err(dev, "clk set rate failed: %d\n", ret); + return ret; + } + } + } + + return 0; +} + +/* + * vfe_check_clock_rates - Check current clock rates on VFE module + * @vfe: VFE device + * + * Return 0 if current clock rates are suitable for a new pipeline + * or a negative error code otherwise + */ +static int vfe_check_clock_rates(struct vfe_device *vfe) +{ + u32 pixel_clock[MSM_VFE_LINE_NUM]; + int i, j; + int ret; + + for (i = VFE_LINE_RDI0; i <= VFE_LINE_PIX; i++) { + ret = camss_get_pixel_clock(&vfe->line[i].subdev.entity, + &pixel_clock[i]); + if (ret) + pixel_clock[i] = 0; + } + + for (i = 0; i < vfe->nclocks; i++) { + struct camss_clock *clock = &vfe->clock[i]; + + if (!strcmp(clock->name, "camss_vfe_vfe")) { + u64 min_rate = 0; + unsigned long rate; + + for (j = VFE_LINE_RDI0; j <= VFE_LINE_PIX; j++) { + u32 tmp; + u8 bpp; + + if (j == VFE_LINE_PIX) { + tmp = pixel_clock[j]; + } else { + bpp = vfe_get_bpp(vfe->line[j]. 
+ fmt[MSM_VFE_PAD_SINK].code); + tmp = pixel_clock[j] * bpp / 64; + } + + if (min_rate < tmp) + min_rate = tmp; + } + + camss_add_clock_margin(&min_rate); + + rate = clk_get_rate(clock->clk); + if (rate < min_rate) + return -EBUSY; + } + } + + return 0; +} + +/* + * vfe_get - Power up and reset VFE module + * @vfe: VFE Device + * + * Return 0 on success or a negative error code otherwise + */ +static int vfe_get(struct vfe_device *vfe) +{ + int ret; + + mutex_lock(&vfe->power_lock); + + if (vfe->power_count == 0) { + ret = vfe_set_clock_rates(vfe); + if (ret < 0) + goto error_clocks; + + ret = camss_enable_clocks(vfe->nclocks, vfe->clock, + to_device(vfe)); + if (ret < 0) + goto error_clocks; + + ret = vfe_reset(vfe); + if (ret < 0) + goto error_reset; + + vfe_reset_output_maps(vfe); + + vfe_init_outputs(vfe); + } else { + ret = vfe_check_clock_rates(vfe); + if (ret < 0) + goto error_clocks; + } + vfe->power_count++; + + mutex_unlock(&vfe->power_lock); + + return 0; + +error_reset: + camss_disable_clocks(vfe->nclocks, vfe->clock); + +error_clocks: + mutex_unlock(&vfe->power_lock); + + return ret; +} + +/* + * vfe_put - Power down VFE module + * @vfe: VFE Device + */ +static void vfe_put(struct vfe_device *vfe) +{ + mutex_lock(&vfe->power_lock); + + if (vfe->power_count == 0) { + dev_err(to_device(vfe), "vfe power off on power_count == 0\n"); + goto exit; + } else if (vfe->power_count == 1) { + if (vfe->was_streaming) { + vfe->was_streaming = 0; + vfe_halt(vfe); + } + camss_disable_clocks(vfe->nclocks, vfe->clock); + } + + vfe->power_count--; + +exit: + mutex_unlock(&vfe->power_lock); +} + +/* + * vfe_video_pad_to_line - Get pointer to VFE line by media pad + * @pad: Media pad + * + * Return pointer to vfe line structure + */ +static struct vfe_line *vfe_video_pad_to_line(struct media_pad *pad) +{ + struct media_pad *vfe_pad; + struct v4l2_subdev *subdev; + + vfe_pad = media_entity_remote_pad(pad); + if (vfe_pad == NULL) + return NULL; + + subdev = 
media_entity_to_v4l2_subdev(vfe_pad->entity); + + return container_of(subdev, struct vfe_line, subdev); +} + +/* + * vfe_queue_buffer - Add empty buffer + * @vid: Video device structure + * @buf: Buffer to be enqueued + * + * Add an empty buffer - depending on the current number of buffers it will be + * put in pending buffer queue or directly given to the hardware to be filled. + * + * Return 0 on success or a negative error code otherwise + */ +static int vfe_queue_buffer(struct camss_video *vid, + struct camss_buffer *buf) +{ + struct vfe_device *vfe = &vid->camss->vfe; + struct vfe_line *line; + struct vfe_output *output; + unsigned long flags; + + line = vfe_video_pad_to_line(&vid->pad); + if (!line) { + dev_err(to_device(vfe), "Can not queue buffer\n"); + return -1; + } + output = &line->output; + + spin_lock_irqsave(&vfe->output_lock, flags); + + vfe_buf_update_wm_on_new(vfe, output, buf); + + spin_unlock_irqrestore(&vfe->output_lock, flags); + + return 0; +} + +/* + * vfe_flush_buffers - Return all vb2 buffers + * @vid: Video device structure + * @state: vb2 buffer state of the returned buffers + * + * Return all buffers to vb2. This includes queued pending buffers (still + * unused) and any buffers given to the hardware but again still not used. 
+ * + * Return 0 on success or a negative error code otherwise + */ +static int vfe_flush_buffers(struct camss_video *vid, + enum vb2_buffer_state state) +{ + struct vfe_device *vfe = &vid->camss->vfe; + struct vfe_line *line; + struct vfe_output *output; + unsigned long flags; + + line = vfe_video_pad_to_line(&vid->pad); + if (!line) { + dev_err(to_device(vfe), "Can not flush buffers\n"); + return -1; + } + output = &line->output; + + spin_lock_irqsave(&vfe->output_lock, flags); + + vfe_buf_flush_pending(output, state); + + if (output->buf[0]) + vb2_buffer_done(&output->buf[0]->vb.vb2_buf, state); + + if (output->buf[1]) + vb2_buffer_done(&output->buf[1]->vb.vb2_buf, state); + + if (output->last_buffer) { + vb2_buffer_done(&output->last_buffer->vb.vb2_buf, state); + output->last_buffer = NULL; + } + + spin_unlock_irqrestore(&vfe->output_lock, flags); + + return 0; +} + +/* + * vfe_set_power - Power on/off VFE module + * @sd: VFE V4L2 subdevice + * @on: Requested power state + * + * Return 0 on success or a negative error code otherwise + */ +static int vfe_set_power(struct v4l2_subdev *sd, int on) +{ + struct vfe_line *line = v4l2_get_subdevdata(sd); + struct vfe_device *vfe = to_vfe(line); + int ret; + + if (on) { + u32 hw_version; + + ret = vfe_get(vfe); + if (ret < 0) + return ret; + + hw_version = readl_relaxed(vfe->base + VFE_0_HW_VERSION); + dev_dbg(to_device(vfe), + "VFE HW Version = 0x%08x\n", hw_version); + } else { + vfe_put(vfe); + } + + return 0; +} + +/* + * vfe_set_stream - Enable/disable streaming on VFE module + * @sd: VFE V4L2 subdevice + * @enable: Requested streaming state + * + * Main configuration of VFE module is triggered here. 
+ * + * Return 0 on success or a negative error code otherwise + */ +static int vfe_set_stream(struct v4l2_subdev *sd, int enable) +{ + struct vfe_line *line = v4l2_get_subdevdata(sd); + struct vfe_device *vfe = to_vfe(line); + int ret; + + if (enable) { + ret = vfe_enable(line); + if (ret < 0) + dev_err(to_device(vfe), + "Failed to enable vfe outputs\n"); + } else { + ret = vfe_disable(line); + if (ret < 0) + dev_err(to_device(vfe), + "Failed to disable vfe outputs\n"); + } + + return ret; +} + +/* + * __vfe_get_format - Get pointer to format structure + * @line: VFE line + * @cfg: V4L2 subdev pad configuration + * @pad: pad from which format is requested + * @which: TRY or ACTIVE format + * + * Return pointer to TRY or ACTIVE format structure + */ +static struct v4l2_mbus_framefmt * +__vfe_get_format(struct vfe_line *line, + struct v4l2_subdev_pad_config *cfg, + unsigned int pad, + enum v4l2_subdev_format_whence which) +{ + if (which == V4L2_SUBDEV_FORMAT_TRY) + return v4l2_subdev_get_try_format(&line->subdev, cfg, pad); + + return &line->fmt[pad]; +} + +/* + * __vfe_get_compose - Get pointer to compose selection structure + * @line: VFE line + * @cfg: V4L2 subdev pad configuration + * @which: TRY or ACTIVE format + * + * Return pointer to TRY or ACTIVE compose rectangle structure + */ +static struct v4l2_rect * +__vfe_get_compose(struct vfe_line *line, + struct v4l2_subdev_pad_config *cfg, + enum v4l2_subdev_format_whence which) +{ + if (which == V4L2_SUBDEV_FORMAT_TRY) + return v4l2_subdev_get_try_compose(&line->subdev, cfg, + MSM_VFE_PAD_SINK); + + return &line->compose; +} + +/* + * __vfe_get_crop - Get pointer to crop selection structure + * @line: VFE line + * @cfg: V4L2 subdev pad configuration + * @which: TRY or ACTIVE format + * + * Return pointer to TRY or ACTIVE crop rectangle structure + */ +static struct v4l2_rect * +__vfe_get_crop(struct vfe_line *line, + struct v4l2_subdev_pad_config *cfg, + enum v4l2_subdev_format_whence which) +{ + if (which == 
V4L2_SUBDEV_FORMAT_TRY) + return v4l2_subdev_get_try_crop(&line->subdev, cfg, + MSM_VFE_PAD_SRC); + + return &line->crop; +} + +/* + * vfe_try_format - Handle try format by pad subdev method + * @line: VFE line + * @cfg: V4L2 subdev pad configuration + * @pad: pad on which format is requested + * @fmt: pointer to v4l2 format structure + * @which: wanted subdev format + */ +static void vfe_try_format(struct vfe_line *line, + struct v4l2_subdev_pad_config *cfg, + unsigned int pad, + struct v4l2_mbus_framefmt *fmt, + enum v4l2_subdev_format_whence which) +{ + unsigned int i; + u32 code; + + switch (pad) { + case MSM_VFE_PAD_SINK: + /* Set format on sink pad */ + + for (i = 0; i < ARRAY_SIZE(vfe_formats); i++) + if (fmt->code == vfe_formats[i].code) + break; + + /* If not found, use UYVY as default */ + if (i >= ARRAY_SIZE(vfe_formats)) + fmt->code = MEDIA_BUS_FMT_UYVY8_2X8; + + fmt->width = clamp_t(u32, fmt->width, 1, 8191); + fmt->height = clamp_t(u32, fmt->height, 1, 8191); + + fmt->field = V4L2_FIELD_NONE; + fmt->colorspace = V4L2_COLORSPACE_SRGB; + + break; + + case MSM_VFE_PAD_SRC: + /* Set and return a format same as sink pad */ + + code = fmt->code; + + *fmt = *__vfe_get_format(line, cfg, MSM_VFE_PAD_SINK, + which); + + if (line->id == VFE_LINE_PIX) { + struct v4l2_rect *rect; + + rect = __vfe_get_crop(line, cfg, which); + + fmt->width = rect->width; + fmt->height = rect->height; + + switch (fmt->code) { + case MEDIA_BUS_FMT_YUYV8_2X8: + if (code == MEDIA_BUS_FMT_YUYV8_1_5X8) + fmt->code = MEDIA_BUS_FMT_YUYV8_1_5X8; + else + fmt->code = MEDIA_BUS_FMT_YUYV8_2X8; + break; + case MEDIA_BUS_FMT_YVYU8_2X8: + if (code == MEDIA_BUS_FMT_YVYU8_1_5X8) + fmt->code = MEDIA_BUS_FMT_YVYU8_1_5X8; + else + fmt->code = MEDIA_BUS_FMT_YVYU8_2X8; + break; + case MEDIA_BUS_FMT_UYVY8_2X8: + default: + if (code == MEDIA_BUS_FMT_UYVY8_1_5X8) + fmt->code = MEDIA_BUS_FMT_UYVY8_1_5X8; + else + fmt->code = MEDIA_BUS_FMT_UYVY8_2X8; + break; + case MEDIA_BUS_FMT_VYUY8_2X8: + if (code == 
MEDIA_BUS_FMT_VYUY8_1_5X8) + fmt->code = MEDIA_BUS_FMT_VYUY8_1_5X8; + else + fmt->code = MEDIA_BUS_FMT_VYUY8_2X8; + break; + } + } + + break; + } + + fmt->colorspace = V4L2_COLORSPACE_SRGB; +} + +/* + * vfe_try_compose - Handle try compose selection by pad subdev method + * @line: VFE line + * @cfg: V4L2 subdev pad configuration + * @rect: pointer to v4l2 rect structure + * @which: wanted subdev format + */ +static void vfe_try_compose(struct vfe_line *line, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_rect *rect, + enum v4l2_subdev_format_whence which) +{ + struct v4l2_mbus_framefmt *fmt; + + fmt = __vfe_get_format(line, cfg, MSM_VFE_PAD_SINK, which); + + if (rect->width > fmt->width) + rect->width = fmt->width; + + if (rect->height > fmt->height) + rect->height = fmt->height; + + if (fmt->width > rect->width * SCALER_RATIO_MAX) + rect->width = (fmt->width + SCALER_RATIO_MAX - 1) / + SCALER_RATIO_MAX; + + rect->width &= ~0x1; + + if (fmt->height > rect->height * SCALER_RATIO_MAX) + rect->height = (fmt->height + SCALER_RATIO_MAX - 1) / + SCALER_RATIO_MAX; + + if (rect->width < 16) + rect->width = 16; + + if (rect->height < 4) + rect->height = 4; +} + +/* + * vfe_try_crop - Handle try crop selection by pad subdev method + * @line: VFE line + * @cfg: V4L2 subdev pad configuration + * @rect: pointer to v4l2 rect structure + * @which: wanted subdev format + */ +static void vfe_try_crop(struct vfe_line *line, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_rect *rect, + enum v4l2_subdev_format_whence which) +{ + struct v4l2_rect *compose; + + compose = __vfe_get_compose(line, cfg, which); + + if (rect->width > compose->width) + rect->width = compose->width; + + if (rect->width + rect->left > compose->width) + rect->left = compose->width - rect->width; + + if (rect->height > compose->height) + rect->height = compose->height; + + if (rect->height + rect->top > compose->height) + rect->top = compose->height - rect->height; + + /* wm in line based mode writes 
multiple of 16 horizontally */ + rect->left += (rect->width & 0xf) >> 1; + rect->width &= ~0xf; + + if (rect->width < 16) { + rect->left = 0; + rect->width = 16; + } + + if (rect->height < 4) { + rect->top = 0; + rect->height = 4; + } +} + +/* + * vfe_enum_mbus_code - Handle pixel format enumeration + * @sd: VFE V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @code: pointer to v4l2_subdev_mbus_code_enum structure + * + * return -EINVAL or zero on success + */ +static int vfe_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_mbus_code_enum *code) +{ + struct vfe_line *line = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt *format; + + if (code->pad == MSM_VFE_PAD_SINK) { + if (code->index >= ARRAY_SIZE(vfe_formats)) + return -EINVAL; + + code->code = vfe_formats[code->index].code; + } else { + if (code->index > 0) + return -EINVAL; + + format = __vfe_get_format(line, cfg, MSM_VFE_PAD_SINK, + code->which); + + code->code = format->code; + } + + return 0; +} + +/* + * vfe_enum_frame_size - Handle frame size enumeration + * @sd: VFE V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @fse: pointer to v4l2_subdev_frame_size_enum structure + * + * Return -EINVAL or zero on success + */ +static int vfe_enum_frame_size(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_frame_size_enum *fse) +{ + struct vfe_line *line = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt format; + + if (fse->index != 0) + return -EINVAL; + + format.code = fse->code; + format.width = 1; + format.height = 1; + vfe_try_format(line, cfg, fse->pad, &format, fse->which); + fse->min_width = format.width; + fse->min_height = format.height; + + if (format.code != fse->code) + return -EINVAL; + + format.code = fse->code; + format.width = -1; + format.height = -1; + vfe_try_format(line, cfg, fse->pad, &format, fse->which); + fse->max_width = format.width; + fse->max_height = format.height; + + 
return 0; +} + +/* + * vfe_get_format - Handle get format by pads subdev method + * @sd: VFE V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @fmt: pointer to v4l2 subdev format structure + * + * Return -EINVAL or zero on success + */ +static int vfe_get_format(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct vfe_line *line = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt *format; + + format = __vfe_get_format(line, cfg, fmt->pad, fmt->which); + if (format == NULL) + return -EINVAL; + + fmt->format = *format; + + return 0; +} + +static int vfe_set_selection(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_selection *sel); + +/* + * vfe_set_format - Handle set format by pads subdev method + * @sd: VFE V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @fmt: pointer to v4l2 subdev format structure + * + * Return -EINVAL or zero on success + */ +static int vfe_set_format(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct vfe_line *line = v4l2_get_subdevdata(sd); + struct v4l2_mbus_framefmt *format; + + format = __vfe_get_format(line, cfg, fmt->pad, fmt->which); + if (format == NULL) + return -EINVAL; + + vfe_try_format(line, cfg, fmt->pad, &fmt->format, fmt->which); + *format = fmt->format; + + if (fmt->pad == MSM_VFE_PAD_SINK) { + struct v4l2_subdev_selection sel = { 0 }; + int ret; + + /* Propagate the format from sink to source */ + format = __vfe_get_format(line, cfg, MSM_VFE_PAD_SRC, + fmt->which); + + *format = fmt->format; + vfe_try_format(line, cfg, MSM_VFE_PAD_SRC, format, + fmt->which); + + if (line->id != VFE_LINE_PIX) + return 0; + + /* Reset sink pad compose selection */ + sel.which = fmt->which; + sel.pad = MSM_VFE_PAD_SINK; + sel.target = V4L2_SEL_TGT_COMPOSE; + sel.r.width = fmt->format.width; + sel.r.height = fmt->format.height; + ret = vfe_set_selection(sd, cfg, &sel); + if (ret 
< 0) + return ret; + } + + return 0; +} + +/* + * vfe_get_selection - Handle get selection by pads subdev method + * @sd: VFE V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @sel: pointer to v4l2 subdev selection structure + * + * Return -EINVAL or zero on success + */ +static int vfe_get_selection(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_selection *sel) +{ + struct vfe_line *line = v4l2_get_subdevdata(sd); + struct v4l2_subdev_format fmt = { 0 }; + struct v4l2_rect *rect; + int ret; + + if (line->id != VFE_LINE_PIX) + return -EINVAL; + + if (sel->pad == MSM_VFE_PAD_SINK) + switch (sel->target) { + case V4L2_SEL_TGT_COMPOSE_BOUNDS: + fmt.pad = sel->pad; + fmt.which = sel->which; + ret = vfe_get_format(sd, cfg, &fmt); + if (ret < 0) + return ret; + + sel->r.left = 0; + sel->r.top = 0; + sel->r.width = fmt.format.width; + sel->r.height = fmt.format.height; + break; + case V4L2_SEL_TGT_COMPOSE: + rect = __vfe_get_compose(line, cfg, sel->which); + if (rect == NULL) + return -EINVAL; + + sel->r = *rect; + break; + default: + return -EINVAL; + } + else if (sel->pad == MSM_VFE_PAD_SRC) + switch (sel->target) { + case V4L2_SEL_TGT_CROP_BOUNDS: + rect = __vfe_get_compose(line, cfg, sel->which); + if (rect == NULL) + return -EINVAL; + + sel->r.left = rect->left; + sel->r.top = rect->top; + sel->r.width = rect->width; + sel->r.height = rect->height; + break; + case V4L2_SEL_TGT_CROP: + rect = __vfe_get_crop(line, cfg, sel->which); + if (rect == NULL) + return -EINVAL; + + sel->r = *rect; + break; + default: + return -EINVAL; + } + + return 0; +} + +/* + * vfe_set_selection - Handle set selection by pads subdev method + * @sd: VFE V4L2 subdevice + * @cfg: V4L2 subdev pad configuration + * @sel: pointer to v4l2 subdev selection structure + * + * Return -EINVAL or zero on success + */ +int vfe_set_selection(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_selection *sel) +{ + struct vfe_line 
*line = v4l2_get_subdevdata(sd); + struct v4l2_rect *rect; + int ret; + + if (line->id != VFE_LINE_PIX) + return -EINVAL; + + if (sel->target == V4L2_SEL_TGT_COMPOSE && + sel->pad == MSM_VFE_PAD_SINK) { + struct v4l2_subdev_selection crop = { 0 }; + + rect = __vfe_get_compose(line, cfg, sel->which); + if (rect == NULL) + return -EINVAL; + + vfe_try_compose(line, cfg, &sel->r, sel->which); + *rect = sel->r; + + /* Reset source crop selection */ + crop.which = sel->which; + crop.pad = MSM_VFE_PAD_SRC; + crop.target = V4L2_SEL_TGT_CROP; + crop.r = *rect; + ret = vfe_set_selection(sd, cfg, &crop); + } else if (sel->target == V4L2_SEL_TGT_CROP && + sel->pad == MSM_VFE_PAD_SRC) { + struct v4l2_subdev_format fmt = { 0 }; + + rect = __vfe_get_crop(line, cfg, sel->which); + if (rect == NULL) + return -EINVAL; + + vfe_try_crop(line, cfg, &sel->r, sel->which); + *rect = sel->r; + + /* Reset source pad format width and height */ + fmt.which = sel->which; + fmt.pad = MSM_VFE_PAD_SRC; + ret = vfe_get_format(sd, cfg, &fmt); + if (ret < 0) + return ret; + + fmt.format.width = rect->width; + fmt.format.height = rect->height; + ret = vfe_set_format(sd, cfg, &fmt); + } else { + ret = -EINVAL; + } + + return ret; +} + +/* + * vfe_init_formats - Initialize formats on all pads + * @sd: VFE V4L2 subdevice + * @fh: V4L2 subdev file handle + * + * Initialize all pad formats with default values. + * + * Return 0 on success or a negative error code otherwise + */ +static int vfe_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) +{ + struct v4l2_subdev_format format = { + .pad = MSM_VFE_PAD_SINK, + .which = fh ? V4L2_SUBDEV_FORMAT_TRY : + V4L2_SUBDEV_FORMAT_ACTIVE, + .format = { + .code = MEDIA_BUS_FMT_UYVY8_2X8, + .width = 1920, + .height = 1080 + } + }; + + return vfe_set_format(sd, fh ? 
fh->pad : NULL, &format); +} + +/* + * msm_vfe_subdev_init - Initialize VFE device structure and resources + * @vfe: VFE device + * @res: VFE module resources table + * + * Return 0 on success or a negative error code otherwise + */ +int msm_vfe_subdev_init(struct vfe_device *vfe, const struct resources *res) +{ + struct device *dev = to_device(vfe); + struct platform_device *pdev = to_platform_device(dev); + struct resource *r; + struct camss *camss = to_camss(vfe); + int i, j; + int ret; + + /* Memory */ + + r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res->reg[0]); + vfe->base = devm_ioremap_resource(dev, r); + if (IS_ERR(vfe->base)) { + dev_err(dev, "could not map memory\n"); + return PTR_ERR(vfe->base); + } + + /* Interrupt */ + + r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, + res->interrupt[0]); + if (!r) { + dev_err(dev, "missing IRQ\n"); + return -EINVAL; + } + + vfe->irq = r->start; + snprintf(vfe->irq_name, sizeof(vfe->irq_name), "%s_%s%d", + dev_name(dev), MSM_VFE_NAME, vfe->id); + ret = devm_request_irq(dev, vfe->irq, vfe_isr, + IRQF_TRIGGER_RISING, vfe->irq_name, vfe); + if (ret < 0) { + dev_err(dev, "request_irq failed: %d\n", ret); + return ret; + } + + /* Clocks */ + + vfe->nclocks = 0; + while (res->clock[vfe->nclocks]) + vfe->nclocks++; + + vfe->clock = devm_kzalloc(dev, vfe->nclocks * sizeof(*vfe->clock), + GFP_KERNEL); + if (!vfe->clock) + return -ENOMEM; + + for (i = 0; i < vfe->nclocks; i++) { + struct camss_clock *clock = &vfe->clock[i]; + + clock->clk = devm_clk_get(dev, res->clock[i]); + if (IS_ERR(clock->clk)) + return PTR_ERR(clock->clk); + + clock->name = res->clock[i]; + + clock->nfreqs = 0; + while (res->clock_rate[i][clock->nfreqs]) + clock->nfreqs++; + + if (!clock->nfreqs) { + clock->freq = NULL; + continue; + } + + clock->freq = devm_kzalloc(dev, clock->nfreqs * + sizeof(*clock->freq), GFP_KERNEL); + if (!clock->freq) + return -ENOMEM; + + for (j = 0; j < clock->nfreqs; j++) + clock->freq[j] = 
res->clock_rate[i][j]; + } + + mutex_init(&vfe->power_lock); + vfe->power_count = 0; + + mutex_init(&vfe->stream_lock); + vfe->stream_count = 0; + + spin_lock_init(&vfe->output_lock); + + vfe->id = 0; + vfe->reg_update = 0; + + for (i = VFE_LINE_RDI0; i <= VFE_LINE_PIX; i++) { + vfe->line[i].video_out.type = + V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; + vfe->line[i].video_out.camss = camss; + vfe->line[i].id = i; + init_completion(&vfe->line[i].output.sof); + init_completion(&vfe->line[i].output.reg_update); + } + + init_completion(&vfe->reset_complete); + init_completion(&vfe->halt_complete); + + return 0; +} + +/* + * msm_vfe_get_vfe_id - Get VFE HW module id + * @entity: Pointer to VFE media entity structure + * @id: Return CSID HW module id here + */ +void msm_vfe_get_vfe_id(struct media_entity *entity, u8 *id) +{ + struct v4l2_subdev *sd; + struct vfe_line *line; + struct vfe_device *vfe; + + sd = media_entity_to_v4l2_subdev(entity); + line = v4l2_get_subdevdata(sd); + vfe = to_vfe(line); + + *id = vfe->id; +} + +/* + * msm_vfe_get_vfe_line_id - Get VFE line id by media entity + * @entity: Pointer to VFE media entity structure + * @id: Return VFE line id here + */ +void msm_vfe_get_vfe_line_id(struct media_entity *entity, enum vfe_line_id *id) +{ + struct v4l2_subdev *sd; + struct vfe_line *line; + + sd = media_entity_to_v4l2_subdev(entity); + line = v4l2_get_subdevdata(sd); + + *id = line->id; +} + +/* + * vfe_link_setup - Setup VFE connections + * @entity: Pointer to media entity structure + * @local: Pointer to local pad + * @remote: Pointer to remote pad + * @flags: Link flags + * + * Return 0 on success + */ +static int vfe_link_setup(struct media_entity *entity, + const struct media_pad *local, + const struct media_pad *remote, u32 flags) +{ + if (flags & MEDIA_LNK_FL_ENABLED) + if (media_entity_remote_pad(local)) + return -EBUSY; + + return 0; +} + +static const struct v4l2_subdev_core_ops vfe_core_ops = { + .s_power = vfe_set_power, +}; + +static const 
struct v4l2_subdev_video_ops vfe_video_ops = { + .s_stream = vfe_set_stream, +}; + +static const struct v4l2_subdev_pad_ops vfe_pad_ops = { + .enum_mbus_code = vfe_enum_mbus_code, + .enum_frame_size = vfe_enum_frame_size, + .get_fmt = vfe_get_format, + .set_fmt = vfe_set_format, + .get_selection = vfe_get_selection, + .set_selection = vfe_set_selection, +}; + +static const struct v4l2_subdev_ops vfe_v4l2_ops = { + .core = &vfe_core_ops, + .video = &vfe_video_ops, + .pad = &vfe_pad_ops, +}; + +static const struct v4l2_subdev_internal_ops vfe_v4l2_internal_ops = { + .open = vfe_init_formats, +}; + +static const struct media_entity_operations vfe_media_ops = { + .link_setup = vfe_link_setup, + .link_validate = v4l2_subdev_link_validate, +}; + +static const struct camss_video_ops camss_vfe_video_ops = { + .queue_buffer = vfe_queue_buffer, + .flush_buffers = vfe_flush_buffers, +}; + +void msm_vfe_stop_streaming(struct vfe_device *vfe) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(vfe->line); i++) + msm_video_stop_streaming(&vfe->line[i].video_out); +} + +/* + * msm_vfe_register_entities - Register subdev node for VFE module + * @vfe: VFE device + * @v4l2_dev: V4L2 device + * + * Initialize and register a subdev node for the VFE module. Then + * call msm_video_register() to register the video device node which + * will be connected to this subdev node. Then actually create the + * media link between them. 
+ * + * Return 0 on success or a negative error code otherwise + */ +int msm_vfe_register_entities(struct vfe_device *vfe, + struct v4l2_device *v4l2_dev) +{ + struct device *dev = to_device(vfe); + struct v4l2_subdev *sd; + struct media_pad *pads; + struct camss_video *video_out; + int ret; + int i; + + for (i = 0; i < ARRAY_SIZE(vfe->line); i++) { + char name[32]; + + sd = &vfe->line[i].subdev; + pads = vfe->line[i].pads; + video_out = &vfe->line[i].video_out; + + v4l2_subdev_init(sd, &vfe_v4l2_ops); + sd->internal_ops = &vfe_v4l2_internal_ops; + sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; + if (i == VFE_LINE_PIX) + snprintf(sd->name, ARRAY_SIZE(sd->name), "%s%d_%s", + MSM_VFE_NAME, vfe->id, "pix"); + else + snprintf(sd->name, ARRAY_SIZE(sd->name), "%s%d_%s%d", + MSM_VFE_NAME, vfe->id, "rdi", i); + + v4l2_set_subdevdata(sd, &vfe->line[i]); + + ret = vfe_init_formats(sd, NULL); + if (ret < 0) { + dev_err(dev, "Failed to init format: %d\n", ret); + goto error_init; + } + + pads[MSM_VFE_PAD_SINK].flags = MEDIA_PAD_FL_SINK; + pads[MSM_VFE_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE; + + sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER; + sd->entity.ops = &vfe_media_ops; + ret = media_entity_pads_init(&sd->entity, MSM_VFE_PADS_NUM, + pads); + if (ret < 0) { + dev_err(dev, "Failed to init media entity: %d\n", ret); + goto error_init; + } + + ret = v4l2_device_register_subdev(v4l2_dev, sd); + if (ret < 0) { + dev_err(dev, "Failed to register subdev: %d\n", ret); + goto error_reg_subdev; + } + + video_out->ops = &camss_vfe_video_ops; + video_out->bpl_alignment = 8; + video_out->line_based = 0; + if (i == VFE_LINE_PIX) { + video_out->bpl_alignment = 16; + video_out->line_based = 1; + } + snprintf(name, ARRAY_SIZE(name), "%s%d_%s%d", + MSM_VFE_NAME, vfe->id, "video", i); + ret = msm_video_register(video_out, v4l2_dev, name, + i == VFE_LINE_PIX ? 
1 : 0); + if (ret < 0) { + dev_err(dev, "Failed to register video node: %d\n", + ret); + goto error_reg_video; + } + + ret = media_create_pad_link( + &sd->entity, MSM_VFE_PAD_SRC, + &video_out->vdev.entity, 0, + MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED); + if (ret < 0) { + dev_err(dev, "Failed to link %s->%s entities: %d\n", + sd->entity.name, video_out->vdev.entity.name, + ret); + goto error_link; + } + } + + return 0; + +error_link: + msm_video_unregister(video_out); + +error_reg_video: + v4l2_device_unregister_subdev(sd); + +error_reg_subdev: + media_entity_cleanup(&sd->entity); + +error_init: + for (i--; i >= 0; i--) { + sd = &vfe->line[i].subdev; + video_out = &vfe->line[i].video_out; + + msm_video_unregister(video_out); + v4l2_device_unregister_subdev(sd); + media_entity_cleanup(&sd->entity); + } + + return ret; +} + +/* + * msm_vfe_unregister_entities - Unregister VFE module subdev node + * @vfe: VFE device + */ +void msm_vfe_unregister_entities(struct vfe_device *vfe) +{ + int i; + + mutex_destroy(&vfe->power_lock); + mutex_destroy(&vfe->stream_lock); + + for (i = 0; i < ARRAY_SIZE(vfe->line); i++) { + struct v4l2_subdev *sd = &vfe->line[i].subdev; + struct camss_video *video_out = &vfe->line[i].video_out; + + msm_video_unregister(video_out); + v4l2_device_unregister_subdev(sd); + media_entity_cleanup(&sd->entity); + } +} diff --git a/drivers/media/platform/qcom/camss-8x16/camss-vfe.h b/drivers/media/platform/qcom/camss-8x16/camss-vfe.h new file mode 100644 index 000000000000..53d5b66a9dfb --- /dev/null +++ b/drivers/media/platform/qcom/camss-8x16/camss-vfe.h @@ -0,0 +1,123 @@ +/* + * camss-vfe.h + * + * Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module + * + * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. + * Copyright (C) 2015-2017 Linaro Ltd. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef QC_MSM_CAMSS_VFE_H +#define QC_MSM_CAMSS_VFE_H + +#include <linux/clk.h> +#include <linux/spinlock_types.h> +#include <media/media-entity.h> +#include <media/v4l2-device.h> +#include <media/v4l2-subdev.h> + +#include "camss-video.h" + +#define MSM_VFE_PAD_SINK 0 +#define MSM_VFE_PAD_SRC 1 +#define MSM_VFE_PADS_NUM 2 + +#define MSM_VFE_LINE_NUM 4 +#define MSM_VFE_IMAGE_MASTERS_NUM 7 +#define MSM_VFE_COMPOSITE_IRQ_NUM 4 + +#define MSM_VFE_VFE0_UB_SIZE 1023 +#define MSM_VFE_VFE0_UB_SIZE_RDI (MSM_VFE_VFE0_UB_SIZE / 3) +#define MSM_VFE_VFE1_UB_SIZE 1535 +#define MSM_VFE_VFE1_UB_SIZE_RDI (MSM_VFE_VFE1_UB_SIZE / 3) + +enum vfe_output_state { + VFE_OUTPUT_OFF, + VFE_OUTPUT_RESERVED, + VFE_OUTPUT_SINGLE, + VFE_OUTPUT_CONTINUOUS, + VFE_OUTPUT_IDLE, + VFE_OUTPUT_STOPPING +}; + +enum vfe_line_id { + VFE_LINE_NONE = -1, + VFE_LINE_RDI0 = 0, + VFE_LINE_RDI1 = 1, + VFE_LINE_RDI2 = 2, + VFE_LINE_PIX = 3 +}; + +struct vfe_output { + u8 wm_num; + u8 wm_idx[3]; + + int active_buf; + struct camss_buffer *buf[2]; + struct camss_buffer *last_buffer; + struct list_head pending_bufs; + + unsigned int drop_update_idx; + + enum vfe_output_state state; + unsigned int sequence; + int wait_sof; + int wait_reg_update; + struct completion sof; + struct completion reg_update; +}; + +struct vfe_line { + enum vfe_line_id id; + struct v4l2_subdev subdev; + struct media_pad pads[MSM_VFE_PADS_NUM]; + struct v4l2_mbus_framefmt fmt[MSM_VFE_PADS_NUM]; + struct v4l2_rect compose; + struct v4l2_rect crop; + struct camss_video 
video_out; + struct vfe_output output; +}; + +struct vfe_device { + u8 id; + void __iomem *base; + u32 irq; + char irq_name[30]; + struct camss_clock *clock; + int nclocks; + struct completion reset_complete; + struct completion halt_complete; + struct mutex power_lock; + int power_count; + struct mutex stream_lock; + int stream_count; + spinlock_t output_lock; + enum vfe_line_id wm_output_map[MSM_VFE_IMAGE_MASTERS_NUM]; + struct vfe_line line[MSM_VFE_LINE_NUM]; + u32 reg_update; + u8 was_streaming; +}; + +struct resources; + +int msm_vfe_subdev_init(struct vfe_device *vfe, const struct resources *res); + +int msm_vfe_register_entities(struct vfe_device *vfe, + struct v4l2_device *v4l2_dev); + +void msm_vfe_unregister_entities(struct vfe_device *vfe); + +void msm_vfe_get_vfe_id(struct media_entity *entity, u8 *id); +void msm_vfe_get_vfe_line_id(struct media_entity *entity, enum vfe_line_id *id); + +void msm_vfe_stop_streaming(struct vfe_device *vfe); + +#endif /* QC_MSM_CAMSS_VFE_H */ diff --git a/drivers/media/platform/qcom/camss-8x16/camss-video.c b/drivers/media/platform/qcom/camss-8x16/camss-video.c new file mode 100644 index 000000000000..2998ad677bee --- /dev/null +++ b/drivers/media/platform/qcom/camss-8x16/camss-video.c @@ -0,0 +1,860 @@ +/* + * camss-video.c + * + * Qualcomm MSM Camera Subsystem - V4L2 device node + * + * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. + * Copyright (C) 2015-2017 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#include <linux/slab.h> +#include <media/media-entity.h> +#include <media/v4l2-dev.h> +#include <media/v4l2-device.h> +#include <media/v4l2-ioctl.h> +#include <media/v4l2-mc.h> +#include <media/videobuf-core.h> +#include <media/videobuf2-dma-sg.h> + +#include "camss-video.h" +#include "camss.h" + +struct fract { + u8 numerator; + u8 denominator; +}; + +/* + * struct camss_format_info - ISP media bus format information + * @code: V4L2 media bus format code + * @pixelformat: V4L2 pixel format FCC identifier + * @planes: Number of planes + * @hsub: Horizontal subsampling (for each plane) + * @vsub: Vertical subsampling (for each plane) + * @bpp: Bits per pixel when stored in memory (for each plane) + */ +struct camss_format_info { + u32 code; + u32 pixelformat; + u8 planes; + struct fract hsub[3]; + struct fract vsub[3]; + unsigned int bpp[3]; +}; + +static const struct camss_format_info formats_rdi[] = { + { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_UYVY, 1, + { { 1, 1 } }, { { 1, 1 } }, { 16 } }, + { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_VYUY, 1, + { { 1, 1 } }, { { 1, 1 } }, { 16 } }, + { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_YUYV, 1, + { { 1, 1 } }, { { 1, 1 } }, { 16 } }, + { MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_YVYU, 1, + { { 1, 1 } }, { { 1, 1 } }, { 16 } }, + { MEDIA_BUS_FMT_SBGGR8_1X8, V4L2_PIX_FMT_SBGGR8, 1, + { { 1, 1 } }, { { 1, 1 } }, { 8 } }, + { MEDIA_BUS_FMT_SGBRG8_1X8, V4L2_PIX_FMT_SGBRG8, 1, + { { 1, 1 } }, { { 1, 1 } }, { 8 } }, + { MEDIA_BUS_FMT_SGRBG8_1X8, V4L2_PIX_FMT_SGRBG8, 1, + { { 1, 1 } }, { { 1, 1 } }, { 8 } }, + { MEDIA_BUS_FMT_SRGGB8_1X8, V4L2_PIX_FMT_SRGGB8, 1, + { { 1, 1 } }, { { 1, 1 } }, { 8 } }, + { MEDIA_BUS_FMT_SBGGR10_1X10, V4L2_PIX_FMT_SBGGR10P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 10 } }, + { MEDIA_BUS_FMT_SGBRG10_1X10, V4L2_PIX_FMT_SGBRG10P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 10 } }, + { MEDIA_BUS_FMT_SGRBG10_1X10, V4L2_PIX_FMT_SGRBG10P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 10 } }, + { MEDIA_BUS_FMT_SRGGB10_1X10, 
V4L2_PIX_FMT_SRGGB10P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 10 } }, + { MEDIA_BUS_FMT_SBGGR12_1X12, V4L2_PIX_FMT_SBGGR12P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 12 } }, + { MEDIA_BUS_FMT_SGBRG12_1X12, V4L2_PIX_FMT_SGBRG12P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 12 } }, + { MEDIA_BUS_FMT_SGRBG12_1X12, V4L2_PIX_FMT_SGRBG12P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 12 } }, + { MEDIA_BUS_FMT_SRGGB12_1X12, V4L2_PIX_FMT_SRGGB12P, 1, + { { 1, 1 } }, { { 1, 1 } }, { 12 } }, +}; + +static const struct camss_format_info formats_pix[] = { + { MEDIA_BUS_FMT_YUYV8_1_5X8, V4L2_PIX_FMT_NV12, 1, + { { 1, 1 } }, { { 2, 3 } }, { 8 } }, + { MEDIA_BUS_FMT_YVYU8_1_5X8, V4L2_PIX_FMT_NV12, 1, + { { 1, 1 } }, { { 2, 3 } }, { 8 } }, + { MEDIA_BUS_FMT_UYVY8_1_5X8, V4L2_PIX_FMT_NV12, 1, + { { 1, 1 } }, { { 2, 3 } }, { 8 } }, + { MEDIA_BUS_FMT_VYUY8_1_5X8, V4L2_PIX_FMT_NV12, 1, + { { 1, 1 } }, { { 2, 3 } }, { 8 } }, + { MEDIA_BUS_FMT_YUYV8_1_5X8, V4L2_PIX_FMT_NV21, 1, + { { 1, 1 } }, { { 2, 3 } }, { 8 } }, + { MEDIA_BUS_FMT_YVYU8_1_5X8, V4L2_PIX_FMT_NV21, 1, + { { 1, 1 } }, { { 2, 3 } }, { 8 } }, + { MEDIA_BUS_FMT_UYVY8_1_5X8, V4L2_PIX_FMT_NV21, 1, + { { 1, 1 } }, { { 2, 3 } }, { 8 } }, + { MEDIA_BUS_FMT_VYUY8_1_5X8, V4L2_PIX_FMT_NV21, 1, + { { 1, 1 } }, { { 2, 3 } }, { 8 } }, + { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_NV16, 1, + { { 1, 1 } }, { { 1, 2 } }, { 8 } }, + { MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_NV16, 1, + { { 1, 1 } }, { { 1, 2 } }, { 8 } }, + { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_NV16, 1, + { { 1, 1 } }, { { 1, 2 } }, { 8 } }, + { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_NV16, 1, + { { 1, 1 } }, { { 1, 2 } }, { 8 } }, + { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_NV61, 1, + { { 1, 1 } }, { { 1, 2 } }, { 8 } }, + { MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_NV61, 1, + { { 1, 1 } }, { { 1, 2 } }, { 8 } }, + { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_NV61, 1, + { { 1, 1 } }, { { 1, 2 } }, { 8 } }, + { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_NV61, 1, + { { 1, 1 } }, { { 1, 2 } }, { 8 } }, +}; + +/* 
----------------------------------------------------------------------------- + * Helper functions + */ + +static int video_find_format(u32 code, u32 pixelformat, + const struct camss_format_info *formats, + unsigned int nformats) +{ + int i; + + for (i = 0; i < nformats; i++) { + if (formats[i].code == code && + formats[i].pixelformat == pixelformat) + return i; + } + + for (i = 0; i < nformats; i++) + if (formats[i].code == code) + return i; + + WARN_ON(1); + + return -EINVAL; +} + +/* + * video_mbus_to_pix_mp - Convert v4l2_mbus_framefmt to v4l2_pix_format_mplane + * @mbus: v4l2_mbus_framefmt format (input) + * @pix: v4l2_pix_format_mplane format (output) + * @f: a pointer to formats array element to be used for the conversion + * @alignment: bytesperline alignment value + * + * Fill the output pix structure with information from the input mbus format. + * + * Return 0 on success or a negative error code otherwise + */ +static int video_mbus_to_pix_mp(const struct v4l2_mbus_framefmt *mbus, + struct v4l2_pix_format_mplane *pix, + const struct camss_format_info *f, + unsigned int alignment) +{ + unsigned int i; + u32 bytesperline; + + memset(pix, 0, sizeof(*pix)); + v4l2_fill_pix_format_mplane(pix, mbus); + pix->pixelformat = f->pixelformat; + pix->num_planes = f->planes; + for (i = 0; i < pix->num_planes; i++) { + bytesperline = pix->width / f->hsub[i].numerator * + f->hsub[i].denominator * f->bpp[i] / 8; + bytesperline = ALIGN(bytesperline, alignment); + pix->plane_fmt[i].bytesperline = bytesperline; + pix->plane_fmt[i].sizeimage = pix->height / + f->vsub[i].numerator * f->vsub[i].denominator * + bytesperline; + } + + return 0; +} + +static struct v4l2_subdev *video_remote_subdev(struct camss_video *video, + u32 *pad) +{ + struct media_pad *remote; + + remote = media_entity_remote_pad(&video->pad); + + if (!remote || !is_media_entity_v4l2_subdev(remote->entity)) + return NULL; + + if (pad) + *pad = remote->index; + + return 
media_entity_to_v4l2_subdev(remote->entity); +} + +static int video_get_subdev_format(struct camss_video *video, + struct v4l2_format *format) +{ + struct v4l2_subdev_format fmt; + struct v4l2_subdev *subdev; + u32 pad; + int ret; + + subdev = video_remote_subdev(video, &pad); + if (subdev == NULL) + return -EPIPE; + + fmt.pad = pad; + fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE; + + ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt); + if (ret) + return ret; + + ret = video_find_format(fmt.format.code, + format->fmt.pix_mp.pixelformat, + video->formats, video->nformats); + if (ret < 0) + return ret; + + format->type = video->type; + + return video_mbus_to_pix_mp(&fmt.format, &format->fmt.pix_mp, + &video->formats[ret], video->bpl_alignment); +} + +/* ----------------------------------------------------------------------------- + * Video queue operations + */ + +static int video_queue_setup(struct vb2_queue *q, + unsigned int *num_buffers, unsigned int *num_planes, + unsigned int sizes[], struct device *alloc_devs[]) +{ + struct camss_video *video = vb2_get_drv_priv(q); + const struct v4l2_pix_format_mplane *format = + &video->active_fmt.fmt.pix_mp; + unsigned int i; + + if (*num_planes) { + if (*num_planes != format->num_planes) + return -EINVAL; + + for (i = 0; i < *num_planes; i++) + if (sizes[i] < format->plane_fmt[i].sizeimage) + return -EINVAL; + + return 0; + } + + *num_planes = format->num_planes; + + for (i = 0; i < *num_planes; i++) + sizes[i] = format->plane_fmt[i].sizeimage; + + return 0; +} + +static int video_buf_init(struct vb2_buffer *vb) +{ + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct camss_video *video = vb2_get_drv_priv(vb->vb2_queue); + struct camss_buffer *buffer = container_of(vbuf, struct camss_buffer, + vb); + const struct v4l2_pix_format_mplane *format = + &video->active_fmt.fmt.pix_mp; + struct sg_table *sgt; + unsigned int i; + + for (i = 0; i < format->num_planes; i++) { + sgt = vb2_dma_sg_plane_desc(vb, i); + if 
(!sgt) + return -EFAULT; + + buffer->addr[i] = sg_dma_address(sgt->sgl); + } + + if (format->pixelformat == V4L2_PIX_FMT_NV12 || + format->pixelformat == V4L2_PIX_FMT_NV21 || + format->pixelformat == V4L2_PIX_FMT_NV16 || + format->pixelformat == V4L2_PIX_FMT_NV61) + buffer->addr[1] = buffer->addr[0] + + format->plane_fmt[0].bytesperline * + format->height; + + return 0; +} + +static int video_buf_prepare(struct vb2_buffer *vb) +{ + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct camss_video *video = vb2_get_drv_priv(vb->vb2_queue); + const struct v4l2_pix_format_mplane *format = + &video->active_fmt.fmt.pix_mp; + unsigned int i; + + for (i = 0; i < format->num_planes; i++) { + if (format->plane_fmt[i].sizeimage > vb2_plane_size(vb, i)) + return -EINVAL; + + vb2_set_plane_payload(vb, i, format->plane_fmt[i].sizeimage); + } + + vbuf->field = V4L2_FIELD_NONE; + + return 0; +} + +static void video_buf_queue(struct vb2_buffer *vb) +{ + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct camss_video *video = vb2_get_drv_priv(vb->vb2_queue); + struct camss_buffer *buffer = container_of(vbuf, struct camss_buffer, + vb); + + video->ops->queue_buffer(video, buffer); +} + +static int video_check_format(struct camss_video *video) +{ + struct v4l2_pix_format_mplane *pix = &video->active_fmt.fmt.pix_mp; + struct v4l2_format format; + struct v4l2_pix_format_mplane *sd_pix = &format.fmt.pix_mp; + int ret; + + sd_pix->pixelformat = pix->pixelformat; + ret = video_get_subdev_format(video, &format); + if (ret < 0) + return ret; + + if (pix->pixelformat != sd_pix->pixelformat || + pix->height != sd_pix->height || + pix->width != sd_pix->width || + pix->num_planes != sd_pix->num_planes || + pix->field != format.fmt.pix_mp.field) + return -EPIPE; + + return 0; +} + +static int video_start_streaming(struct vb2_queue *q, unsigned int count) +{ + struct camss_video *video = vb2_get_drv_priv(q); + struct video_device *vdev = &video->vdev; + struct media_entity 
*entity; + struct media_pad *pad; + struct v4l2_subdev *subdev; + int ret; + + ret = media_entity_pipeline_start(&vdev->entity, &video->pipe); + if (ret < 0) + return ret; + + ret = video_check_format(video); + if (ret < 0) + goto error; + + entity = &vdev->entity; + while (1) { + pad = &entity->pads[0]; + if (!(pad->flags & MEDIA_PAD_FL_SINK)) + break; + + pad = media_entity_remote_pad(pad); + if (!pad || !is_media_entity_v4l2_subdev(pad->entity)) + break; + + entity = pad->entity; + subdev = media_entity_to_v4l2_subdev(entity); + + ret = v4l2_subdev_call(subdev, video, s_stream, 1); + if (ret < 0 && ret != -ENOIOCTLCMD) + goto error; + } + + return 0; + +error: + media_entity_pipeline_stop(&vdev->entity); + + video->ops->flush_buffers(video, VB2_BUF_STATE_QUEUED); + + return ret; +} + +static void video_stop_streaming(struct vb2_queue *q) +{ + struct camss_video *video = vb2_get_drv_priv(q); + struct video_device *vdev = &video->vdev; + struct media_entity *entity; + struct media_pad *pad; + struct v4l2_subdev *subdev; + + entity = &vdev->entity; + while (1) { + pad = &entity->pads[0]; + if (!(pad->flags & MEDIA_PAD_FL_SINK)) + break; + + pad = media_entity_remote_pad(pad); + if (!pad || !is_media_entity_v4l2_subdev(pad->entity)) + break; + + entity = pad->entity; + subdev = media_entity_to_v4l2_subdev(entity); + + v4l2_subdev_call(subdev, video, s_stream, 0); + } + + media_entity_pipeline_stop(&vdev->entity); + + video->ops->flush_buffers(video, VB2_BUF_STATE_ERROR); +} + +static const struct vb2_ops msm_video_vb2_q_ops = { + .queue_setup = video_queue_setup, + .wait_prepare = vb2_ops_wait_prepare, + .wait_finish = vb2_ops_wait_finish, + .buf_init = video_buf_init, + .buf_prepare = video_buf_prepare, + .buf_queue = video_buf_queue, + .start_streaming = video_start_streaming, + .stop_streaming = video_stop_streaming, +}; + +/* ----------------------------------------------------------------------------- + * V4L2 ioctls + */ + +static int video_querycap(struct 
file *file, void *fh, + struct v4l2_capability *cap) +{ + struct camss_video *video = video_drvdata(file); + + strlcpy(cap->driver, "qcom-camss", sizeof(cap->driver)); + strlcpy(cap->card, "Qualcomm Camera Subsystem", sizeof(cap->card)); + snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s", + dev_name(video->camss->dev)); + + return 0; +} + +static int video_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f) +{ + struct camss_video *video = video_drvdata(file); + int i, j, k; + + if (f->type != video->type) + return -EINVAL; + + if (f->index >= video->nformats) + return -EINVAL; + + /* find index "i" of "k"th unique pixelformat in formats array */ + k = -1; + for (i = 0; i < video->nformats; i++) { + for (j = 0; j < i; j++) { + if (video->formats[i].pixelformat == + video->formats[j].pixelformat) + break; + } + + if (j == i) + k++; + + if (k == f->index) + break; + } + + if (k < f->index) + return -EINVAL; + + f->pixelformat = video->formats[i].pixelformat; + + return 0; +} + +static int video_g_fmt(struct file *file, void *fh, struct v4l2_format *f) +{ + struct camss_video *video = video_drvdata(file); + + *f = video->active_fmt; + + return 0; +} + +static int __video_try_fmt(struct camss_video *video, struct v4l2_format *f) +{ + struct v4l2_pix_format_mplane *pix_mp; + const struct camss_format_info *fi; + struct v4l2_plane_pix_format *p; + u32 bytesperline[3] = { 0 }; + u32 sizeimage[3] = { 0 }; + u32 width, height; + u32 bpl, lines; + int i, j; + + pix_mp = &f->fmt.pix_mp; + + if (video->line_based) + for (i = 0; i < pix_mp->num_planes && i < 3; i++) { + p = &pix_mp->plane_fmt[i]; + bytesperline[i] = clamp_t(u32, p->bytesperline, + 1, 65528); + sizeimage[i] = clamp_t(u32, p->sizeimage, + bytesperline[i], + bytesperline[i] * 4096); + } + + for (j = 0; j < video->nformats; j++) + if (pix_mp->pixelformat == video->formats[j].pixelformat) + break; + + if (j == video->nformats) + j = 0; /* default format */ + + fi = &video->formats[j]; + width = 
pix_mp->width; + height = pix_mp->height; + + memset(pix_mp, 0, sizeof(*pix_mp)); + + pix_mp->pixelformat = fi->pixelformat; + pix_mp->width = clamp_t(u32, width, 1, 8191); + pix_mp->height = clamp_t(u32, height, 1, 8191); + pix_mp->num_planes = fi->planes; + for (i = 0; i < pix_mp->num_planes; i++) { + bpl = pix_mp->width / fi->hsub[i].numerator * + fi->hsub[i].denominator * fi->bpp[i] / 8; + bpl = ALIGN(bpl, video->bpl_alignment); + pix_mp->plane_fmt[i].bytesperline = bpl; + pix_mp->plane_fmt[i].sizeimage = pix_mp->height / + fi->vsub[i].numerator * fi->vsub[i].denominator * bpl; + } + + pix_mp->field = V4L2_FIELD_NONE; + pix_mp->colorspace = V4L2_COLORSPACE_SRGB; + pix_mp->flags = 0; + pix_mp->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(pix_mp->colorspace); + pix_mp->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(true, + pix_mp->colorspace, pix_mp->ycbcr_enc); + pix_mp->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(pix_mp->colorspace); + + if (video->line_based) + for (i = 0; i < pix_mp->num_planes; i++) { + p = &pix_mp->plane_fmt[i]; + p->bytesperline = clamp_t(u32, p->bytesperline, + 1, 65528); + p->sizeimage = clamp_t(u32, p->sizeimage, + p->bytesperline, + p->bytesperline * 4096); + lines = p->sizeimage / p->bytesperline; + + if (p->bytesperline < bytesperline[i]) + p->bytesperline = ALIGN(bytesperline[i], 8); + + if (p->sizeimage < p->bytesperline * lines) + p->sizeimage = p->bytesperline * lines; + + if (p->sizeimage < sizeimage[i]) + p->sizeimage = sizeimage[i]; + } + + return 0; +} + +static int video_try_fmt(struct file *file, void *fh, struct v4l2_format *f) +{ + struct camss_video *video = video_drvdata(file); + + return __video_try_fmt(video, f); +} + +static int video_s_fmt(struct file *file, void *fh, struct v4l2_format *f) +{ + struct camss_video *video = video_drvdata(file); + int ret; + + if (vb2_is_busy(&video->vb2_q)) + return -EBUSY; + + ret = __video_try_fmt(video, f); + if (ret < 0) + return ret; + + video->active_fmt = *f; + + return 0; +} + +static int 
video_enum_input(struct file *file, void *fh, + struct v4l2_input *input) +{ + if (input->index > 0) + return -EINVAL; + + strlcpy(input->name, "camera", sizeof(input->name)); + input->type = V4L2_INPUT_TYPE_CAMERA; + + return 0; +} + +static int video_g_input(struct file *file, void *fh, unsigned int *input) +{ + *input = 0; + + return 0; +} + +static int video_s_input(struct file *file, void *fh, unsigned int input) +{ + return input == 0 ? 0 : -EINVAL; +} + +static const struct v4l2_ioctl_ops msm_vid_ioctl_ops = { + .vidioc_querycap = video_querycap, + .vidioc_enum_fmt_vid_cap_mplane = video_enum_fmt, + .vidioc_g_fmt_vid_cap_mplane = video_g_fmt, + .vidioc_s_fmt_vid_cap_mplane = video_s_fmt, + .vidioc_try_fmt_vid_cap_mplane = video_try_fmt, + .vidioc_reqbufs = vb2_ioctl_reqbufs, + .vidioc_querybuf = vb2_ioctl_querybuf, + .vidioc_qbuf = vb2_ioctl_qbuf, + .vidioc_expbuf = vb2_ioctl_expbuf, + .vidioc_dqbuf = vb2_ioctl_dqbuf, + .vidioc_create_bufs = vb2_ioctl_create_bufs, + .vidioc_prepare_buf = vb2_ioctl_prepare_buf, + .vidioc_streamon = vb2_ioctl_streamon, + .vidioc_streamoff = vb2_ioctl_streamoff, + .vidioc_enum_input = video_enum_input, + .vidioc_g_input = video_g_input, + .vidioc_s_input = video_s_input, +}; + +/* ----------------------------------------------------------------------------- + * V4L2 file operations + */ + +static int video_open(struct file *file) +{ + struct video_device *vdev = video_devdata(file); + struct camss_video *video = video_drvdata(file); + struct v4l2_fh *vfh; + int ret; + + mutex_lock(&video->lock); + + vfh = kzalloc(sizeof(*vfh), GFP_KERNEL); + if (vfh == NULL) { + ret = -ENOMEM; + goto error_alloc; + } + + v4l2_fh_init(vfh, vdev); + v4l2_fh_add(vfh); + + file->private_data = vfh; + + ret = v4l2_pipeline_pm_use(&vdev->entity, 1); + if (ret < 0) { + dev_err(video->camss->dev, "Failed to power up pipeline: %d\n", + ret); + goto error_pm_use; + } + + mutex_unlock(&video->lock); + + return 0; + +error_pm_use: + v4l2_fh_release(file); 
+ +error_alloc: + mutex_unlock(&video->lock); + + return ret; +} + +static int video_release(struct file *file) +{ + struct video_device *vdev = video_devdata(file); + + vb2_fop_release(file); + + v4l2_pipeline_pm_use(&vdev->entity, 0); + + file->private_data = NULL; + + return 0; +} + +static const struct v4l2_file_operations msm_vid_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = video_ioctl2, + .open = video_open, + .release = video_release, + .poll = vb2_fop_poll, + .mmap = vb2_fop_mmap, + .read = vb2_fop_read, +}; + +/* ----------------------------------------------------------------------------- + * CAMSS video core + */ + +static void msm_video_release(struct video_device *vdev) +{ + struct camss_video *video = video_get_drvdata(vdev); + + media_entity_cleanup(&vdev->entity); + + mutex_destroy(&video->q_lock); + mutex_destroy(&video->lock); + + if (atomic_dec_and_test(&video->camss->ref_count)) + camss_delete(video->camss); +} + +/* + * msm_video_init_format - Helper function to initialize format + * @video: struct camss_video + * + * Initialize pad format with default value. + * + * Return 0 on success or a negative error code otherwise + */ +static int msm_video_init_format(struct camss_video *video) +{ + int ret; + struct v4l2_format format = { + .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE, + .fmt.pix_mp = { + .width = 1920, + .height = 1080, + .pixelformat = video->formats[0].pixelformat, + }, + }; + + ret = __video_try_fmt(video, &format); + if (ret < 0) + return ret; + + video->active_fmt = format; + + return 0; +} + +/* + * msm_video_register - Register a video device node + * @video: struct camss_video + * @v4l2_dev: V4L2 device + * @name: name to be used for the video device node + * + * Initialize and register a video device node to a V4L2 device. Also + * initialize the vb2 queue. 
+ * + * Return 0 on success or a negative error code otherwise + */ + +int msm_video_register(struct camss_video *video, struct v4l2_device *v4l2_dev, + const char *name, int is_pix) +{ + struct media_pad *pad = &video->pad; + struct video_device *vdev; + struct vb2_queue *q; + int ret; + + vdev = &video->vdev; + + mutex_init(&video->q_lock); + + q = &video->vb2_q; + q->drv_priv = video; + q->mem_ops = &vb2_dma_sg_memops; + q->ops = &msm_video_vb2_q_ops; + q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; + q->io_modes = VB2_DMABUF | VB2_MMAP | VB2_READ; + q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; + q->buf_struct_size = sizeof(struct camss_buffer); + q->dev = video->camss->dev; + q->lock = &video->q_lock; + ret = vb2_queue_init(q); + if (ret < 0) { + dev_err(v4l2_dev->dev, "Failed to init vb2 queue: %d\n", ret); + goto error_vb2_init; + } + + pad->flags = MEDIA_PAD_FL_SINK; + ret = media_entity_pads_init(&vdev->entity, 1, pad); + if (ret < 0) { + dev_err(v4l2_dev->dev, "Failed to init video entity: %d\n", + ret); + goto error_media_init; + } + + mutex_init(&video->lock); + + video->formats = formats_rdi; + video->nformats = ARRAY_SIZE(formats_rdi); + if (is_pix) { + video->formats = formats_pix; + video->nformats = ARRAY_SIZE(formats_pix); + } + + ret = msm_video_init_format(video); + if (ret < 0) { + dev_err(v4l2_dev->dev, "Failed to init format: %d\n", ret); + goto error_video_register; + } + + vdev->fops = &msm_vid_fops; + vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_STREAMING | + V4L2_CAP_READWRITE; + vdev->ioctl_ops = &msm_vid_ioctl_ops; + vdev->release = msm_video_release; + vdev->v4l2_dev = v4l2_dev; + vdev->vfl_dir = VFL_DIR_RX; + vdev->queue = &video->vb2_q; + vdev->lock = &video->lock; + strlcpy(vdev->name, name, sizeof(vdev->name)); + + ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1); + if (ret < 0) { + dev_err(v4l2_dev->dev, "Failed to register video device: %d\n", + ret); + goto error_video_register; + } + + 
video_set_drvdata(vdev, video); + atomic_inc(&video->camss->ref_count); + + return 0; + +error_video_register: + media_entity_cleanup(&vdev->entity); + mutex_destroy(&video->lock); +error_media_init: + vb2_queue_release(&video->vb2_q); +error_vb2_init: + mutex_destroy(&video->q_lock); + + return ret; +} + +void msm_video_stop_streaming(struct camss_video *video) +{ + if (vb2_is_streaming(&video->vb2_q)) + vb2_queue_release(&video->vb2_q); +} + +void msm_video_unregister(struct camss_video *video) +{ + atomic_inc(&video->camss->ref_count); + video_unregister_device(&video->vdev); + atomic_dec(&video->camss->ref_count); +} diff --git a/drivers/media/platform/qcom/camss-8x16/camss-video.h b/drivers/media/platform/qcom/camss-8x16/camss-video.h new file mode 100644 index 000000000000..38bd1f2eec54 --- /dev/null +++ b/drivers/media/platform/qcom/camss-8x16/camss-video.h @@ -0,0 +1,70 @@ +/* + * camss-video.h + * + * Qualcomm MSM Camera Subsystem - V4L2 device node + * + * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. + * Copyright (C) 2015-2017 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef QC_MSM_CAMSS_VIDEO_H +#define QC_MSM_CAMSS_VIDEO_H + +#include <linux/mutex.h> +#include <linux/videodev2.h> +#include <media/media-entity.h> +#include <media/v4l2-dev.h> +#include <media/v4l2-device.h> +#include <media/v4l2-fh.h> +#include <media/v4l2-mediabus.h> +#include <media/videobuf2-v4l2.h> + +struct camss_buffer { + struct vb2_v4l2_buffer vb; + dma_addr_t addr[3]; + struct list_head queue; +}; + +struct camss_video; + +struct camss_video_ops { + int (*queue_buffer)(struct camss_video *vid, struct camss_buffer *buf); + int (*flush_buffers)(struct camss_video *vid, + enum vb2_buffer_state state); +}; + +struct camss_format_info; + +struct camss_video { + struct camss *camss; + struct vb2_queue vb2_q; + struct video_device vdev; + struct media_pad pad; + struct v4l2_format active_fmt; + enum v4l2_buf_type type; + struct media_pipeline pipe; + const struct camss_video_ops *ops; + struct mutex lock; + struct mutex q_lock; + unsigned int bpl_alignment; + unsigned int line_based; + const struct camss_format_info *formats; + unsigned int nformats; +}; + +void msm_video_stop_streaming(struct camss_video *video); + +int msm_video_register(struct camss_video *video, struct v4l2_device *v4l2_dev, + const char *name, int is_pix); + +void msm_video_unregister(struct camss_video *video); + +#endif /* QC_MSM_CAMSS_VIDEO_H */ diff --git a/drivers/media/platform/qcom/camss-8x16/camss.c b/drivers/media/platform/qcom/camss-8x16/camss.c new file mode 100644 index 000000000000..102829355bf0 --- /dev/null +++ b/drivers/media/platform/qcom/camss-8x16/camss.c @@ -0,0 +1,744 @@ +/* + * camss.c + * + * Qualcomm MSM Camera Subsystem - Core + * + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * Copyright (C) 2015-2017 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include <linux/clk.h> +#include <linux/media-bus-format.h> +#include <linux/media.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/of.h> +#include <linux/slab.h> +#include <linux/videodev2.h> + +#include <media/media-device.h> +#include <media/v4l2-async.h> +#include <media/v4l2-device.h> +#include <media/v4l2-mc.h> +#include <media/v4l2-of.h> + +#include "camss.h" + +#define CAMSS_CLOCK_MARGIN_NUMERATOR 105 +#define CAMSS_CLOCK_MARGIN_DENOMINATOR 100 + +static const struct resources csiphy_res[] = { + /* CSIPHY0 */ + { + .regulator = { NULL }, + .clock = { "camss_top_ahb", "ispif_ahb", + "camss_ahb", "csiphy0_timer" }, + .clock_rate = { { 0 }, + { 0 }, + { 0 }, + { 100000000, 200000000 } }, + .reg = { "csiphy0", "csiphy0_clk_mux" }, + .interrupt = { "csiphy0" } + }, + + /* CSIPHY1 */ + { + .regulator = { NULL }, + .clock = { "camss_top_ahb", "ispif_ahb", + "camss_ahb", "csiphy1_timer" }, + .clock_rate = { { 0 }, + { 0 }, + { 0 }, + { 100000000, 200000000 } }, + .reg = { "csiphy1", "csiphy1_clk_mux" }, + .interrupt = { "csiphy1" } + } +}; + +static const struct resources csid_res[] = { + /* CSID0 */ + { + .regulator = { "vdda" }, + .clock = { "camss_top_ahb", "ispif_ahb", + "csi0_ahb", "camss_ahb", + "csi0", "csi0_phy", "csi0_pix", "csi0_rdi" }, + .clock_rate = { { 0 }, + { 0 }, + { 0 }, + { 0 }, + { 100000000, 200000000 }, + { 0 }, + { 0 }, + { 0 } }, + .reg = { "csid0" }, + .interrupt = { "csid0" } + }, + + /* CSID1 */ + { + .regulator = { "vdda" }, + .clock = { "camss_top_ahb", "ispif_ahb", + "csi1_ahb", "camss_ahb", + "csi1", "csi1_phy", "csi1_pix", "csi1_rdi" }, + .clock_rate = { { 0 }, + { 0 }, + { 0 }, + { 0 }, + { 100000000, 200000000 }, + { 0 }, + { 0 }, + { 0 } }, + 
.reg = { "csid1" }, + .interrupt = { "csid1" } + }, +}; + +static const struct resources_ispif ispif_res = { + /* ISPIF */ + .clock = { "camss_top_ahb", "camss_ahb", "ispif_ahb", + "csi0", "csi0_pix", "csi0_rdi", + "csi1", "csi1_pix", "csi1_rdi" }, + .clock_for_reset = { "camss_vfe_vfe", "camss_csi_vfe" }, + .reg = { "ispif", "csi_clk_mux" }, + .interrupt = "ispif" + +}; + +static const struct resources vfe_res = { + /* VFE0 */ + .regulator = { NULL }, + .clock = { "camss_top_ahb", "camss_vfe_vfe", "camss_csi_vfe", + "iface", "bus", "camss_ahb" }, + .clock_rate = { { 0 }, + { 50000000, 80000000, 100000000, 160000000, + 177780000, 200000000, 266670000, 320000000, + 400000000, 465000000 }, + { 0 }, + { 0 }, + { 0 }, + { 0 }, + { 0 }, + { 0 }, + { 0 } }, + .reg = { "vfe0" }, + .interrupt = { "vfe0" } +}; + +/* + * camss_add_clock_margin - Add margin to clock frequency rate + * @rate: Clock frequency rate + * + * When making calculations with physical clock frequency values + * some safety margin must be added. Add it. 
+ */ +inline void camss_add_clock_margin(u64 *rate) +{ + *rate *= CAMSS_CLOCK_MARGIN_NUMERATOR; + *rate = div_u64(*rate, CAMSS_CLOCK_MARGIN_DENOMINATOR); +} + +/* + * camss_enable_clocks - Enable multiple clocks + * @nclocks: Number of clocks in clock array + * @clock: Clock array + * @dev: Device + * + * Return 0 on success or a negative error code otherwise + */ +int camss_enable_clocks(int nclocks, struct camss_clock *clock, + struct device *dev) +{ + int ret; + int i; + + for (i = 0; i < nclocks; i++) { + ret = clk_prepare_enable(clock[i].clk); + if (ret) { + dev_err(dev, "clock enable failed: %d\n", ret); + goto error; + } + } + + return 0; + +error: + for (i--; i >= 0; i--) + clk_disable_unprepare(clock[i].clk); + + return ret; +} + +/* + * camss_disable_clocks - Disable multiple clocks + * @nclocks: Number of clocks in clock array + * @clock: Clock array + */ +void camss_disable_clocks(int nclocks, struct camss_clock *clock) +{ + int i; + + for (i = nclocks - 1; i >= 0; i--) + clk_disable_unprepare(clock[i].clk); +} + +/* + * camss_find_sensor - Find a linked media entity which represents a sensor + * @entity: Media entity to start searching from + * + * Return a pointer to sensor media entity or NULL if not found + */ +static struct media_entity *camss_find_sensor(struct media_entity *entity) +{ + struct media_pad *pad; + + while (1) { + pad = &entity->pads[0]; + if (!(pad->flags & MEDIA_PAD_FL_SINK)) + return NULL; + + pad = media_entity_remote_pad(pad); + if (!pad || !is_media_entity_v4l2_subdev(pad->entity)) + return NULL; + + entity = pad->entity; + + if (entity->function == MEDIA_ENT_F_CAM_SENSOR) + return entity; + } +} + +/* + * camss_get_pixel_clock - Get pixel clock rate from sensor + * @entity: Media entity in the current pipeline + * @pixel_clock: Received pixel clock value + * + * Return 0 on success or a negative error code otherwise + */ +int camss_get_pixel_clock(struct media_entity *entity, u32 *pixel_clock) +{ + struct media_entity *sensor; 
+ struct v4l2_subdev *subdev; + struct v4l2_ctrl *ctrl; + + sensor = camss_find_sensor(entity); + if (!sensor) + return -ENODEV; + + subdev = media_entity_to_v4l2_subdev(sensor); + + ctrl = v4l2_ctrl_find(subdev->ctrl_handler, V4L2_CID_PIXEL_RATE); + + if (!ctrl) + return -EINVAL; + + *pixel_clock = v4l2_ctrl_g_ctrl_int64(ctrl); + + return 0; +} + +/* + * camss_of_parse_endpoint_node - Parse port endpoint node + * @dev: Device + * @node: Device node to be parsed + * @csd: Parsed data from port endpoint node + * + * Return 0 on success or a negative error code on failure + */ +static int camss_of_parse_endpoint_node(struct device *dev, + struct device_node *node, + struct camss_async_subdev *csd) +{ + struct csiphy_lanes_cfg *lncfg = &csd->interface.csi2.lane_cfg; + struct v4l2_of_bus_mipi_csi2 *mipi_csi2; + struct v4l2_of_endpoint vep = { { 0 } }; + unsigned int i; + + v4l2_of_parse_endpoint(node, &vep); + + csd->interface.csiphy_id = vep.base.port; + + mipi_csi2 = &vep.bus.mipi_csi2; + lncfg->clk.pos = mipi_csi2->clock_lane; + lncfg->clk.pol = mipi_csi2->lane_polarities[0]; + lncfg->num_data = mipi_csi2->num_data_lanes; + + lncfg->data = devm_kzalloc(dev, lncfg->num_data * sizeof(*lncfg->data), + GFP_KERNEL); + if (!lncfg->data) + return -ENOMEM; + + for (i = 0; i < lncfg->num_data; i++) { + lncfg->data[i].pos = mipi_csi2->data_lanes[i]; + lncfg->data[i].pol = mipi_csi2->lane_polarities[i + 1]; + } + + return 0; +} + +/* + * camss_of_parse_ports - Parse ports node + * @dev: Device + * @notifier: v4l2_device notifier data + * + * Return number of "port" nodes found in "ports" node + */ +static int camss_of_parse_ports(struct device *dev, + struct v4l2_async_notifier *notifier) +{ + struct device_node *node = NULL; + unsigned int size, i; + int ret; + + while ((node = of_graph_get_next_endpoint(dev->of_node, node))) + if (of_device_is_available(node)) + notifier->num_subdevs++; + + size = sizeof(*notifier->subdevs) * notifier->num_subdevs; + notifier->subdevs = 
devm_kzalloc(dev, size, GFP_KERNEL); + if (!notifier->subdevs) { + dev_err(dev, "Failed to allocate memory\n"); + return -ENOMEM; + } + + i = 0; + while ((node = of_graph_get_next_endpoint(dev->of_node, node))) { + struct camss_async_subdev *csd; + + if (!of_device_is_available(node)) + continue; + + csd = devm_kzalloc(dev, sizeof(*csd), GFP_KERNEL); + if (!csd) { + of_node_put(node); + dev_err(dev, "Failed to allocate memory\n"); + return -ENOMEM; + } + + notifier->subdevs[i++] = &csd->asd; + + ret = camss_of_parse_endpoint_node(dev, node, csd); + if (ret < 0) { + of_node_put(node); + return ret; + } + + csd->asd.match.of.node = of_graph_get_remote_port_parent(node); + of_node_put(node); + if (!csd->asd.match.of.node) { + dev_err(dev, "Bad remote port parent\n"); + return -EINVAL; + } + + csd->asd.match_type = V4L2_ASYNC_MATCH_OF; + } + + return notifier->num_subdevs; +} + +/* + * camss_init_subdevices - Initialize subdev structures and resources + * @camss: CAMSS device + * + * Return 0 on success or a negative error code on failure + */ +static int camss_init_subdevices(struct camss *camss) +{ + unsigned int i; + int ret; + + for (i = 0; i < ARRAY_SIZE(camss->csiphy); i++) { + ret = msm_csiphy_subdev_init(&camss->csiphy[i], + &csiphy_res[i], i); + if (ret < 0) { + dev_err(camss->dev, + "Failed to init csiphy%d sub-device: %d\n", + i, ret); + return ret; + } + } + + for (i = 0; i < ARRAY_SIZE(camss->csid); i++) { + ret = msm_csid_subdev_init(&camss->csid[i], + &csid_res[i], i); + if (ret < 0) { + dev_err(camss->dev, + "Failed to init csid%d sub-device: %d\n", + i, ret); + return ret; + } + } + + ret = msm_ispif_subdev_init(&camss->ispif, &ispif_res); + if (ret < 0) { + dev_err(camss->dev, "Failed to init ispif sub-device: %d\n", + ret); + return ret; + } + + ret = msm_vfe_subdev_init(&camss->vfe, &vfe_res); + if (ret < 0) { + dev_err(camss->dev, "Fail to init vfe sub-device: %d\n", ret); + return ret; + } + + return 0; +} + +/* + * camss_register_entities - 
Register subdev nodes and create links + * @camss: CAMSS device + * + * Return 0 on success or a negative error code on failure + */ +static int camss_register_entities(struct camss *camss) +{ + int i, j; + int ret; + + for (i = 0; i < ARRAY_SIZE(camss->csiphy); i++) { + ret = msm_csiphy_register_entity(&camss->csiphy[i], + &camss->v4l2_dev); + if (ret < 0) { + dev_err(camss->dev, + "Failed to register csiphy%d entity: %d\n", + i, ret); + goto err_reg_csiphy; + } + } + + for (i = 0; i < ARRAY_SIZE(camss->csid); i++) { + ret = msm_csid_register_entity(&camss->csid[i], + &camss->v4l2_dev); + if (ret < 0) { + dev_err(camss->dev, + "Failed to register csid%d entity: %d\n", + i, ret); + goto err_reg_csid; + } + } + + ret = msm_ispif_register_entities(&camss->ispif, &camss->v4l2_dev); + if (ret < 0) { + dev_err(camss->dev, "Failed to register ispif entities: %d\n", + ret); + goto err_reg_ispif; + } + + ret = msm_vfe_register_entities(&camss->vfe, &camss->v4l2_dev); + if (ret < 0) { + dev_err(camss->dev, "Failed to register vfe entities: %d\n", + ret); + goto err_reg_vfe; + } + + for (i = 0; i < ARRAY_SIZE(camss->csiphy); i++) { + for (j = 0; j < ARRAY_SIZE(camss->csid); j++) { + ret = media_create_pad_link( + &camss->csiphy[i].subdev.entity, + MSM_CSIPHY_PAD_SRC, + &camss->csid[j].subdev.entity, + MSM_CSID_PAD_SINK, + 0); + if (ret < 0) { + dev_err(camss->dev, + "Failed to link %s->%s entities: %d\n", + camss->csiphy[i].subdev.entity.name, + camss->csid[j].subdev.entity.name, + ret); + goto err_link; + } + } + } + + for (i = 0; i < ARRAY_SIZE(camss->csid); i++) { + for (j = 0; j < ARRAY_SIZE(camss->ispif.line); j++) { + ret = media_create_pad_link( + &camss->csid[i].subdev.entity, + MSM_CSID_PAD_SRC, + &camss->ispif.line[j].subdev.entity, + MSM_ISPIF_PAD_SINK, + 0); + if (ret < 0) { + dev_err(camss->dev, + "Failed to link %s->%s entities: %d\n", + camss->csid[i].subdev.entity.name, + camss->ispif.line[j].subdev.entity.name, + ret); + goto err_link; + } + } + } + + for (i 
= 0; i < ARRAY_SIZE(camss->ispif.line); i++) { + for (j = 0; j < ARRAY_SIZE(camss->vfe.line); j++) { + ret = media_create_pad_link( + &camss->ispif.line[i].subdev.entity, + MSM_ISPIF_PAD_SRC, + &camss->vfe.line[j].subdev.entity, + MSM_VFE_PAD_SINK, + 0); + if (ret < 0) { + dev_err(camss->dev, + "Failed to link %s->%s entities: %d\n", + camss->ispif.line[i].subdev.entity.name, + camss->vfe.line[j].subdev.entity.name, + ret); + goto err_link; + } + } + } + + return 0; + +err_link: + msm_vfe_unregister_entities(&camss->vfe); +err_reg_vfe: + msm_ispif_unregister_entities(&camss->ispif); +err_reg_ispif: + + i = ARRAY_SIZE(camss->csid); +err_reg_csid: + for (i--; i >= 0; i--) + msm_csid_unregister_entity(&camss->csid[i]); + + i = ARRAY_SIZE(camss->csiphy); +err_reg_csiphy: + for (i--; i >= 0; i--) + msm_csiphy_unregister_entity(&camss->csiphy[i]); + + return ret; +} + +/* + * camss_unregister_entities - Unregister subdev nodes + * @camss: CAMSS device + * + * Return 0 on success or a negative error code on failure + */ +static void camss_unregister_entities(struct camss *camss) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(camss->csiphy); i++) + msm_csiphy_unregister_entity(&camss->csiphy[i]); + + for (i = 0; i < ARRAY_SIZE(camss->csid); i++) + msm_csid_unregister_entity(&camss->csid[i]); + + msm_ispif_unregister_entities(&camss->ispif); + msm_vfe_unregister_entities(&camss->vfe); +} + +static int camss_subdev_notifier_bound(struct v4l2_async_notifier *async, + struct v4l2_subdev *subdev, + struct v4l2_async_subdev *asd) +{ + struct camss *camss = container_of(async, struct camss, notifier); + struct camss_async_subdev *csd = + container_of(asd, struct camss_async_subdev, asd); + u8 id = csd->interface.csiphy_id; + struct csiphy_device *csiphy = &camss->csiphy[id]; + + csiphy->cfg.csi2 = &csd->interface.csi2; + subdev->host_priv = csiphy; + + return 0; +} + +static int camss_subdev_notifier_complete(struct v4l2_async_notifier *async) +{ + struct camss *camss = 
container_of(async, struct camss, notifier); + struct v4l2_device *v4l2_dev = &camss->v4l2_dev; + struct v4l2_subdev *sd; + int ret; + + list_for_each_entry(sd, &v4l2_dev->subdevs, list) { + if (sd->host_priv) { + struct media_entity *sensor = &sd->entity; + struct csiphy_device *csiphy = + (struct csiphy_device *) sd->host_priv; + struct media_entity *input = &csiphy->subdev.entity; + unsigned int i; + + for (i = 0; i < sensor->num_pads; i++) { + if (sensor->pads[i].flags & MEDIA_PAD_FL_SOURCE) + break; + } + if (i == sensor->num_pads) { + dev_err(camss->dev, + "No source pad in external entity\n"); + return -EINVAL; + } + + ret = media_create_pad_link(sensor, i, + input, MSM_CSIPHY_PAD_SINK, + MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED); + if (ret < 0) { + dev_err(camss->dev, + "Failed to link %s->%s entities: %d\n", + sensor->name, input->name, ret); + return ret; + } + } + } + + ret = v4l2_device_register_subdev_nodes(&camss->v4l2_dev); + if (ret < 0) + return ret; + + return media_device_register(&camss->media_dev); +} + +static const struct media_device_ops camss_media_ops = { + .link_notify = v4l2_pipeline_link_notify, +}; + +/* + * camss_probe - Probe CAMSS platform device + * @pdev: Pointer to CAMSS platform device + * + * Return 0 on success or a negative error code on failure + */ +static int camss_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct camss *camss; + int ret; + + camss = kzalloc(sizeof(*camss), GFP_KERNEL); + if (!camss) + return -ENOMEM; + + atomic_set(&camss->ref_count, 0); + camss->dev = dev; + platform_set_drvdata(pdev, camss); + + ret = camss_of_parse_ports(dev, &camss->notifier); + if (ret < 0) + return ret; + else if (ret == 0) + return -ENODEV; + + ret = camss_init_subdevices(camss); + if (ret < 0) + return ret; + + ret = dma_set_mask_and_coherent(dev, 0xffffffff); + if (ret) + return ret; + + camss->media_dev.dev = camss->dev; + strlcpy(camss->media_dev.model, "Qualcomm Camera Subsystem", + 
sizeof(camss->media_dev.model)); + camss->media_dev.ops = &camss_media_ops; + media_device_init(&camss->media_dev); + + camss->v4l2_dev.mdev = &camss->media_dev; + ret = v4l2_device_register(camss->dev, &camss->v4l2_dev); + if (ret < 0) { + dev_err(dev, "Failed to register V4L2 device: %d\n", ret); + return ret; + } + + ret = camss_register_entities(camss); + if (ret < 0) + goto err_register_entities; + + if (camss->notifier.num_subdevs) { + camss->notifier.bound = camss_subdev_notifier_bound; + camss->notifier.complete = camss_subdev_notifier_complete; + + ret = v4l2_async_notifier_register(&camss->v4l2_dev, + &camss->notifier); + if (ret) { + dev_err(dev, + "Failed to register async subdev nodes: %d\n", + ret); + goto err_register_subdevs; + } + } else { + ret = v4l2_device_register_subdev_nodes(&camss->v4l2_dev); + if (ret < 0) { + dev_err(dev, "Failed to register subdev nodes: %d\n", + ret); + goto err_register_subdevs; + } + + ret = media_device_register(&camss->media_dev); + if (ret < 0) { + dev_err(dev, "Failed to register media device: %d\n", + ret); + goto err_register_subdevs; + } + } + + return 0; + +err_register_subdevs: + camss_unregister_entities(camss); +err_register_entities: + v4l2_device_unregister(&camss->v4l2_dev); + + return ret; +} + +void camss_delete(struct camss *camss) +{ + v4l2_device_unregister(&camss->v4l2_dev); + media_device_unregister(&camss->media_dev); + media_device_cleanup(&camss->media_dev); + + kfree(camss); +} + +/* + * camss_remove - Remove CAMSS platform device + * @pdev: Pointer to CAMSS platform device + * + * Always returns 0. 
+ */ +static int camss_remove(struct platform_device *pdev) +{ + struct camss *camss = platform_get_drvdata(pdev); + + msm_vfe_stop_streaming(&camss->vfe); + + v4l2_async_notifier_unregister(&camss->notifier); + camss_unregister_entities(camss); + + if (atomic_read(&camss->ref_count) == 0) + camss_delete(camss); + + return 0; +} + +static const struct of_device_id camss_dt_match[] = { + { .compatible = "qcom,msm8916-camss" }, + { } +}; + +MODULE_DEVICE_TABLE(of, camss_dt_match); + +static struct platform_driver qcom_camss_driver = { + .probe = camss_probe, + .remove = camss_remove, + .driver = { + .name = "qcom-camss", + .of_match_table = camss_dt_match, + }, +}; + +module_platform_driver(qcom_camss_driver); + +MODULE_ALIAS("platform:qcom-camss"); +MODULE_DESCRIPTION("Qualcomm Camera Subsystem driver"); +MODULE_AUTHOR("Todor Tomov <todor.tomov@linaro.org>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/qcom/camss-8x16/camss.h b/drivers/media/platform/qcom/camss-8x16/camss.h new file mode 100644 index 000000000000..4ad223443e4b --- /dev/null +++ b/drivers/media/platform/qcom/camss-8x16/camss.h @@ -0,0 +1,106 @@ +/* + * camss.h + * + * Qualcomm MSM Camera Subsystem - Core + * + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * Copyright (C) 2015-2017 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef QC_MSM_CAMSS_H +#define QC_MSM_CAMSS_H + +#include <linux/types.h> +#include <media/v4l2-async.h> +#include <media/v4l2-device.h> +#include <media/v4l2-subdev.h> +#include <media/media-device.h> +#include <media/media-entity.h> +#include <linux/device.h> + +#include "camss-csid.h" +#include "camss-csiphy.h" +#include "camss-ispif.h" +#include "camss-vfe.h" + +#define CAMSS_CSID_NUM 2 +#define CAMSS_CSIPHY_NUM 2 + +#define to_camss(ptr_module) \ + container_of(ptr_module, struct camss, ptr_module) + +#define to_device(ptr_module) \ + (to_camss(ptr_module)->dev) + +#define module_pointer(ptr_module, index) \ + ((const struct ptr_module##_device (*)[]) &(ptr_module[-(index)])) + +#define to_camss_index(ptr_module, index) \ + container_of(module_pointer(ptr_module, index), \ + struct camss, ptr_module) + +#define to_device_index(ptr_module, index) \ + (to_camss_index(ptr_module, index)->dev) + +#define CAMSS_RES_MAX 15 + +struct resources { + char *regulator[CAMSS_RES_MAX]; + char *clock[CAMSS_RES_MAX]; + u32 clock_rate[CAMSS_RES_MAX][CAMSS_RES_MAX]; + char *reg[CAMSS_RES_MAX]; + char *interrupt[CAMSS_RES_MAX]; +}; + +struct resources_ispif { + char *clock[CAMSS_RES_MAX]; + char *clock_for_reset[CAMSS_RES_MAX]; + char *reg[CAMSS_RES_MAX]; + char *interrupt; +}; + +struct camss { + struct v4l2_device v4l2_dev; + struct v4l2_async_notifier notifier; + struct media_device media_dev; + struct device *dev; + struct csiphy_device csiphy[CAMSS_CSIPHY_NUM]; + struct csid_device csid[CAMSS_CSID_NUM]; + struct ispif_device ispif; + struct vfe_device vfe; + atomic_t ref_count; +}; + +struct camss_camera_interface { + u8 csiphy_id; + struct csiphy_csi2_cfg csi2; +}; + +struct camss_async_subdev { + struct camss_camera_interface interface; + struct v4l2_async_subdev asd; +}; + +struct camss_clock { + struct clk *clk; + const char *name; + u32 *freq; + u32 nfreqs; +}; + +void camss_add_clock_margin(u64 *rate); +int camss_enable_clocks(int nclocks, struct camss_clock 
*clock, + struct device *dev); +void camss_disable_clocks(int nclocks, struct camss_clock *clock); +int camss_get_pixel_clock(struct media_entity *entity, u32 *pixel_clock); +void camss_delete(struct camss *camss); + +#endif /* QC_MSM_CAMSS_H */ diff --git a/drivers/media/platform/qcom/venus/Makefile b/drivers/media/platform/qcom/venus/Makefile new file mode 100644 index 000000000000..0fe9afb83697 --- /dev/null +++ b/drivers/media/platform/qcom/venus/Makefile @@ -0,0 +1,11 @@ +# Makefile for Qualcomm Venus driver + +venus-core-objs += core.o helpers.o firmware.o \ + hfi_venus.o hfi_msgs.o hfi_cmds.o hfi.o + +venus-dec-objs += vdec.o vdec_ctrls.o +venus-enc-objs += venc.o venc_ctrls.o + +obj-$(CONFIG_VIDEO_QCOM_VENUS) += venus-core.o +obj-$(CONFIG_VIDEO_QCOM_VENUS) += venus-dec.o +obj-$(CONFIG_VIDEO_QCOM_VENUS) += venus-enc.o diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c new file mode 100644 index 000000000000..41eef376eb2d --- /dev/null +++ b/drivers/media/platform/qcom/venus/core.c @@ -0,0 +1,388 @@ +/* + * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * Copyright (C) 2017 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#include <linux/clk.h> +#include <linux/init.h> +#include <linux/ioctl.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/pm_runtime.h> +#include <media/videobuf2-v4l2.h> +#include <media/v4l2-mem2mem.h> +#include <media/v4l2-ioctl.h> + +#include "core.h" +#include "vdec.h" +#include "venc.h" +#include "firmware.h" + +static void venus_event_notify(struct venus_core *core, u32 event) +{ + struct venus_inst *inst; + + switch (event) { + case EVT_SYS_WATCHDOG_TIMEOUT: + case EVT_SYS_ERROR: + break; + default: + return; + } + + mutex_lock(&core->lock); + core->sys_error = true; + list_for_each_entry(inst, &core->instances, list) + inst->ops->event_notify(inst, EVT_SESSION_ERROR, NULL); + mutex_unlock(&core->lock); + + disable_irq_nosync(core->irq); + + /* + * Delay recovery to ensure venus has completed any pending cache + * operations. Without this sleep, we see device reset when firmware is + * unloaded after a system error. 
+ */ + schedule_delayed_work(&core->work, msecs_to_jiffies(100)); +} + +static const struct hfi_core_ops venus_core_ops = { + .event_notify = venus_event_notify, +}; + +static void venus_sys_error_handler(struct work_struct *work) +{ + struct venus_core *core = + container_of(work, struct venus_core, work.work); + int ret = 0; + + dev_warn(core->dev, "system error has occurred, starting recovery!\n"); + + pm_runtime_get_sync(core->dev); + + hfi_core_deinit(core, true); + hfi_destroy(core); + mutex_lock(&core->lock); + venus_shutdown(core->dev); + + pm_runtime_put_sync(core->dev); + + ret |= hfi_create(core, &venus_core_ops); + + pm_runtime_get_sync(core->dev); + + ret |= venus_boot(core->dev, core->res->fwname); + + ret |= hfi_core_resume(core, true); + + enable_irq(core->irq); + + mutex_unlock(&core->lock); + + ret |= hfi_core_init(core); + + pm_runtime_put_sync(core->dev); + + if (ret) { + disable_irq_nosync(core->irq); + dev_warn(core->dev, "recovery failed (%d)\n", ret); + schedule_delayed_work(&core->work, msecs_to_jiffies(10)); + return; + } + + mutex_lock(&core->lock); + core->sys_error = false; + mutex_unlock(&core->lock); +} + +static int venus_clks_get(struct venus_core *core) +{ + const struct venus_resources *res = core->res; + struct device *dev = core->dev; + unsigned int i; + + for (i = 0; i < res->clks_num; i++) { + core->clks[i] = devm_clk_get(dev, res->clks[i]); + if (IS_ERR(core->clks[i])) + return PTR_ERR(core->clks[i]); + } + + return 0; +} + +static int venus_clks_enable(struct venus_core *core) +{ + const struct venus_resources *res = core->res; + unsigned int i; + int ret; + + for (i = 0; i < res->clks_num; i++) { + ret = clk_prepare_enable(core->clks[i]); + if (ret) + goto err; + } + + return 0; +err: + while (i--) + clk_disable_unprepare(core->clks[i]); + + return ret; +} + +static void venus_clks_disable(struct venus_core *core) +{ + const struct venus_resources *res = core->res; + unsigned int i = res->clks_num; + + while (i--) + 
clk_disable_unprepare(core->clks[i]); +} + +static int venus_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct venus_core *core; + struct resource *r; + int ret; + + core = devm_kzalloc(dev, sizeof(*core), GFP_KERNEL); + if (!core) + return -ENOMEM; + + core->dev = dev; + platform_set_drvdata(pdev, core); + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + core->base = devm_ioremap_resource(dev, r); + if (IS_ERR(core->base)) + return PTR_ERR(core->base); + + core->irq = platform_get_irq(pdev, 0); + if (core->irq < 0) + return core->irq; + + core->res = of_device_get_match_data(dev); + if (!core->res) + return -ENODEV; + + ret = venus_clks_get(core); + if (ret) + return ret; + + ret = dma_set_mask_and_coherent(dev, core->res->dma_mask); + if (ret) + return ret; + + INIT_LIST_HEAD(&core->instances); + mutex_init(&core->lock); + INIT_DELAYED_WORK(&core->work, venus_sys_error_handler); + + ret = devm_request_threaded_irq(dev, core->irq, hfi_isr, hfi_isr_thread, + IRQF_TRIGGER_HIGH | IRQF_ONESHOT, + "venus", core); + if (ret) + return ret; + + ret = hfi_create(core, &venus_core_ops); + if (ret) + return ret; + + pm_runtime_enable(dev); + + ret = pm_runtime_get_sync(dev); + if (ret < 0) + goto err_runtime_disable; + + ret = venus_boot(dev, core->res->fwname); + if (ret) + goto err_runtime_disable; + + ret = hfi_core_resume(core, true); + if (ret) + goto err_venus_shutdown; + + ret = hfi_core_init(core); + if (ret) + goto err_venus_shutdown; + + ret = v4l2_device_register(dev, &core->v4l2_dev); + if (ret) + goto err_core_deinit; + + ret = of_platform_populate(dev->of_node, NULL, NULL, dev); + if (ret) + goto err_dev_unregister; + + ret = pm_runtime_put_sync(dev); + if (ret) + goto err_dev_unregister; + + return 0; + +err_dev_unregister: + v4l2_device_unregister(&core->v4l2_dev); +err_core_deinit: + hfi_core_deinit(core, false); +err_venus_shutdown: + venus_shutdown(dev); +err_runtime_disable: + pm_runtime_set_suspended(dev); + 
pm_runtime_disable(dev); + hfi_destroy(core); + return ret; +} + +static int venus_remove(struct platform_device *pdev) +{ + struct venus_core *core = platform_get_drvdata(pdev); + struct device *dev = core->dev; + int ret; + + ret = pm_runtime_get_sync(dev); + WARN_ON(ret < 0); + + ret = hfi_core_deinit(core, true); + WARN_ON(ret); + + hfi_destroy(core); + venus_shutdown(dev); + of_platform_depopulate(dev); + + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); + + v4l2_device_unregister(&core->v4l2_dev); + + return ret; +} + +static __maybe_unused int venus_runtime_suspend(struct device *dev) +{ + struct venus_core *core = dev_get_drvdata(dev); + int ret; + + ret = hfi_core_suspend(core); + + venus_clks_disable(core); + + return ret; +} + +static __maybe_unused int venus_runtime_resume(struct device *dev) +{ + struct venus_core *core = dev_get_drvdata(dev); + int ret; + + ret = venus_clks_enable(core); + if (ret) + return ret; + + ret = hfi_core_resume(core, false); + if (ret) + goto err_clks_disable; + + return 0; + +err_clks_disable: + venus_clks_disable(core); + return ret; +} + +static const struct dev_pm_ops venus_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) + SET_RUNTIME_PM_OPS(venus_runtime_suspend, venus_runtime_resume, NULL) +}; + +static const struct freq_tbl msm8916_freq_table[] = { + { 352800, 228570000 }, /* 1920x1088 @ 30 + 1280x720 @ 30 */ + { 244800, 160000000 }, /* 1920x1088 @ 30 */ + { 108000, 100000000 }, /* 1280x720 @ 30 */ +}; + +static const struct reg_val msm8916_reg_preset[] = { + { 0xe0020, 0x05555556 }, + { 0xe0024, 0x05555556 }, + { 0x80124, 0x00000003 }, +}; + +static const struct venus_resources msm8916_res = { + .freq_tbl = msm8916_freq_table, + .freq_tbl_size = ARRAY_SIZE(msm8916_freq_table), + .reg_tbl = msm8916_reg_preset, + .reg_tbl_size = ARRAY_SIZE(msm8916_reg_preset), + .clks = { "core", "iface", "bus", }, + .clks_num = 3, + .max_load = 352800, /* 720p@30 + 1080p@30 */ + 
.hfi_version = HFI_VERSION_1XX, + .vmem_id = VIDC_RESOURCE_NONE, + .vmem_size = 0, + .vmem_addr = 0, + .dma_mask = 0xddc00000 - 1, + .fwname = "qcom/venus-1.8/venus.mdt", +}; + +static const struct freq_tbl msm8996_freq_table[] = { + { 1944000, 490000000 }, /* 4k UHD @ 60 */ + { 972000, 320000000 }, /* 4k UHD @ 30 */ + { 489600, 150000000 }, /* 1080p @ 60 */ + { 244800, 75000000 }, /* 1080p @ 30 */ +}; + +static const struct reg_val msm8996_reg_preset[] = { + { 0x80010, 0xffffffff }, + { 0x80018, 0x00001556 }, + { 0x8001C, 0x00001556 }, +}; + +static const struct venus_resources msm8996_res = { + .freq_tbl = msm8996_freq_table, + .freq_tbl_size = ARRAY_SIZE(msm8996_freq_table), + .reg_tbl = msm8996_reg_preset, + .reg_tbl_size = ARRAY_SIZE(msm8996_reg_preset), + .clks = {"core", "iface", "bus", "mbus" }, + .clks_num = 4, + .max_load = 2563200, + .hfi_version = HFI_VERSION_3XX, + .vmem_id = VIDC_RESOURCE_NONE, + .vmem_size = 0, + .vmem_addr = 0, + .dma_mask = 0xddc00000 - 1, + .fwname = "qcom/venus-4.2/venus.mdt", +}; + +static const struct of_device_id venus_dt_match[] = { + { .compatible = "qcom,msm8916-venus", .data = &msm8916_res, }, + { .compatible = "qcom,msm8996-venus", .data = &msm8996_res, }, + { } +}; +MODULE_DEVICE_TABLE(of, venus_dt_match); + +static struct platform_driver qcom_venus_driver = { + .probe = venus_probe, + .remove = venus_remove, + .driver = { + .name = "qcom-venus", + .of_match_table = venus_dt_match, + .pm = &venus_pm_ops, + }, +}; +module_platform_driver(qcom_venus_driver); + +MODULE_ALIAS("platform:qcom-venus"); +MODULE_DESCRIPTION("Qualcomm Venus video encoder and decoder driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h new file mode 100644 index 000000000000..cba092bcb76d --- /dev/null +++ b/drivers/media/platform/qcom/venus/core.h @@ -0,0 +1,323 @@ +/* + * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. 
+ * Copyright (C) 2017 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __VENUS_CORE_H_ +#define __VENUS_CORE_H_ + +#include <linux/list.h> +#include <media/videobuf2-v4l2.h> +#include <media/v4l2-ctrls.h> +#include <media/v4l2-device.h> + +#include "hfi.h" + +#define VIDC_CLKS_NUM_MAX 4 + +struct freq_tbl { + unsigned int load; + unsigned long freq; +}; + +struct reg_val { + u32 reg; + u32 value; +}; + +struct venus_resources { + u64 dma_mask; + const struct freq_tbl *freq_tbl; + unsigned int freq_tbl_size; + const struct reg_val *reg_tbl; + unsigned int reg_tbl_size; + const char * const clks[VIDC_CLKS_NUM_MAX]; + unsigned int clks_num; + enum hfi_version hfi_version; + u32 max_load; + unsigned int vmem_id; + u32 vmem_size; + u32 vmem_addr; + const char *fwname; +}; + +struct venus_format { + u32 pixfmt; + unsigned int num_planes; + u32 type; +}; + +/** + * struct venus_core - holds core parameters valid for all instances + * + * @base: IO memory base address + * @irq: Venus irq + * @clks: an array of struct clk pointers + * @core0_clk: a struct clk pointer for core0 + * @core1_clk: a struct clk pointer for core1 + * @vdev_dec: a reference to video device structure for decoder instances + * @vdev_enc: a reference to video device structure for encoder instances + * @v4l2_dev: a holder for v4l2 device structure + * @res: a reference to venus resources structure + * @dev: convenience struct device pointer + * @dev_dec: convenience struct device pointer for decoder device + * @dev_enc: convenience struct device pointer for 
encoder device
+ * @lock:	a lock for this structure
+ * @instances:	a list_head of all instances
+ * @insts_count:	num of instances
+ * @state:	the state of the venus core
+ * @done:	a completion for sync HFI operations
+ * @error:	an error returned during last HFI sync operations
+ * @sys_error:	an error flag that signals system error event
+ * @core_ops:	the core operations
+ * @enc_codecs:	encoders supported by this core
+ * @dec_codecs:	decoders supported by this core
+ * @max_sessions_supported:	holds the maximum number of sessions
+ * @core_caps:	core capabilities
+ * @priv:	a private field for HFI operations
+ * @ops:	the core HFI operations
+ * @work:	a delayed work for handling system fatal error
+ */
+struct venus_core {
+	void __iomem *base;
+	int irq;
+	struct clk *clks[VIDC_CLKS_NUM_MAX];
+	struct clk *core0_clk;
+	struct clk *core1_clk;
+	struct video_device *vdev_dec;
+	struct video_device *vdev_enc;
+	struct v4l2_device v4l2_dev;
+	const struct venus_resources *res;
+	struct device *dev;
+	struct device *dev_dec;
+	struct device *dev_enc;
+	struct mutex lock;
+	struct list_head instances;
+	atomic_t insts_count;
+	unsigned int state;
+	struct completion done;
+	unsigned int error;
+	bool sys_error;
+	const struct hfi_core_ops *core_ops;
+	u32 enc_codecs;
+	u32 dec_codecs;
+	unsigned int max_sessions_supported;
+#define ENC_ROTATION_CAPABILITY	0x1
+#define ENC_SCALING_CAPABILITY	0x2
+#define ENC_DEINTERLACE_CAPABILITY	0x4
+#define DEC_MULTI_STREAM_CAPABILITY	0x8
+	unsigned int core_caps;
+	void *priv;
+	const struct hfi_ops *ops;
+	struct delayed_work work;
+};
+
+struct vdec_controls {
+	u32 post_loop_deb_mode;
+	u32 profile;
+	u32 level;
+};
+
+struct venc_controls {
+	u16 gop_size;
+	u32 num_p_frames;
+	u32 num_b_frames;
+	u32 bitrate_mode;
+	u32 bitrate;
+	u32 bitrate_peak;
+
+	u32 h264_i_period;
+	u32 h264_entropy_mode;
+	u32 h264_i_qp;
+	u32 h264_p_qp;
+	u32 h264_b_qp;
+	u32 h264_min_qp;
+	u32 h264_max_qp;
+	u32 h264_loop_filter_mode;
+	u32 
h264_loop_filter_alpha;
+	u32 h264_loop_filter_beta;
+
+	u32 vp8_min_qp;
+	u32 vp8_max_qp;
+
+	u32 multi_slice_mode;
+	u32 multi_slice_max_bytes;
+	u32 multi_slice_max_mb;
+
+	u32 header_mode;
+
+	struct {
+		u32 mpeg4;
+		u32 h264;
+		u32 vpx;
+	} profile;
+	struct {
+		u32 mpeg4;
+		u32 h264;
+	} level;
+};
+
+struct venus_buffer {
+	struct vb2_v4l2_buffer vb;
+	struct list_head list;
+	dma_addr_t dma_addr;
+	u32 size;
+	struct list_head reg_list;
+	u32 flags;
+	struct list_head ref_list;
+};
+
+#define to_venus_buffer(ptr)	container_of(ptr, struct venus_buffer, vb)
+
+/**
+ * struct venus_inst - holds per instance parameters
+ *
+ * @list:	used to attach an instance to the core
+ * @lock:	instance lock
+ * @core:	a reference to the core struct
+ * @internalbufs:	a list of internal buffers
+ * @registeredbufs:	a list of registered capture buffers
+ * @delayed_process:	a list of delayed buffers
+ * @delayed_process_work:	a work_struct for processing delayed buffers
+ * @ctrl_handler:	v4l control handler
+ * @controls:	a union of decoder and encoder control parameters
+ * @fh:	a holder of v4l file handle structure
+ * @streamon_cap:	stream on flag for capture queue
+ * @streamon_out:	stream on flag for output queue
+ * @cmd_stop:	a flag to signal encoder/decoder commands
+ * @width:	current capture width
+ * @height:	current capture height
+ * @out_width:	current output width
+ * @out_height:	current output height
+ * @colorspace:	current color space
+ * @quantization:	current quantization
+ * @xfer_func:	current xfer function
+ * @fps:	holds current FPS
+ * @timeperframe:	holds current time per frame structure
+ * @fmt_out:	a reference to output format structure
+ * @fmt_cap:	a reference to capture format structure
+ * @num_input_bufs:	holds number of input buffers
+ * @num_output_bufs:	holds number of output buffers
+ * @input_buf_size:	holds input buffer size
+ * @output_buf_size:	holds output buffer size
+ * @reconfig:	a flag raised by decoder when the stream 
resolution changed
+ * @reconfig_width:	holds the new width
+ * @reconfig_height:	holds the new height
+ * @sequence_cap:	a sequence counter for capture queue
+ * @sequence_out:	a sequence counter for output queue
+ * @m2m_dev:	a reference to m2m device structure
+ * @m2m_ctx:	a reference to m2m context structure
+ * @state:	current state of the instance
+ * @done:	a completion for sync HFI operation
+ * @error:	an error returned during last HFI sync operation
+ * @session_error:	a flag raised by HFI interface in case of session error
+ * @ops:	HFI operations
+ * @priv:	a private field for HFI operations callbacks
+ * @session_type:	the type of the session (decoder or encoder)
+ * @hprop:	a union used as a holder by get property
+ * @cap_width:	width capability
+ * @cap_height:	height capability
+ * @cap_mbs_per_frame:	macroblocks per frame capability
+ * @cap_mbs_per_sec:	macroblocks per second capability
+ * @cap_framerate:	framerate capability
+ * @cap_scale_x:	horizontal scaling capability
+ * @cap_scale_y:	vertical scaling capability
+ * @cap_bitrate:	bitrate capability
+ * @cap_hier_p:	hier capability
+ * @cap_ltr_count:	LTR count capability
+ * @cap_secure_output2_threshold:	secure OUTPUT2 threshold capability
+ * @cap_bufs_mode_static:	buffers allocation mode capability
+ * @cap_bufs_mode_dynamic:	buffers allocation mode capability
+ * @pl_count:	count of supported profiles/levels
+ * @pl:	supported profiles/levels
+ * @bufreq:	holds buffer requirements
+ */
+struct venus_inst {
+	struct list_head list;
+	struct mutex lock;
+	struct venus_core *core;
+	struct list_head internalbufs;
+	struct list_head registeredbufs;
+	struct list_head delayed_process;
+	struct work_struct delayed_process_work;
+
+	struct v4l2_ctrl_handler ctrl_handler;
+	union {
+		struct vdec_controls dec;
+		struct venc_controls enc;
+	} controls;
+	struct v4l2_fh fh;
+	unsigned int streamon_cap, streamon_out;
+	bool cmd_stop;
+	u32 width;
+	u32 height;
+	u32 out_width;
+	u32 out_height;
+	u32 
colorspace; + u8 ycbcr_enc; + u8 quantization; + u8 xfer_func; + u64 fps; + struct v4l2_fract timeperframe; + const struct venus_format *fmt_out; + const struct venus_format *fmt_cap; + unsigned int num_input_bufs; + unsigned int num_output_bufs; + unsigned int input_buf_size; + unsigned int output_buf_size; + bool reconfig; + u32 reconfig_width; + u32 reconfig_height; + u32 sequence_cap; + u32 sequence_out; + struct v4l2_m2m_dev *m2m_dev; + struct v4l2_m2m_ctx *m2m_ctx; + unsigned int state; + struct completion done; + unsigned int error; + bool session_error; + const struct hfi_inst_ops *ops; + u32 session_type; + union hfi_get_property hprop; + struct hfi_capability cap_width; + struct hfi_capability cap_height; + struct hfi_capability cap_mbs_per_frame; + struct hfi_capability cap_mbs_per_sec; + struct hfi_capability cap_framerate; + struct hfi_capability cap_scale_x; + struct hfi_capability cap_scale_y; + struct hfi_capability cap_bitrate; + struct hfi_capability cap_hier_p; + struct hfi_capability cap_ltr_count; + struct hfi_capability cap_secure_output2_threshold; + bool cap_bufs_mode_static; + bool cap_bufs_mode_dynamic; + unsigned int pl_count; + struct hfi_profile_level pl[HFI_MAX_PROFILE_COUNT]; + struct hfi_buffer_requirements bufreq[HFI_BUFFER_TYPE_MAX]; +}; + +#define ctrl_to_inst(ctrl) \ + container_of((ctrl)->handler, struct venus_inst, ctrl_handler) + +static inline struct venus_inst *to_inst(struct file *filp) +{ + return container_of(filp->private_data, struct venus_inst, fh); +} + +static inline void *to_hfi_priv(struct venus_core *core) +{ + return core->priv; +} + +#endif diff --git a/drivers/media/platform/qcom/venus/firmware.c b/drivers/media/platform/qcom/venus/firmware.c new file mode 100644 index 000000000000..d6d9560c1c19 --- /dev/null +++ b/drivers/media/platform/qcom/venus/firmware.c @@ -0,0 +1,98 @@ +/* + * Copyright (C) 2017 Linaro Ltd. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/device.h> +#include <linux/firmware.h> +#include <linux/kernel.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/qcom_scm.h> +#include <linux/sizes.h> +#include <linux/soc/qcom/mdt_loader.h> + +#include "firmware.h" + +#define VENUS_PAS_ID 9 +#define VENUS_FW_MEM_SIZE (6 * SZ_1M) + +int venus_boot(struct device *dev, const char *fwname) +{ + const struct firmware *mdt; + struct device_node *node; + phys_addr_t mem_phys; + struct resource r; + ssize_t fw_size; + size_t mem_size; + void *mem_va; + int ret; + + if (!qcom_scm_is_available()) + return -EPROBE_DEFER; + + node = of_parse_phandle(dev->of_node, "memory-region", 0); + if (!node) { + dev_err(dev, "no memory-region specified\n"); + return -EINVAL; + } + + ret = of_address_to_resource(node, 0, &r); + if (ret) + return ret; + + mem_phys = r.start; + mem_size = resource_size(&r); + + if (mem_size < VENUS_FW_MEM_SIZE) + return -EINVAL; + + mem_va = memremap(r.start, mem_size, MEMREMAP_WC); + if (!mem_va) { + dev_err(dev, "unable to map memory region: %pa+%zx\n", + &r.start, mem_size); + return -ENOMEM; + } + + ret = request_firmware(&mdt, fwname, dev); + if (ret < 0) + goto err_unmap; + + fw_size = qcom_mdt_get_size(mdt); + if (fw_size < 0) { + ret = fw_size; + release_firmware(mdt); + goto err_unmap; + } + + ret = qcom_mdt_load(dev, mdt, fwname, VENUS_PAS_ID, mem_va, mem_phys, + mem_size); + + release_firmware(mdt); + + if (ret) + goto err_unmap; + + ret = 
qcom_scm_pas_auth_and_reset(VENUS_PAS_ID); + if (ret) + goto err_unmap; + +err_unmap: + memunmap(mem_va); + return ret; +} + +int venus_shutdown(struct device *dev) +{ + return qcom_scm_pas_shutdown(VENUS_PAS_ID); +} diff --git a/drivers/media/platform/qcom/venus/firmware.h b/drivers/media/platform/qcom/venus/firmware.h new file mode 100644 index 000000000000..428efb56d339 --- /dev/null +++ b/drivers/media/platform/qcom/venus/firmware.h @@ -0,0 +1,22 @@ +/* + * Copyright (C) 2017 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef __VENUS_FIRMWARE_H__ +#define __VENUS_FIRMWARE_H__ + +struct device; + +int venus_boot(struct device *dev, const char *fwname); +int venus_shutdown(struct device *dev); + +#endif diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c new file mode 100644 index 000000000000..cac429be5609 --- /dev/null +++ b/drivers/media/platform/qcom/venus/helpers.c @@ -0,0 +1,778 @@ +/* + * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * Copyright (C) 2017 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#include <linux/clk.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/pm_runtime.h> +#include <linux/slab.h> +#include <media/videobuf2-dma-sg.h> +#include <media/v4l2-mem2mem.h> +#include <asm/div64.h> + +#include "core.h" +#include "helpers.h" +#include "hfi_helper.h" + +struct intbuf { + struct list_head list; + u32 type; + size_t size; + void *va; + dma_addr_t da; + unsigned long attrs; +}; + +bool venus_helper_check_codec(struct venus_inst *inst, u32 v4l2_pixfmt) +{ + struct venus_core *core = inst->core; + u32 session_type = inst->session_type; + u32 codec = 0; + + switch (v4l2_pixfmt) { + case V4L2_PIX_FMT_H264: + codec = HFI_VIDEO_CODEC_H264; + break; + case V4L2_PIX_FMT_H263: + codec = HFI_VIDEO_CODEC_H263; + break; + case V4L2_PIX_FMT_MPEG1: + codec = HFI_VIDEO_CODEC_MPEG1; + break; + case V4L2_PIX_FMT_MPEG2: + codec = HFI_VIDEO_CODEC_MPEG2; + break; + case V4L2_PIX_FMT_MPEG4: + codec = HFI_VIDEO_CODEC_MPEG4; + break; + case V4L2_PIX_FMT_VC1_ANNEX_G: + case V4L2_PIX_FMT_VC1_ANNEX_L: + codec = HFI_VIDEO_CODEC_VC1; + break; + case V4L2_PIX_FMT_VP8: + codec = HFI_VIDEO_CODEC_VP8; + break; + case V4L2_PIX_FMT_VP9: + codec = HFI_VIDEO_CODEC_VP9; + break; + case V4L2_PIX_FMT_XVID: + codec = HFI_VIDEO_CODEC_DIVX; + break; + default: + break; + } + + if (!codec) + return false; + + if (session_type == VIDC_SESSION_TYPE_ENC && core->enc_codecs & codec) + return true; + + if (session_type == VIDC_SESSION_TYPE_DEC && core->dec_codecs & codec) + return true; + + return false; +} +EXPORT_SYMBOL_GPL(venus_helper_check_codec); + +static int intbufs_set_buffer(struct venus_inst *inst, u32 type) +{ + struct venus_core *core = inst->core; + struct device *dev = core->dev; + struct hfi_buffer_requirements bufreq; + struct hfi_buffer_desc bd; + struct intbuf *buf; + unsigned int i; + int ret; + + ret = venus_helper_get_bufreq(inst, type, &bufreq); + if (ret) + return 0; + + if (!bufreq.size) + return 0; + + for (i = 0; i < bufreq.count_actual; 
i++) { + buf = kzalloc(sizeof(*buf), GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto fail; + } + + buf->type = bufreq.type; + buf->size = bufreq.size; + buf->attrs = DMA_ATTR_WRITE_COMBINE | + DMA_ATTR_NO_KERNEL_MAPPING; + buf->va = dma_alloc_attrs(dev, buf->size, &buf->da, GFP_KERNEL, + buf->attrs); + if (!buf->va) { + ret = -ENOMEM; + goto fail; + } + + memset(&bd, 0, sizeof(bd)); + bd.buffer_size = buf->size; + bd.buffer_type = buf->type; + bd.num_buffers = 1; + bd.device_addr = buf->da; + + ret = hfi_session_set_buffers(inst, &bd); + if (ret) { + dev_err(dev, "set session buffers failed\n"); + goto dma_free; + } + + list_add_tail(&buf->list, &inst->internalbufs); + } + + return 0; + +dma_free: + dma_free_attrs(dev, buf->size, buf->va, buf->da, buf->attrs); +fail: + kfree(buf); + return ret; +} + +static int intbufs_unset_buffers(struct venus_inst *inst) +{ + struct hfi_buffer_desc bd = {0}; + struct intbuf *buf, *n; + int ret = 0; + + list_for_each_entry_safe(buf, n, &inst->internalbufs, list) { + bd.buffer_size = buf->size; + bd.buffer_type = buf->type; + bd.num_buffers = 1; + bd.device_addr = buf->da; + bd.response_required = true; + + ret = hfi_session_unset_buffers(inst, &bd); + + list_del_init(&buf->list); + dma_free_attrs(inst->core->dev, buf->size, buf->va, buf->da, + buf->attrs); + kfree(buf); + } + + return ret; +} + +static const unsigned int intbuf_types[] = { + HFI_BUFFER_INTERNAL_SCRATCH, + HFI_BUFFER_INTERNAL_SCRATCH_1, + HFI_BUFFER_INTERNAL_SCRATCH_2, + HFI_BUFFER_INTERNAL_PERSIST, + HFI_BUFFER_INTERNAL_PERSIST_1, +}; + +static int intbufs_alloc(struct venus_inst *inst) +{ + unsigned int i; + int ret; + + for (i = 0; i < ARRAY_SIZE(intbuf_types); i++) { + ret = intbufs_set_buffer(inst, intbuf_types[i]); + if (ret) + goto error; + } + + return 0; + +error: + intbufs_unset_buffers(inst); + return ret; +} + +static int intbufs_free(struct venus_inst *inst) +{ + return intbufs_unset_buffers(inst); +} + +static u32 load_per_instance(struct 
venus_inst *inst) +{ + u32 mbs; + + if (!inst || !(inst->state >= INST_INIT && inst->state < INST_STOP)) + return 0; + + mbs = (ALIGN(inst->width, 16) / 16) * (ALIGN(inst->height, 16) / 16); + + return mbs * inst->fps; +} + +static u32 load_per_type(struct venus_core *core, u32 session_type) +{ + struct venus_inst *inst = NULL; + u32 mbs_per_sec = 0; + + mutex_lock(&core->lock); + list_for_each_entry(inst, &core->instances, list) { + if (inst->session_type != session_type) + continue; + + mbs_per_sec += load_per_instance(inst); + } + mutex_unlock(&core->lock); + + return mbs_per_sec; +} + +static int load_scale_clocks(struct venus_core *core) +{ + const struct freq_tbl *table = core->res->freq_tbl; + unsigned int num_rows = core->res->freq_tbl_size; + unsigned long freq = table[0].freq; + struct clk *clk = core->clks[0]; + struct device *dev = core->dev; + u32 mbs_per_sec; + unsigned int i; + int ret; + + mbs_per_sec = load_per_type(core, VIDC_SESSION_TYPE_ENC) + + load_per_type(core, VIDC_SESSION_TYPE_DEC); + + if (mbs_per_sec > core->res->max_load) + dev_warn(dev, "HW is overloaded, needed: %d max: %d\n", + mbs_per_sec, core->res->max_load); + + if (!mbs_per_sec && num_rows > 1) { + freq = table[num_rows - 1].freq; + goto set_freq; + } + + for (i = 0; i < num_rows; i++) { + if (mbs_per_sec > table[i].load) + break; + freq = table[i].freq; + } + +set_freq: + + if (core->res->hfi_version == HFI_VERSION_3XX) { + ret = clk_set_rate(clk, freq); + ret |= clk_set_rate(core->core0_clk, freq); + ret |= clk_set_rate(core->core1_clk, freq); + } else { + ret = clk_set_rate(clk, freq); + } + + if (ret) { + dev_err(dev, "failed to set clock rate %lu (%d)\n", freq, ret); + return ret; + } + + return 0; +} + +static void fill_buffer_desc(const struct venus_buffer *buf, + struct hfi_buffer_desc *bd, bool response) +{ + memset(bd, 0, sizeof(*bd)); + bd->buffer_type = HFI_BUFFER_OUTPUT; + bd->buffer_size = buf->size; + bd->num_buffers = 1; + bd->device_addr = buf->dma_addr; + 
bd->response_required = response; +} + +static void return_buf_error(struct venus_inst *inst, + struct vb2_v4l2_buffer *vbuf) +{ + struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx; + + if (vbuf->vb2_buf.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + v4l2_m2m_src_buf_remove_by_buf(m2m_ctx, vbuf); + else + v4l2_m2m_dst_buf_remove_by_buf(m2m_ctx, vbuf); + + v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR); +} + +static int +session_process_buf(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf) +{ + struct venus_buffer *buf = to_venus_buffer(vbuf); + struct vb2_buffer *vb = &vbuf->vb2_buf; + unsigned int type = vb->type; + struct hfi_frame_data fdata; + int ret; + + memset(&fdata, 0, sizeof(fdata)); + fdata.alloc_len = buf->size; + fdata.device_addr = buf->dma_addr; + fdata.timestamp = vb->timestamp; + do_div(fdata.timestamp, NSEC_PER_USEC); + fdata.flags = 0; + fdata.clnt_data = vbuf->vb2_buf.index; + + if (!fdata.timestamp) + fdata.flags |= HFI_BUFFERFLAG_TIMESTAMPINVALID; + + if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + fdata.buffer_type = HFI_BUFFER_INPUT; + fdata.filled_len = vb2_get_plane_payload(vb, 0); + fdata.offset = vb->planes[0].data_offset; + + if (vbuf->flags & V4L2_BUF_FLAG_LAST || !fdata.filled_len) + fdata.flags |= HFI_BUFFERFLAG_EOS; + } else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + fdata.buffer_type = HFI_BUFFER_OUTPUT; + fdata.filled_len = 0; + fdata.offset = 0; + } + + ret = hfi_session_process_buf(inst, &fdata); + if (ret) + return ret; + + return 0; +} + +static inline int is_reg_unreg_needed(struct venus_inst *inst) +{ + if (inst->session_type == VIDC_SESSION_TYPE_DEC && + inst->core->res->hfi_version == HFI_VERSION_3XX) + return 0; + + if (inst->session_type == VIDC_SESSION_TYPE_DEC && + inst->cap_bufs_mode_dynamic && + inst->core->res->hfi_version == HFI_VERSION_1XX) + return 0; + + return 1; +} + +static int session_unregister_bufs(struct venus_inst *inst) +{ + struct venus_buffer *buf, *n; + struct hfi_buffer_desc bd; + int ret = 
0; + + if (!is_reg_unreg_needed(inst)) + return 0; + + list_for_each_entry_safe(buf, n, &inst->registeredbufs, reg_list) { + fill_buffer_desc(buf, &bd, true); + ret = hfi_session_unset_buffers(inst, &bd); + list_del_init(&buf->reg_list); + } + + return ret; +} + +static int session_register_bufs(struct venus_inst *inst) +{ + struct venus_core *core = inst->core; + struct device *dev = core->dev; + struct hfi_buffer_desc bd; + struct venus_buffer *buf; + int ret = 0; + + if (!is_reg_unreg_needed(inst)) + return 0; + + list_for_each_entry(buf, &inst->registeredbufs, reg_list) { + fill_buffer_desc(buf, &bd, false); + ret = hfi_session_set_buffers(inst, &bd); + if (ret) { + dev_err(dev, "%s: set buffer failed\n", __func__); + break; + } + } + + return ret; +} + +int venus_helper_get_bufreq(struct venus_inst *inst, u32 type, + struct hfi_buffer_requirements *req) +{ + u32 ptype = HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS; + union hfi_get_property hprop; + unsigned int i; + int ret; + + if (req) + memset(req, 0, sizeof(*req)); + + ret = hfi_session_get_property(inst, ptype, &hprop); + if (ret) + return ret; + + ret = -EINVAL; + + for (i = 0; i < HFI_BUFFER_TYPE_MAX; i++) { + if (hprop.bufreq[i].type != type) + continue; + + if (req) + memcpy(req, &hprop.bufreq[i], sizeof(*req)); + ret = 0; + break; + } + + return ret; +} +EXPORT_SYMBOL_GPL(venus_helper_get_bufreq); + +int venus_helper_set_input_resolution(struct venus_inst *inst, + unsigned int width, unsigned int height) +{ + u32 ptype = HFI_PROPERTY_PARAM_FRAME_SIZE; + struct hfi_framesize fs; + + fs.buffer_type = HFI_BUFFER_INPUT; + fs.width = width; + fs.height = height; + + return hfi_session_set_property(inst, ptype, &fs); +} +EXPORT_SYMBOL_GPL(venus_helper_set_input_resolution); + +int venus_helper_set_output_resolution(struct venus_inst *inst, + unsigned int width, unsigned int height) +{ + u32 ptype = HFI_PROPERTY_PARAM_FRAME_SIZE; + struct hfi_framesize fs; + + fs.buffer_type = HFI_BUFFER_OUTPUT; + fs.width = 
width; + fs.height = height; + + return hfi_session_set_property(inst, ptype, &fs); +} +EXPORT_SYMBOL_GPL(venus_helper_set_output_resolution); + +int venus_helper_set_num_bufs(struct venus_inst *inst, unsigned int input_bufs, + unsigned int output_bufs) +{ + u32 ptype = HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL; + struct hfi_buffer_count_actual buf_count; + int ret; + + buf_count.type = HFI_BUFFER_INPUT; + buf_count.count_actual = input_bufs; + + ret = hfi_session_set_property(inst, ptype, &buf_count); + if (ret) + return ret; + + buf_count.type = HFI_BUFFER_OUTPUT; + buf_count.count_actual = output_bufs; + + return hfi_session_set_property(inst, ptype, &buf_count); +} +EXPORT_SYMBOL_GPL(venus_helper_set_num_bufs); + +int venus_helper_set_color_format(struct venus_inst *inst, u32 pixfmt) +{ + struct hfi_uncompressed_format_select fmt; + u32 ptype = HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT; + int ret; + + if (inst->session_type == VIDC_SESSION_TYPE_DEC) + fmt.buffer_type = HFI_BUFFER_OUTPUT; + else if (inst->session_type == VIDC_SESSION_TYPE_ENC) + fmt.buffer_type = HFI_BUFFER_INPUT; + else + return -EINVAL; + + switch (pixfmt) { + case V4L2_PIX_FMT_NV12: + fmt.format = HFI_COLOR_FORMAT_NV12; + break; + case V4L2_PIX_FMT_NV21: + fmt.format = HFI_COLOR_FORMAT_NV21; + break; + default: + return -EINVAL; + } + + ret = hfi_session_set_property(inst, ptype, &fmt); + if (ret) + return ret; + + return 0; +} +EXPORT_SYMBOL_GPL(venus_helper_set_color_format); + +static void delayed_process_buf_func(struct work_struct *work) +{ + struct venus_buffer *buf, *n; + struct venus_inst *inst; + int ret; + + inst = container_of(work, struct venus_inst, delayed_process_work); + + mutex_lock(&inst->lock); + + if (!(inst->streamon_out & inst->streamon_cap)) + goto unlock; + + list_for_each_entry_safe(buf, n, &inst->delayed_process, ref_list) { + if (buf->flags & HFI_BUFFERFLAG_READONLY) + continue; + + ret = session_process_buf(inst, &buf->vb); + if (ret) + return_buf_error(inst, 
&buf->vb);
+
+		list_del_init(&buf->ref_list);
+	}
+unlock:
+	mutex_unlock(&inst->lock);
+}
+
+/*
+ * Firmware has released its reference on buffer @idx: clear the READONLY
+ * flag and kick the delayed-process worker so the buffer can be requeued.
+ * NOTE(review): the registeredbufs walk here is not under inst->lock,
+ * unlike the other users of that list — confirm this is intentional.
+ */
+void venus_helper_release_buf_ref(struct venus_inst *inst, unsigned int idx)
+{
+	struct venus_buffer *buf;
+
+	list_for_each_entry(buf, &inst->registeredbufs, reg_list) {
+		if (buf->vb.vb2_buf.index == idx) {
+			buf->flags &= ~HFI_BUFFERFLAG_READONLY;
+			schedule_work(&inst->delayed_process_work);
+			break;
+		}
+	}
+}
+EXPORT_SYMBOL_GPL(venus_helper_release_buf_ref);
+
+/* Mark @vbuf as still referenced (read-only) by the firmware. */
+void venus_helper_acquire_buf_ref(struct vb2_v4l2_buffer *vbuf)
+{
+	struct venus_buffer *buf = to_venus_buffer(vbuf);
+
+	buf->flags |= HFI_BUFFERFLAG_READONLY;
+}
+EXPORT_SYMBOL_GPL(venus_helper_acquire_buf_ref);
+
+/*
+ * If the firmware still holds a reference on @vbuf, park it on the
+ * delayed_process list (processed later by delayed_process_buf_func)
+ * and return 1; return 0 when the buffer can be processed immediately.
+ */
+static int is_buf_refed(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf)
+{
+	struct venus_buffer *buf = to_venus_buffer(vbuf);
+
+	if (buf->flags & HFI_BUFFERFLAG_READONLY) {
+		list_add_tail(&buf->ref_list, &inst->delayed_process);
+		schedule_work(&inst->delayed_process_work);
+		return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Remove and return the m2m buffer with index @idx from the src queue
+ * (OUTPUT type) or dst queue (anything else).
+ */
+struct vb2_v4l2_buffer *
+venus_helper_find_buf(struct venus_inst *inst, unsigned int type, u32 idx)
+{
+	struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+
+	if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+		return v4l2_m2m_src_buf_remove_by_idx(m2m_ctx, idx);
+	else
+		return v4l2_m2m_dst_buf_remove_by_idx(m2m_ctx, idx);
+}
+EXPORT_SYMBOL_GPL(venus_helper_find_buf);
+
+/*
+ * One-time vb2 buffer setup: cache the plane size and dma address, and
+ * track CAPTURE buffers on the instance's registered-buffers list.
+ */
+int venus_helper_vb2_buf_init(struct vb2_buffer *vb)
+{
+	struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
+	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+	struct venus_buffer *buf = to_venus_buffer(vbuf);
+	struct sg_table *sgt;
+
+	sgt = vb2_dma_sg_plane_desc(vb, 0);
+	if (!sgt)
+		return -EFAULT;
+
+	buf->size = vb2_plane_size(vb, 0);
+	buf->dma_addr = sg_dma_address(sgt->sgl);
+
+	if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+		list_add_tail(&buf->reg_list, &inst->registeredbufs);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(venus_helper_vb2_buf_init);
+
+int venus_helper_vb2_buf_prepare(struct 
vb2_buffer *vb) +{ + struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue); + + if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE && + vb2_plane_size(vb, 0) < inst->output_buf_size) + return -EINVAL; + if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE && + vb2_plane_size(vb, 0) < inst->input_buf_size) + return -EINVAL; + + return 0; +} +EXPORT_SYMBOL_GPL(venus_helper_vb2_buf_prepare); + +void venus_helper_vb2_buf_queue(struct vb2_buffer *vb) +{ + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue); + struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx; + int ret; + + mutex_lock(&inst->lock); + + if (inst->cmd_stop) { + vbuf->flags |= V4L2_BUF_FLAG_LAST; + v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE); + inst->cmd_stop = false; + goto unlock; + } + + v4l2_m2m_buf_queue(m2m_ctx, vbuf); + + if (!(inst->streamon_out & inst->streamon_cap)) + goto unlock; + + ret = is_buf_refed(inst, vbuf); + if (ret) + goto unlock; + + ret = session_process_buf(inst, vbuf); + if (ret) + return_buf_error(inst, vbuf); + +unlock: + mutex_unlock(&inst->lock); +} +EXPORT_SYMBOL_GPL(venus_helper_vb2_buf_queue); + +void venus_helper_buffers_done(struct venus_inst *inst, + enum vb2_buffer_state state) +{ + struct vb2_v4l2_buffer *buf; + + while ((buf = v4l2_m2m_src_buf_remove(inst->m2m_ctx))) + v4l2_m2m_buf_done(buf, state); + while ((buf = v4l2_m2m_dst_buf_remove(inst->m2m_ctx))) + v4l2_m2m_buf_done(buf, state); +} +EXPORT_SYMBOL_GPL(venus_helper_buffers_done); + +void venus_helper_vb2_stop_streaming(struct vb2_queue *q) +{ + struct venus_inst *inst = vb2_get_drv_priv(q); + struct venus_core *core = inst->core; + int ret; + + mutex_lock(&inst->lock); + + if (inst->streamon_out & inst->streamon_cap) { + ret = hfi_session_stop(inst); + ret |= hfi_session_unload_res(inst); + ret |= session_unregister_bufs(inst); + ret |= intbufs_free(inst); + ret |= hfi_session_deinit(inst); + + if (inst->session_error || core->sys_error) + ret = -EIO; + 
+ if (ret) + hfi_session_abort(inst); + + load_scale_clocks(core); + INIT_LIST_HEAD(&inst->registeredbufs); + } + + venus_helper_buffers_done(inst, VB2_BUF_STATE_ERROR); + + if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + inst->streamon_out = 0; + else + inst->streamon_cap = 0; + + mutex_unlock(&inst->lock); +} +EXPORT_SYMBOL_GPL(venus_helper_vb2_stop_streaming); + +int venus_helper_vb2_start_streaming(struct venus_inst *inst) +{ + struct venus_core *core = inst->core; + int ret; + + ret = intbufs_alloc(inst); + if (ret) + return ret; + + ret = session_register_bufs(inst); + if (ret) + goto err_bufs_free; + + load_scale_clocks(core); + + ret = hfi_session_load_res(inst); + if (ret) + goto err_unreg_bufs; + + ret = hfi_session_start(inst); + if (ret) + goto err_unload_res; + + return 0; + +err_unload_res: + hfi_session_unload_res(inst); +err_unreg_bufs: + session_unregister_bufs(inst); +err_bufs_free: + intbufs_free(inst); + return ret; +} +EXPORT_SYMBOL_GPL(venus_helper_vb2_start_streaming); + +void venus_helper_m2m_device_run(void *priv) +{ + struct venus_inst *inst = priv; + struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx; + struct v4l2_m2m_buffer *buf, *n; + int ret; + + mutex_lock(&inst->lock); + + v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, buf, n) { + ret = session_process_buf(inst, &buf->vb); + if (ret) + return_buf_error(inst, &buf->vb); + } + + v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buf, n) { + ret = session_process_buf(inst, &buf->vb); + if (ret) + return_buf_error(inst, &buf->vb); + } + + mutex_unlock(&inst->lock); +} +EXPORT_SYMBOL_GPL(venus_helper_m2m_device_run); + +void venus_helper_m2m_job_abort(void *priv) +{ + struct venus_inst *inst = priv; + + v4l2_m2m_job_finish(inst->m2m_dev, inst->m2m_ctx); +} +EXPORT_SYMBOL_GPL(venus_helper_m2m_job_abort); + +void venus_helper_init_instance(struct venus_inst *inst) +{ + if (inst->session_type == VIDC_SESSION_TYPE_DEC) { + INIT_LIST_HEAD(&inst->delayed_process); + INIT_WORK(&inst->delayed_process_work, + 
delayed_process_buf_func); + } +} +EXPORT_SYMBOL_GPL(venus_helper_init_instance); diff --git a/drivers/media/platform/qcom/venus/helpers.h b/drivers/media/platform/qcom/venus/helpers.h new file mode 100644 index 000000000000..971392be5df5 --- /dev/null +++ b/drivers/media/platform/qcom/venus/helpers.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * Copyright (C) 2017 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef __VENUS_HELPERS_H__ +#define __VENUS_HELPERS_H__ + +#include <media/videobuf2-v4l2.h> + +struct venus_inst; + +bool venus_helper_check_codec(struct venus_inst *inst, u32 v4l2_pixfmt); +struct vb2_v4l2_buffer *venus_helper_find_buf(struct venus_inst *inst, + unsigned int type, u32 idx); +void venus_helper_buffers_done(struct venus_inst *inst, + enum vb2_buffer_state state); +int venus_helper_vb2_buf_init(struct vb2_buffer *vb); +int venus_helper_vb2_buf_prepare(struct vb2_buffer *vb); +void venus_helper_vb2_buf_queue(struct vb2_buffer *vb); +void venus_helper_vb2_stop_streaming(struct vb2_queue *q); +int venus_helper_vb2_start_streaming(struct venus_inst *inst); +void venus_helper_m2m_device_run(void *priv); +void venus_helper_m2m_job_abort(void *priv); +int venus_helper_get_bufreq(struct venus_inst *inst, u32 type, + struct hfi_buffer_requirements *req); +int venus_helper_set_input_resolution(struct venus_inst *inst, + unsigned int width, unsigned int height); +int venus_helper_set_output_resolution(struct venus_inst *inst, + unsigned int width, unsigned int height); 
+int venus_helper_set_num_bufs(struct venus_inst *inst, unsigned int input_bufs, + unsigned int output_bufs); +int venus_helper_set_color_format(struct venus_inst *inst, u32 fmt); +void venus_helper_acquire_buf_ref(struct vb2_v4l2_buffer *vbuf); +void venus_helper_release_buf_ref(struct venus_inst *inst, unsigned int idx); +void venus_helper_init_instance(struct venus_inst *inst); +#endif diff --git a/drivers/media/platform/qcom/venus/hfi.c b/drivers/media/platform/qcom/venus/hfi.c new file mode 100644 index 000000000000..c09490876516 --- /dev/null +++ b/drivers/media/platform/qcom/venus/hfi.c @@ -0,0 +1,522 @@ +/* + * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * Copyright (C) 2017 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#include <linux/slab.h> +#include <linux/mutex.h> +#include <linux/list.h> +#include <linux/completion.h> +#include <linux/platform_device.h> +#include <linux/videodev2.h> + +#include "core.h" +#include "hfi.h" +#include "hfi_cmds.h" +#include "hfi_venus.h" + +#define TIMEOUT msecs_to_jiffies(1000) + +static u32 to_codec_type(u32 pixfmt) +{ + switch (pixfmt) { + case V4L2_PIX_FMT_H264: + case V4L2_PIX_FMT_H264_NO_SC: + return HFI_VIDEO_CODEC_H264; + case V4L2_PIX_FMT_H263: + return HFI_VIDEO_CODEC_H263; + case V4L2_PIX_FMT_MPEG1: + return HFI_VIDEO_CODEC_MPEG1; + case V4L2_PIX_FMT_MPEG2: + return HFI_VIDEO_CODEC_MPEG2; + case V4L2_PIX_FMT_MPEG4: + return HFI_VIDEO_CODEC_MPEG4; + case V4L2_PIX_FMT_VC1_ANNEX_G: + case V4L2_PIX_FMT_VC1_ANNEX_L: + return HFI_VIDEO_CODEC_VC1; + case V4L2_PIX_FMT_VP8: + return HFI_VIDEO_CODEC_VP8; + case V4L2_PIX_FMT_VP9: + return HFI_VIDEO_CODEC_VP9; + case V4L2_PIX_FMT_XVID: + return HFI_VIDEO_CODEC_DIVX; + default: + return 0; + } +} + +int hfi_core_init(struct venus_core *core) +{ + int ret = 0; + + mutex_lock(&core->lock); + + if (core->state >= CORE_INIT) + goto unlock; + + reinit_completion(&core->done); + + ret = core->ops->core_init(core); + if (ret) + goto unlock; + + ret = wait_for_completion_timeout(&core->done, TIMEOUT); + if (!ret) { + ret = -ETIMEDOUT; + goto unlock; + } + + ret = 0; + + if (core->error != HFI_ERR_NONE) { + ret = -EIO; + goto unlock; + } + + core->state = CORE_INIT; +unlock: + mutex_unlock(&core->lock); + return ret; +} + +static int core_deinit_wait_atomic_t(atomic_t *p) +{ + schedule(); + return 0; +} + +int hfi_core_deinit(struct venus_core *core, bool blocking) +{ + int ret = 0, empty; + + mutex_lock(&core->lock); + + if (core->state == CORE_UNINIT) + goto unlock; + + empty = list_empty(&core->instances); + + if (!empty && !blocking) { + ret = -EBUSY; + goto unlock; + } + + if (!empty) { + mutex_unlock(&core->lock); + wait_on_atomic_t(&core->insts_count, core_deinit_wait_atomic_t, + 
TASK_UNINTERRUPTIBLE);
+		mutex_lock(&core->lock);
+	}
+
+	ret = core->ops->core_deinit(core);
+
+	if (!ret)
+		core->state = CORE_UNINIT;
+
+unlock:
+	mutex_unlock(&core->lock);
+	return ret;
+}
+
+/* Suspend the core; a no-op unless the core is initialized. */
+int hfi_core_suspend(struct venus_core *core)
+{
+	if (core->state != CORE_INIT)
+		return 0;
+
+	return core->ops->suspend(core);
+}
+
+/* Resume the core; @force resumes regardless of the recorded state. */
+int hfi_core_resume(struct venus_core *core, bool force)
+{
+	if (!force && core->state != CORE_INIT)
+		return 0;
+
+	return core->ops->resume(core);
+}
+
+/* Ask the firmware to trigger a subsystem restart of the given @type. */
+int hfi_core_trigger_ssr(struct venus_core *core, u32 type)
+{
+	return core->ops->core_trigger_ssr(core, type);
+}
+
+/*
+ * Send a ping to the firmware and wait (up to TIMEOUT) for the reply,
+ * which arrives via the core->done completion. Returns -ETIMEDOUT on no
+ * reply and -ENODEV when the firmware reported an error.
+ */
+int hfi_core_ping(struct venus_core *core)
+{
+	int ret;
+
+	mutex_lock(&core->lock);
+
+	ret = core->ops->core_ping(core, 0xbeef);
+	if (ret)
+		goto unlock;
+
+	ret = wait_for_completion_timeout(&core->done, TIMEOUT);
+	if (!ret) {
+		ret = -ETIMEDOUT;
+		goto unlock;
+	}
+	ret = 0;
+	if (core->error != HFI_ERR_NONE)
+		ret = -ENODEV;
+unlock:
+	mutex_unlock(&core->lock);
+	return ret;
+}
+
+/*
+ * Wait for the firmware reply to the last session command; the caller
+ * must have called reinit_completion(&inst->done) before issuing it.
+ */
+static int wait_session_msg(struct venus_inst *inst)
+{
+	int ret;
+
+	ret = wait_for_completion_timeout(&inst->done, TIMEOUT);
+	if (!ret)
+		return -ETIMEDOUT;
+
+	if (inst->error != HFI_ERR_NONE)
+		return -EIO;
+
+	return 0;
+}
+
+/*
+ * Register a new instance with the core: set up its completion and
+ * callbacks and add it to the core's instance list/counter. Purely
+ * host-side bookkeeping, no firmware command is issued here.
+ */
+int hfi_session_create(struct venus_inst *inst, const struct hfi_inst_ops *ops)
+{
+	struct venus_core *core = inst->core;
+
+	if (!ops)
+		return -EINVAL;
+
+	inst->state = INST_UNINIT;
+	init_completion(&inst->done);
+	inst->ops = ops;
+
+	mutex_lock(&core->lock);
+	list_add_tail(&inst->list, &core->instances);
+	atomic_inc(&core->insts_count);
+	mutex_unlock(&core->lock);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(hfi_session_create);
+
+/*
+ * Issue the firmware session-init command for the codec matching @pixfmt
+ * and wait for the reply; moves the instance to INST_INIT on success.
+ */
+int hfi_session_init(struct venus_inst *inst, u32 pixfmt)
+{
+	struct venus_core *core = inst->core;
+	const struct hfi_ops *ops = core->ops;
+	u32 codec;
+	int ret;
+
+	codec = to_codec_type(pixfmt);
+	reinit_completion(&inst->done);
+
+	ret = ops->session_init(inst, inst->session_type, codec);
+	if (ret)
+		return ret;
+
+	
ret = wait_session_msg(inst); + if (ret) + return ret; + + inst->state = INST_INIT; + + return 0; +} +EXPORT_SYMBOL_GPL(hfi_session_init); + +void hfi_session_destroy(struct venus_inst *inst) +{ + struct venus_core *core = inst->core; + + mutex_lock(&core->lock); + list_del_init(&inst->list); + atomic_dec(&core->insts_count); + wake_up_atomic_t(&core->insts_count); + mutex_unlock(&core->lock); +} +EXPORT_SYMBOL_GPL(hfi_session_destroy); + +int hfi_session_deinit(struct venus_inst *inst) +{ + const struct hfi_ops *ops = inst->core->ops; + int ret; + + if (inst->state == INST_UNINIT) + return 0; + + if (inst->state < INST_INIT) + return -EINVAL; + + reinit_completion(&inst->done); + + ret = ops->session_end(inst); + if (ret) + return ret; + + ret = wait_session_msg(inst); + if (ret) + return ret; + + inst->state = INST_UNINIT; + + return 0; +} +EXPORT_SYMBOL_GPL(hfi_session_deinit); + +int hfi_session_start(struct venus_inst *inst) +{ + const struct hfi_ops *ops = inst->core->ops; + int ret; + + if (inst->state != INST_LOAD_RESOURCES) + return -EINVAL; + + reinit_completion(&inst->done); + + ret = ops->session_start(inst); + if (ret) + return ret; + + ret = wait_session_msg(inst); + if (ret) + return ret; + + inst->state = INST_START; + + return 0; +} + +int hfi_session_stop(struct venus_inst *inst) +{ + const struct hfi_ops *ops = inst->core->ops; + int ret; + + if (inst->state != INST_START) + return -EINVAL; + + reinit_completion(&inst->done); + + ret = ops->session_stop(inst); + if (ret) + return ret; + + ret = wait_session_msg(inst); + if (ret) + return ret; + + inst->state = INST_STOP; + + return 0; +} + +int hfi_session_continue(struct venus_inst *inst) +{ + struct venus_core *core = inst->core; + + if (core->res->hfi_version != HFI_VERSION_3XX) + return 0; + + return core->ops->session_continue(inst); +} +EXPORT_SYMBOL_GPL(hfi_session_continue); + +int hfi_session_abort(struct venus_inst *inst) +{ + const struct hfi_ops *ops = inst->core->ops; + int ret; + + 
reinit_completion(&inst->done); + + ret = ops->session_abort(inst); + if (ret) + return ret; + + ret = wait_session_msg(inst); + if (ret) + return ret; + + return 0; +} + +int hfi_session_load_res(struct venus_inst *inst) +{ + const struct hfi_ops *ops = inst->core->ops; + int ret; + + if (inst->state != INST_INIT) + return -EINVAL; + + reinit_completion(&inst->done); + + ret = ops->session_load_res(inst); + if (ret) + return ret; + + ret = wait_session_msg(inst); + if (ret) + return ret; + + inst->state = INST_LOAD_RESOURCES; + + return 0; +} + +int hfi_session_unload_res(struct venus_inst *inst) +{ + const struct hfi_ops *ops = inst->core->ops; + int ret; + + if (inst->state != INST_STOP) + return -EINVAL; + + reinit_completion(&inst->done); + + ret = ops->session_release_res(inst); + if (ret) + return ret; + + ret = wait_session_msg(inst); + if (ret) + return ret; + + inst->state = INST_RELEASE_RESOURCES; + + return 0; +} + +int hfi_session_flush(struct venus_inst *inst) +{ + const struct hfi_ops *ops = inst->core->ops; + int ret; + + reinit_completion(&inst->done); + + ret = ops->session_flush(inst, HFI_FLUSH_ALL); + if (ret) + return ret; + + ret = wait_session_msg(inst); + if (ret) + return ret; + + return 0; +} +EXPORT_SYMBOL_GPL(hfi_session_flush); + +int hfi_session_set_buffers(struct venus_inst *inst, struct hfi_buffer_desc *bd) +{ + const struct hfi_ops *ops = inst->core->ops; + + return ops->session_set_buffers(inst, bd); +} + +int hfi_session_unset_buffers(struct venus_inst *inst, + struct hfi_buffer_desc *bd) +{ + const struct hfi_ops *ops = inst->core->ops; + int ret; + + reinit_completion(&inst->done); + + ret = ops->session_unset_buffers(inst, bd); + if (ret) + return ret; + + if (!bd->response_required) + return 0; + + ret = wait_session_msg(inst); + if (ret) + return ret; + + return 0; +} + +int hfi_session_get_property(struct venus_inst *inst, u32 ptype, + union hfi_get_property *hprop) +{ + const struct hfi_ops *ops = inst->core->ops; + int 
ret; + + if (inst->state < INST_INIT || inst->state >= INST_STOP) + return -EINVAL; + + reinit_completion(&inst->done); + + ret = ops->session_get_property(inst, ptype); + if (ret) + return ret; + + ret = wait_session_msg(inst); + if (ret) + return ret; + + *hprop = inst->hprop; + + return 0; +} +EXPORT_SYMBOL_GPL(hfi_session_get_property); + +int hfi_session_set_property(struct venus_inst *inst, u32 ptype, void *pdata) +{ + const struct hfi_ops *ops = inst->core->ops; + + if (inst->state < INST_INIT || inst->state >= INST_STOP) + return -EINVAL; + + return ops->session_set_property(inst, ptype, pdata); +} +EXPORT_SYMBOL_GPL(hfi_session_set_property); + +int hfi_session_process_buf(struct venus_inst *inst, struct hfi_frame_data *fd) +{ + const struct hfi_ops *ops = inst->core->ops; + + if (fd->buffer_type == HFI_BUFFER_INPUT) + return ops->session_etb(inst, fd); + else if (fd->buffer_type == HFI_BUFFER_OUTPUT) + return ops->session_ftb(inst, fd); + + return -EINVAL; +} + +irqreturn_t hfi_isr_thread(int irq, void *dev_id) +{ + struct venus_core *core = dev_id; + + return core->ops->isr_thread(core); +} + +irqreturn_t hfi_isr(int irq, void *dev) +{ + struct venus_core *core = dev; + + return core->ops->isr(core); +} + +int hfi_create(struct venus_core *core, const struct hfi_core_ops *ops) +{ + int ret; + + if (!ops) + return -EINVAL; + + atomic_set(&core->insts_count, 0); + core->core_ops = ops; + core->state = CORE_UNINIT; + init_completion(&core->done); + pkt_set_version(core->res->hfi_version); + ret = venus_hfi_create(core); + + return ret; +} + +void hfi_destroy(struct venus_core *core) +{ + venus_hfi_destroy(core); +} diff --git a/drivers/media/platform/qcom/venus/hfi.h b/drivers/media/platform/qcom/venus/hfi.h new file mode 100644 index 000000000000..5466b7d60dd0 --- /dev/null +++ b/drivers/media/platform/qcom/venus/hfi.h @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * Copyright (C) 2017 Linaro Ltd. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef __HFI_H__ +#define __HFI_H__ + +#include <linux/interrupt.h> + +#include "hfi_helper.h" + +#define VIDC_SESSION_TYPE_VPE 0 +#define VIDC_SESSION_TYPE_ENC 1 +#define VIDC_SESSION_TYPE_DEC 2 + +#define VIDC_RESOURCE_NONE 0 +#define VIDC_RESOURCE_OCMEM 1 +#define VIDC_RESOURCE_VMEM 2 + +struct hfi_buffer_desc { + u32 buffer_type; + u32 buffer_size; + u32 num_buffers; + u32 device_addr; + u32 extradata_addr; + u32 extradata_size; + u32 response_required; +}; + +struct hfi_frame_data { + u32 buffer_type; + u32 device_addr; + u32 extradata_addr; + u64 timestamp; + u32 flags; + u32 offset; + u32 alloc_len; + u32 filled_len; + u32 mark_target; + u32 mark_data; + u32 clnt_data; + u32 extradata_size; +}; + +union hfi_get_property { + struct hfi_profile_level profile_level; + struct hfi_buffer_requirements bufreq[HFI_BUFFER_TYPE_MAX]; +}; + +/* HFI events */ +#define EVT_SYS_EVENT_CHANGE 1 +#define EVT_SYS_WATCHDOG_TIMEOUT 2 +#define EVT_SYS_ERROR 3 +#define EVT_SESSION_ERROR 4 + +/* HFI event callback structure */ +struct hfi_event_data { + u32 error; + u32 height; + u32 width; + u32 event_type; + u32 packet_buffer; + u32 extradata_buffer; + u32 tag; + u32 profile; + u32 level; +}; + +/* define core states */ +#define CORE_UNINIT 0 +#define CORE_INIT 1 + +/* define instance states */ +#define INST_UNINIT 2 +#define INST_INIT 3 +#define INST_LOAD_RESOURCES 4 +#define INST_START 5 +#define INST_STOP 6 +#define INST_RELEASE_RESOURCES 7 + +struct venus_core; +struct venus_inst; + +struct hfi_core_ops { + 
void (*event_notify)(struct venus_core *core, u32 event); +}; + +struct hfi_inst_ops { + void (*buf_done)(struct venus_inst *inst, unsigned int buf_type, + u32 tag, u32 bytesused, u32 data_offset, u32 flags, + u32 hfi_flags, u64 timestamp_us); + void (*event_notify)(struct venus_inst *inst, u32 event, + struct hfi_event_data *data); +}; + +struct hfi_ops { + int (*core_init)(struct venus_core *core); + int (*core_deinit)(struct venus_core *core); + int (*core_ping)(struct venus_core *core, u32 cookie); + int (*core_trigger_ssr)(struct venus_core *core, u32 trigger_type); + + int (*session_init)(struct venus_inst *inst, u32 session_type, + u32 codec); + int (*session_end)(struct venus_inst *inst); + int (*session_abort)(struct venus_inst *inst); + int (*session_flush)(struct venus_inst *inst, u32 flush_mode); + int (*session_start)(struct venus_inst *inst); + int (*session_stop)(struct venus_inst *inst); + int (*session_continue)(struct venus_inst *inst); + int (*session_etb)(struct venus_inst *inst, struct hfi_frame_data *fd); + int (*session_ftb)(struct venus_inst *inst, struct hfi_frame_data *fd); + int (*session_set_buffers)(struct venus_inst *inst, + struct hfi_buffer_desc *bd); + int (*session_unset_buffers)(struct venus_inst *inst, + struct hfi_buffer_desc *bd); + int (*session_load_res)(struct venus_inst *inst); + int (*session_release_res)(struct venus_inst *inst); + int (*session_parse_seq_hdr)(struct venus_inst *inst, u32 seq_hdr, + u32 seq_hdr_len); + int (*session_get_seq_hdr)(struct venus_inst *inst, u32 seq_hdr, + u32 seq_hdr_len); + int (*session_set_property)(struct venus_inst *inst, u32 ptype, + void *pdata); + int (*session_get_property)(struct venus_inst *inst, u32 ptype); + + int (*resume)(struct venus_core *core); + int (*suspend)(struct venus_core *core); + + /* interrupt operations */ + irqreturn_t (*isr)(struct venus_core *core); + irqreturn_t (*isr_thread)(struct venus_core *core); +}; + +int hfi_create(struct venus_core *core, const struct 
hfi_core_ops *ops); +void hfi_destroy(struct venus_core *core); + +int hfi_core_init(struct venus_core *core); +int hfi_core_deinit(struct venus_core *core, bool blocking); +int hfi_core_suspend(struct venus_core *core); +int hfi_core_resume(struct venus_core *core, bool force); +int hfi_core_trigger_ssr(struct venus_core *core, u32 type); +int hfi_core_ping(struct venus_core *core); +int hfi_session_create(struct venus_inst *inst, const struct hfi_inst_ops *ops); +void hfi_session_destroy(struct venus_inst *inst); +int hfi_session_init(struct venus_inst *inst, u32 pixfmt); +int hfi_session_deinit(struct venus_inst *inst); +int hfi_session_start(struct venus_inst *inst); +int hfi_session_stop(struct venus_inst *inst); +int hfi_session_continue(struct venus_inst *inst); +int hfi_session_abort(struct venus_inst *inst); +int hfi_session_load_res(struct venus_inst *inst); +int hfi_session_unload_res(struct venus_inst *inst); +int hfi_session_flush(struct venus_inst *inst); +int hfi_session_set_buffers(struct venus_inst *inst, + struct hfi_buffer_desc *bd); +int hfi_session_unset_buffers(struct venus_inst *inst, + struct hfi_buffer_desc *bd); +int hfi_session_get_property(struct venus_inst *inst, u32 ptype, + union hfi_get_property *hprop); +int hfi_session_set_property(struct venus_inst *inst, u32 ptype, void *pdata); +int hfi_session_process_buf(struct venus_inst *inst, struct hfi_frame_data *f); +irqreturn_t hfi_isr_thread(int irq, void *dev_id); +irqreturn_t hfi_isr(int irq, void *dev); + +#endif diff --git a/drivers/media/platform/qcom/venus/hfi_cmds.c b/drivers/media/platform/qcom/venus/hfi_cmds.c new file mode 100644 index 000000000000..b83c5b8ddccb --- /dev/null +++ b/drivers/media/platform/qcom/venus/hfi_cmds.c @@ -0,0 +1,1259 @@ +/* + * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * Copyright (C) 2017 Linaro Ltd. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include <linux/errno.h> +#include <linux/hash.h> + +#include "hfi_cmds.h" + +static enum hfi_version hfi_ver; + +void pkt_sys_init(struct hfi_sys_init_pkt *pkt, u32 arch_type) +{ + pkt->hdr.size = sizeof(*pkt); + pkt->hdr.pkt_type = HFI_CMD_SYS_INIT; + pkt->arch_type = arch_type; +} + +void pkt_sys_pc_prep(struct hfi_sys_pc_prep_pkt *pkt) +{ + pkt->hdr.size = sizeof(*pkt); + pkt->hdr.pkt_type = HFI_CMD_SYS_PC_PREP; +} + +void pkt_sys_idle_indicator(struct hfi_sys_set_property_pkt *pkt, u32 enable) +{ + struct hfi_enable *hfi = (struct hfi_enable *)&pkt->data[1]; + + pkt->hdr.size = sizeof(*pkt) + sizeof(*hfi) + sizeof(u32); + pkt->hdr.pkt_type = HFI_CMD_SYS_SET_PROPERTY; + pkt->num_properties = 1; + pkt->data[0] = HFI_PROPERTY_SYS_IDLE_INDICATOR; + hfi->enable = enable; +} + +void pkt_sys_debug_config(struct hfi_sys_set_property_pkt *pkt, u32 mode, + u32 config) +{ + struct hfi_debug_config *hfi; + + pkt->hdr.size = sizeof(*pkt) + sizeof(*hfi) + sizeof(u32); + pkt->hdr.pkt_type = HFI_CMD_SYS_SET_PROPERTY; + pkt->num_properties = 1; + pkt->data[0] = HFI_PROPERTY_SYS_DEBUG_CONFIG; + hfi = (struct hfi_debug_config *)&pkt->data[1]; + hfi->config = config; + hfi->mode = mode; +} + +void pkt_sys_coverage_config(struct hfi_sys_set_property_pkt *pkt, u32 mode) +{ + pkt->hdr.size = sizeof(*pkt) + sizeof(u32); + pkt->hdr.pkt_type = HFI_CMD_SYS_SET_PROPERTY; + pkt->num_properties = 1; + pkt->data[0] = HFI_PROPERTY_SYS_CONFIG_COVERAGE; + pkt->data[1] = mode; +} + +int pkt_sys_set_resource(struct 
hfi_sys_set_resource_pkt *pkt, u32 id, u32 size, + u32 addr, void *cookie) +{ + pkt->hdr.size = sizeof(*pkt); + pkt->hdr.pkt_type = HFI_CMD_SYS_SET_RESOURCE; + pkt->resource_handle = hash32_ptr(cookie); + + switch (id) { + case VIDC_RESOURCE_OCMEM: + case VIDC_RESOURCE_VMEM: { + struct hfi_resource_ocmem *res = + (struct hfi_resource_ocmem *)&pkt->resource_data[0]; + + res->size = size; + res->mem = addr; + pkt->resource_type = HFI_RESOURCE_OCMEM; + pkt->hdr.size += sizeof(*res) - sizeof(u32); + break; + } + case VIDC_RESOURCE_NONE: + default: + return -ENOTSUPP; + } + + return 0; +} + +int pkt_sys_unset_resource(struct hfi_sys_release_resource_pkt *pkt, u32 id, + u32 size, void *cookie) +{ + pkt->hdr.size = sizeof(*pkt); + pkt->hdr.pkt_type = HFI_CMD_SYS_RELEASE_RESOURCE; + pkt->resource_handle = hash32_ptr(cookie); + + switch (id) { + case VIDC_RESOURCE_OCMEM: + case VIDC_RESOURCE_VMEM: + pkt->resource_type = HFI_RESOURCE_OCMEM; + break; + case VIDC_RESOURCE_NONE: + break; + default: + return -ENOTSUPP; + } + + return 0; +} + +void pkt_sys_ping(struct hfi_sys_ping_pkt *pkt, u32 cookie) +{ + pkt->hdr.size = sizeof(*pkt); + pkt->hdr.pkt_type = HFI_CMD_SYS_PING; + pkt->client_data = cookie; +} + +void pkt_sys_power_control(struct hfi_sys_set_property_pkt *pkt, u32 enable) +{ + struct hfi_enable *hfi = (struct hfi_enable *)&pkt->data[1]; + + pkt->hdr.size = sizeof(*pkt) + sizeof(*hfi) + sizeof(u32); + pkt->hdr.pkt_type = HFI_CMD_SYS_SET_PROPERTY; + pkt->num_properties = 1; + pkt->data[0] = HFI_PROPERTY_SYS_CODEC_POWER_PLANE_CTRL; + hfi->enable = enable; +} + +int pkt_sys_ssr_cmd(struct hfi_sys_test_ssr_pkt *pkt, u32 trigger_type) +{ + switch (trigger_type) { + case HFI_TEST_SSR_SW_ERR_FATAL: + case HFI_TEST_SSR_SW_DIV_BY_ZERO: + case HFI_TEST_SSR_HW_WDOG_IRQ: + break; + default: + return -EINVAL; + } + + pkt->hdr.size = sizeof(*pkt); + pkt->hdr.pkt_type = HFI_CMD_SYS_TEST_SSR; + pkt->trigger_type = trigger_type; + + return 0; +} + +void pkt_sys_image_version(struct 
hfi_sys_get_property_pkt *pkt)
+{
+	pkt->hdr.size = sizeof(*pkt);
+	pkt->hdr.pkt_type = HFI_CMD_SYS_GET_PROPERTY;
+	pkt->num_properties = 1;
+	pkt->data[0] = HFI_PROPERTY_SYS_IMAGE_VERSION;
+}
+
+/* Build a session-init packet for @session_type / @codec. */
+int pkt_session_init(struct hfi_session_init_pkt *pkt, void *cookie,
+		     u32 session_type, u32 codec)
+{
+	if (!pkt || !cookie || !codec)
+		return -EINVAL;
+
+	pkt->shdr.hdr.size = sizeof(*pkt);
+	pkt->shdr.hdr.pkt_type = HFI_CMD_SYS_SESSION_INIT;
+	pkt->shdr.session_id = hash32_ptr(cookie);
+	pkt->session_domain = session_type;
+	pkt->session_codec = codec;
+
+	return 0;
+}
+
+/* Build a bare session command packet of type @pkt_type. */
+void pkt_session_cmd(struct hfi_session_pkt *pkt, u32 pkt_type, void *cookie)
+{
+	pkt->shdr.hdr.size = sizeof(*pkt);
+	pkt->shdr.hdr.pkt_type = pkt_type;
+	pkt->shdr.session_id = hash32_ptr(cookie);
+}
+
+/*
+ * Build a SET_BUFFERS packet from @bd. Output buffers carry a
+ * hfi_buffer_info (address + extradata address) per entry; other types
+ * carry a plain device address per entry. hdr.size is sized for
+ * bd->num_buffers entries in both cases.
+ */
+int pkt_session_set_buffers(struct hfi_session_set_buffers_pkt *pkt,
+			    void *cookie, struct hfi_buffer_desc *bd)
+{
+	unsigned int i;
+
+	if (!cookie || !pkt || !bd)
+		return -EINVAL;
+
+	pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_SET_BUFFERS;
+	pkt->shdr.session_id = hash32_ptr(cookie);
+	pkt->buffer_size = bd->buffer_size;
+	pkt->min_buffer_size = bd->buffer_size;
+	pkt->num_buffers = bd->num_buffers;
+
+	if (bd->buffer_type == HFI_BUFFER_OUTPUT ||
+	    bd->buffer_type == HFI_BUFFER_OUTPUT2) {
+		struct hfi_buffer_info *bi;
+
+		pkt->extradata_size = bd->extradata_size;
+		pkt->shdr.hdr.size = sizeof(*pkt) - sizeof(u32) +
+			(bd->num_buffers * sizeof(*bi));
+		bi = (struct hfi_buffer_info *)pkt->buffer_info;
+		/* advance bi each iteration: hdr.size declares num_buffers
+		 * entries, so every entry must be written, not just the
+		 * first (previously entries 1..n-1 were left uninitialized)
+		 */
+		for (i = 0; i < pkt->num_buffers; i++, bi++) {
+			bi->buffer_addr = bd->device_addr;
+			bi->extradata_addr = bd->extradata_addr;
+		}
+	} else {
+		pkt->extradata_size = 0;
+		pkt->shdr.hdr.size = sizeof(*pkt) +
+			((bd->num_buffers - 1) * sizeof(u32));
+		for (i = 0; i < pkt->num_buffers; i++)
+			pkt->buffer_info[i] = bd->device_addr;
+	}
+
+	pkt->buffer_type = bd->buffer_type;
+
+	return 0;
+}
+
+/*
+ * Build a RELEASE_BUFFERS packet, mirroring pkt_session_set_buffers()
+ * (same payload layout and sizing, plus the response_req flag).
+ */
+int pkt_session_unset_buffers(struct hfi_session_release_buffer_pkt *pkt,
+			      void *cookie, struct hfi_buffer_desc *bd)
+{
+	unsigned int i;
+
+	if (!cookie || !pkt || !bd)
+		return -EINVAL;
+
+	pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_RELEASE_BUFFERS;
+	pkt->shdr.session_id = hash32_ptr(cookie);
+	pkt->buffer_size = bd->buffer_size;
+	pkt->num_buffers = bd->num_buffers;
+
+	if (bd->buffer_type == HFI_BUFFER_OUTPUT ||
+	    bd->buffer_type == HFI_BUFFER_OUTPUT2) {
+		struct hfi_buffer_info *bi;
+
+		bi = (struct hfi_buffer_info *)pkt->buffer_info;
+		/* advance bi, see pkt_session_set_buffers() */
+		for (i = 0; i < pkt->num_buffers; i++, bi++) {
+			bi->buffer_addr = bd->device_addr;
+			bi->extradata_addr = bd->extradata_addr;
+		}
+		pkt->shdr.hdr.size =
+			sizeof(struct hfi_session_set_buffers_pkt) -
+			sizeof(u32) + (bd->num_buffers * sizeof(*bi));
+	} else {
+		for (i = 0; i < pkt->num_buffers; i++)
+			pkt->buffer_info[i] = bd->device_addr;
+
+		pkt->extradata_size = 0;
+		pkt->shdr.hdr.size =
+			sizeof(struct hfi_session_set_buffers_pkt) +
+			((bd->num_buffers - 1) * sizeof(u32));
+	}
+
+	pkt->response_req = bd->response_required;
+	pkt->buffer_type = bd->buffer_type;
+
+	return 0;
+}
+
+/*
+ * Build a decoder empty-this-buffer packet (compressed input frame);
+ * the 64-bit timestamp is split into hi/lo 32-bit halves.
+ */
+int pkt_session_etb_decoder(struct hfi_session_empty_buffer_compressed_pkt *pkt,
+			    void *cookie, struct hfi_frame_data *in_frame)
+{
+	if (!cookie || !in_frame->device_addr)
+		return -EINVAL;
+
+	pkt->shdr.hdr.size = sizeof(*pkt);
+	pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_EMPTY_BUFFER;
+	pkt->shdr.session_id = hash32_ptr(cookie);
+	pkt->time_stamp_hi = upper_32_bits(in_frame->timestamp);
+	pkt->time_stamp_lo = lower_32_bits(in_frame->timestamp);
+	pkt->flags = in_frame->flags;
+	pkt->mark_target = in_frame->mark_target;
+	pkt->mark_data = in_frame->mark_data;
+	pkt->offset = in_frame->offset;
+	pkt->alloc_len = in_frame->alloc_len;
+	pkt->filled_len = in_frame->filled_len;
+	pkt->input_tag = in_frame->clnt_data;
+	pkt->packet_buffer = in_frame->device_addr;
+
+	return 0;
+}
+
+int pkt_session_etb_encoder(
+	struct hfi_session_empty_buffer_uncompressed_plane0_pkt *pkt,
+	void *cookie, struct hfi_frame_data *in_frame)
+{
+	if (!cookie || !in_frame->device_addr)
+		return
-EINVAL; + + pkt->shdr.hdr.size = sizeof(*pkt); + pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_EMPTY_BUFFER; + pkt->shdr.session_id = hash32_ptr(cookie); + pkt->view_id = 0; + pkt->time_stamp_hi = upper_32_bits(in_frame->timestamp); + pkt->time_stamp_lo = lower_32_bits(in_frame->timestamp); + pkt->flags = in_frame->flags; + pkt->mark_target = in_frame->mark_target; + pkt->mark_data = in_frame->mark_data; + pkt->offset = in_frame->offset; + pkt->alloc_len = in_frame->alloc_len; + pkt->filled_len = in_frame->filled_len; + pkt->input_tag = in_frame->clnt_data; + pkt->packet_buffer = in_frame->device_addr; + pkt->extradata_buffer = in_frame->extradata_addr; + + return 0; +} + +int pkt_session_ftb(struct hfi_session_fill_buffer_pkt *pkt, void *cookie, + struct hfi_frame_data *out_frame) +{ + if (!cookie || !out_frame || !out_frame->device_addr) + return -EINVAL; + + pkt->shdr.hdr.size = sizeof(*pkt); + pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_FILL_BUFFER; + pkt->shdr.session_id = hash32_ptr(cookie); + + if (out_frame->buffer_type == HFI_BUFFER_OUTPUT) + pkt->stream_id = 0; + else if (out_frame->buffer_type == HFI_BUFFER_OUTPUT2) + pkt->stream_id = 1; + + pkt->output_tag = out_frame->clnt_data; + pkt->packet_buffer = out_frame->device_addr; + pkt->extradata_buffer = out_frame->extradata_addr; + pkt->alloc_len = out_frame->alloc_len; + pkt->filled_len = out_frame->filled_len; + pkt->offset = out_frame->offset; + pkt->data[0] = out_frame->extradata_size; + + return 0; +} + +int pkt_session_parse_seq_header( + struct hfi_session_parse_sequence_header_pkt *pkt, + void *cookie, u32 seq_hdr, u32 seq_hdr_len) +{ + if (!cookie || !seq_hdr || !seq_hdr_len) + return -EINVAL; + + pkt->shdr.hdr.size = sizeof(*pkt); + pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_PARSE_SEQUENCE_HEADER; + pkt->shdr.session_id = hash32_ptr(cookie); + pkt->header_len = seq_hdr_len; + pkt->packet_buffer = seq_hdr; + + return 0; +} + +int pkt_session_get_seq_hdr(struct hfi_session_get_sequence_header_pkt *pkt, + 
void *cookie, u32 seq_hdr, u32 seq_hdr_len) +{ + if (!cookie || !seq_hdr || !seq_hdr_len) + return -EINVAL; + + pkt->shdr.hdr.size = sizeof(*pkt); + pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_GET_SEQUENCE_HEADER; + pkt->shdr.session_id = hash32_ptr(cookie); + pkt->buffer_len = seq_hdr_len; + pkt->packet_buffer = seq_hdr; + + return 0; +} + +int pkt_session_flush(struct hfi_session_flush_pkt *pkt, void *cookie, u32 type) +{ + switch (type) { + case HFI_FLUSH_INPUT: + case HFI_FLUSH_OUTPUT: + case HFI_FLUSH_OUTPUT2: + case HFI_FLUSH_ALL: + break; + default: + return -EINVAL; + } + + pkt->shdr.hdr.size = sizeof(*pkt); + pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_FLUSH; + pkt->shdr.session_id = hash32_ptr(cookie); + pkt->flush_type = type; + + return 0; +} + +static int pkt_session_get_property_1x(struct hfi_session_get_property_pkt *pkt, + void *cookie, u32 ptype) +{ + switch (ptype) { + case HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT: + case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS: + break; + default: + return -EINVAL; + } + + pkt->shdr.hdr.size = sizeof(*pkt); + pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_GET_PROPERTY; + pkt->shdr.session_id = hash32_ptr(cookie); + pkt->num_properties = 1; + pkt->data[0] = ptype; + + return 0; +} + +static int pkt_session_set_property_1x(struct hfi_session_set_property_pkt *pkt, + void *cookie, u32 ptype, void *pdata) +{ + void *prop_data; + int ret = 0; + + if (!pkt || !cookie || !pdata) + return -EINVAL; + + prop_data = &pkt->data[1]; + + pkt->shdr.hdr.size = sizeof(*pkt); + pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_SET_PROPERTY; + pkt->shdr.session_id = hash32_ptr(cookie); + pkt->num_properties = 1; + + switch (ptype) { + case HFI_PROPERTY_CONFIG_FRAME_RATE: { + struct hfi_framerate *in = pdata, *frate = prop_data; + + pkt->data[0] = HFI_PROPERTY_CONFIG_FRAME_RATE; + frate->buffer_type = in->buffer_type; + frate->framerate = in->framerate; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*frate); + break; + } + case 
HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT: { + struct hfi_uncompressed_format_select *in = pdata; + struct hfi_uncompressed_format_select *hfi = prop_data; + + pkt->data[0] = HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT; + hfi->buffer_type = in->buffer_type; + hfi->format = in->format; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*hfi); + break; + } + case HFI_PROPERTY_PARAM_FRAME_SIZE: { + struct hfi_framesize *in = pdata, *fsize = prop_data; + + pkt->data[0] = HFI_PROPERTY_PARAM_FRAME_SIZE; + fsize->buffer_type = in->buffer_type; + fsize->height = in->height; + fsize->width = in->width; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*fsize); + break; + } + case HFI_PROPERTY_CONFIG_REALTIME: { + struct hfi_enable *in = pdata, *en = prop_data; + + pkt->data[0] = HFI_PROPERTY_CONFIG_REALTIME; + en->enable = in->enable; + pkt->shdr.hdr.size += sizeof(u32) * 2; + break; + } + case HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL: { + struct hfi_buffer_count_actual *in = pdata, *count = prop_data; + + pkt->data[0] = HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL; + count->count_actual = in->count_actual; + count->type = in->type; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*count); + break; + } + case HFI_PROPERTY_PARAM_BUFFER_SIZE_ACTUAL: { + struct hfi_buffer_size_actual *in = pdata, *sz = prop_data; + + pkt->data[0] = HFI_PROPERTY_PARAM_BUFFER_SIZE_ACTUAL; + sz->size = in->size; + sz->type = in->type; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*sz); + break; + } + case HFI_PROPERTY_PARAM_BUFFER_DISPLAY_HOLD_COUNT_ACTUAL: { + struct hfi_buffer_display_hold_count_actual *in = pdata; + struct hfi_buffer_display_hold_count_actual *count = prop_data; + + pkt->data[0] = + HFI_PROPERTY_PARAM_BUFFER_DISPLAY_HOLD_COUNT_ACTUAL; + count->hold_count = in->hold_count; + count->type = in->type; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*count); + break; + } + case HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SELECT: { + struct hfi_nal_stream_format_select *in = pdata; + struct 
hfi_nal_stream_format_select *fmt = prop_data; + + pkt->data[0] = HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SELECT; + fmt->format = in->format; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*fmt); + break; + } + case HFI_PROPERTY_PARAM_VDEC_OUTPUT_ORDER: { + u32 *in = pdata; + + switch (*in) { + case HFI_OUTPUT_ORDER_DECODE: + case HFI_OUTPUT_ORDER_DISPLAY: + break; + default: + ret = -EINVAL; + break; + } + + pkt->data[0] = HFI_PROPERTY_PARAM_VDEC_OUTPUT_ORDER; + pkt->data[1] = *in; + pkt->shdr.hdr.size += sizeof(u32) * 2; + break; + } + case HFI_PROPERTY_PARAM_VDEC_PICTURE_TYPE_DECODE: { + struct hfi_enable_picture *in = pdata, *en = prop_data; + + pkt->data[0] = HFI_PROPERTY_PARAM_VDEC_PICTURE_TYPE_DECODE; + en->picture_type = in->picture_type; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en); + break; + } + case HFI_PROPERTY_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO: { + struct hfi_enable *in = pdata, *en = prop_data; + + pkt->data[0] = + HFI_PROPERTY_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO; + en->enable = in->enable; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en); + break; + } + case HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER: { + struct hfi_enable *in = pdata; + struct hfi_enable *en = prop_data; + + pkt->data[0] = HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER; + en->enable = in->enable; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en); + break; + } + case HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM: { + struct hfi_multi_stream *in = pdata, *multi = prop_data; + + pkt->data[0] = HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM; + multi->buffer_type = in->buffer_type; + multi->enable = in->enable; + multi->width = in->width; + multi->height = in->height; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*multi); + break; + } + case HFI_PROPERTY_PARAM_VDEC_DISPLAY_PICTURE_BUFFER_COUNT: { + struct hfi_display_picture_buffer_count *in = pdata; + struct hfi_display_picture_buffer_count *count = prop_data; + + pkt->data[0] = + HFI_PROPERTY_PARAM_VDEC_DISPLAY_PICTURE_BUFFER_COUNT; + count->count = 
in->count; + count->enable = in->enable; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*count); + break; + } + case HFI_PROPERTY_PARAM_DIVX_FORMAT: { + u32 *in = pdata; + + switch (*in) { + case HFI_DIVX_FORMAT_4: + case HFI_DIVX_FORMAT_5: + case HFI_DIVX_FORMAT_6: + break; + default: + ret = -EINVAL; + break; + } + + pkt->data[0] = HFI_PROPERTY_PARAM_DIVX_FORMAT; + pkt->data[1] = *in; + pkt->shdr.hdr.size += sizeof(u32) * 2; + break; + } + case HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP_REPORTING: { + struct hfi_enable *in = pdata, *en = prop_data; + + pkt->data[0] = HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP_REPORTING; + en->enable = in->enable; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en); + break; + } + case HFI_PROPERTY_PARAM_VDEC_CONTINUE_DATA_TRANSFER: { + struct hfi_enable *in = pdata, *en = prop_data; + + pkt->data[0] = HFI_PROPERTY_PARAM_VDEC_CONTINUE_DATA_TRANSFER; + en->enable = in->enable; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en); + break; + } + case HFI_PROPERTY_PARAM_VDEC_THUMBNAIL_MODE: { + struct hfi_enable *in = pdata, *en = prop_data; + + pkt->data[0] = HFI_PROPERTY_PARAM_VDEC_THUMBNAIL_MODE; + en->enable = in->enable; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en); + break; + } + case HFI_PROPERTY_CONFIG_VENC_SYNC_FRAME_SEQUENCE_HEADER: { + struct hfi_enable *in = pdata, *en = prop_data; + + pkt->data[0] = + HFI_PROPERTY_CONFIG_VENC_SYNC_FRAME_SEQUENCE_HEADER; + en->enable = in->enable; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en); + break; + } + case HFI_PROPERTY_CONFIG_VENC_REQUEST_SYNC_FRAME: + pkt->data[0] = HFI_PROPERTY_CONFIG_VENC_REQUEST_SYNC_FRAME; + pkt->shdr.hdr.size += sizeof(u32); + break; + case HFI_PROPERTY_PARAM_VENC_MPEG4_SHORT_HEADER: + break; + case HFI_PROPERTY_PARAM_VENC_MPEG4_AC_PREDICTION: + break; + case HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE: { + struct hfi_bitrate *in = pdata, *brate = prop_data; + + pkt->data[0] = HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE; + brate->bitrate = in->bitrate; + brate->layer_id = 
in->layer_id; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*brate); + break; + } + case HFI_PROPERTY_CONFIG_VENC_MAX_BITRATE: { + struct hfi_bitrate *in = pdata, *hfi = prop_data; + + pkt->data[0] = HFI_PROPERTY_CONFIG_VENC_MAX_BITRATE; + hfi->bitrate = in->bitrate; + hfi->layer_id = in->layer_id; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*hfi); + break; + } + case HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT: { + struct hfi_profile_level *in = pdata, *pl = prop_data; + + pkt->data[0] = HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT; + pl->level = in->level; + pl->profile = in->profile; + if (pl->profile <= 0) + /* Profile not supported, falling back to high */ + pl->profile = HFI_H264_PROFILE_HIGH; + + if (!pl->level) + /* Level not supported, falling back to 1 */ + pl->level = 1; + + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*pl); + break; + } + case HFI_PROPERTY_PARAM_VENC_H264_ENTROPY_CONTROL: { + struct hfi_h264_entropy_control *in = pdata, *hfi = prop_data; + + pkt->data[0] = HFI_PROPERTY_PARAM_VENC_H264_ENTROPY_CONTROL; + hfi->entropy_mode = in->entropy_mode; + if (hfi->entropy_mode == HFI_H264_ENTROPY_CABAC) + hfi->cabac_model = in->cabac_model; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*hfi); + break; + } + case HFI_PROPERTY_PARAM_VENC_RATE_CONTROL: { + u32 *in = pdata; + + switch (*in) { + case HFI_RATE_CONTROL_OFF: + case HFI_RATE_CONTROL_CBR_CFR: + case HFI_RATE_CONTROL_CBR_VFR: + case HFI_RATE_CONTROL_VBR_CFR: + case HFI_RATE_CONTROL_VBR_VFR: + break; + default: + ret = -EINVAL; + break; + } + + pkt->data[0] = HFI_PROPERTY_PARAM_VENC_RATE_CONTROL; + pkt->data[1] = *in; + pkt->shdr.hdr.size += sizeof(u32) * 2; + break; + } + case HFI_PROPERTY_PARAM_VENC_MPEG4_TIME_RESOLUTION: { + struct hfi_mpeg4_time_resolution *in = pdata, *res = prop_data; + + pkt->data[0] = HFI_PROPERTY_PARAM_VENC_MPEG4_TIME_RESOLUTION; + res->time_increment_resolution = in->time_increment_resolution; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*res); + break; + } + case 
HFI_PROPERTY_PARAM_VENC_MPEG4_HEADER_EXTENSION: { + struct hfi_mpeg4_header_extension *in = pdata, *ext = prop_data; + + pkt->data[0] = HFI_PROPERTY_PARAM_VENC_MPEG4_HEADER_EXTENSION; + ext->header_extension = in->header_extension; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*ext); + break; + } + case HFI_PROPERTY_PARAM_VENC_H264_DEBLOCK_CONTROL: { + struct hfi_h264_db_control *in = pdata, *db = prop_data; + + switch (in->mode) { + case HFI_H264_DB_MODE_DISABLE: + case HFI_H264_DB_MODE_SKIP_SLICE_BOUNDARY: + case HFI_H264_DB_MODE_ALL_BOUNDARY: + break; + default: + ret = -EINVAL; + break; + } + + pkt->data[0] = HFI_PROPERTY_PARAM_VENC_H264_DEBLOCK_CONTROL; + db->mode = in->mode; + db->slice_alpha_offset = in->slice_alpha_offset; + db->slice_beta_offset = in->slice_beta_offset; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*db); + break; + } + case HFI_PROPERTY_PARAM_VENC_SESSION_QP: { + struct hfi_quantization *in = pdata, *quant = prop_data; + + pkt->data[0] = HFI_PROPERTY_PARAM_VENC_SESSION_QP; + quant->qp_i = in->qp_i; + quant->qp_p = in->qp_p; + quant->qp_b = in->qp_b; + quant->layer_id = in->layer_id; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*quant); + break; + } + case HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE: { + struct hfi_quantization_range *in = pdata, *range = prop_data; + u32 min_qp, max_qp; + + pkt->data[0] = HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE; + min_qp = in->min_qp; + max_qp = in->max_qp; + + /* We'll be packing in the qp, so make sure we + * won't be losing data when masking + */ + if (min_qp > 0xff || max_qp > 0xff) { + ret = -ERANGE; + break; + } + + /* When creating the packet, pack the qp value as + * 0xiippbb, where ii = qp range for I-frames, + * pp = qp range for P-frames, etc. 
+ */ + range->min_qp = min_qp | min_qp << 8 | min_qp << 16; + range->max_qp = max_qp | max_qp << 8 | max_qp << 16; + range->layer_id = in->layer_id; + + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*range); + break; + } + case HFI_PROPERTY_PARAM_VENC_VC1_PERF_CFG: { + struct hfi_vc1e_perf_cfg_type *in = pdata, *perf = prop_data; + + pkt->data[0] = HFI_PROPERTY_PARAM_VENC_VC1_PERF_CFG; + + memcpy(perf->search_range_x_subsampled, + in->search_range_x_subsampled, + sizeof(perf->search_range_x_subsampled)); + memcpy(perf->search_range_y_subsampled, + in->search_range_y_subsampled, + sizeof(perf->search_range_y_subsampled)); + + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*perf); + break; + } + case HFI_PROPERTY_PARAM_VENC_MAX_NUM_B_FRAMES: { + struct hfi_max_num_b_frames *bframes = prop_data; + u32 *in = pdata; + + pkt->data[0] = HFI_PROPERTY_PARAM_VENC_MAX_NUM_B_FRAMES; + bframes->max_num_b_frames = *in; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*bframes); + break; + } + case HFI_PROPERTY_CONFIG_VENC_INTRA_PERIOD: { + struct hfi_intra_period *in = pdata, *intra = prop_data; + + pkt->data[0] = HFI_PROPERTY_CONFIG_VENC_INTRA_PERIOD; + intra->pframes = in->pframes; + intra->bframes = in->bframes; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*intra); + break; + } + case HFI_PROPERTY_CONFIG_VENC_IDR_PERIOD: { + struct hfi_idr_period *in = pdata, *idr = prop_data; + + pkt->data[0] = HFI_PROPERTY_CONFIG_VENC_IDR_PERIOD; + idr->idr_period = in->idr_period; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*idr); + break; + } + case HFI_PROPERTY_PARAM_VDEC_CONCEAL_COLOR: { + struct hfi_conceal_color *color = prop_data; + u32 *in = pdata; + + pkt->data[0] = HFI_PROPERTY_PARAM_VDEC_CONCEAL_COLOR; + color->conceal_color = *in; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*color); + break; + } + case HFI_PROPERTY_CONFIG_VPE_OPERATIONS: { + struct hfi_operations_type *in = pdata, *ops = prop_data; + + switch (in->rotation) { + case HFI_ROTATE_NONE: + case HFI_ROTATE_90: + case 
HFI_ROTATE_180: + case HFI_ROTATE_270: + break; + default: + ret = -EINVAL; + break; + } + + switch (in->flip) { + case HFI_FLIP_NONE: + case HFI_FLIP_HORIZONTAL: + case HFI_FLIP_VERTICAL: + break; + default: + ret = -EINVAL; + break; + } + + pkt->data[0] = HFI_PROPERTY_CONFIG_VPE_OPERATIONS; + ops->rotation = in->rotation; + ops->flip = in->flip; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*ops); + break; + } + case HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH: { + struct hfi_intra_refresh *in = pdata, *intra = prop_data; + + switch (in->mode) { + case HFI_INTRA_REFRESH_NONE: + case HFI_INTRA_REFRESH_ADAPTIVE: + case HFI_INTRA_REFRESH_CYCLIC: + case HFI_INTRA_REFRESH_CYCLIC_ADAPTIVE: + case HFI_INTRA_REFRESH_RANDOM: + break; + default: + ret = -EINVAL; + break; + } + + pkt->data[0] = HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH; + intra->mode = in->mode; + intra->air_mbs = in->air_mbs; + intra->air_ref = in->air_ref; + intra->cir_mbs = in->cir_mbs; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*intra); + break; + } + case HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_CONTROL: { + struct hfi_multi_slice_control *in = pdata, *multi = prop_data; + + switch (in->multi_slice) { + case HFI_MULTI_SLICE_OFF: + case HFI_MULTI_SLICE_GOB: + case HFI_MULTI_SLICE_BY_MB_COUNT: + case HFI_MULTI_SLICE_BY_BYTE_COUNT: + break; + default: + ret = -EINVAL; + break; + } + + pkt->data[0] = HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_CONTROL; + multi->multi_slice = in->multi_slice; + multi->slice_size = in->slice_size; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*multi); + break; + } + case HFI_PROPERTY_PARAM_VENC_SLICE_DELIVERY_MODE: { + struct hfi_enable *in = pdata, *en = prop_data; + + pkt->data[0] = HFI_PROPERTY_PARAM_VENC_SLICE_DELIVERY_MODE; + en->enable = in->enable; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en); + break; + } + case HFI_PROPERTY_PARAM_VENC_H264_VUI_TIMING_INFO: { + struct hfi_h264_vui_timing_info *in = pdata, *vui = prop_data; + + pkt->data[0] = 
HFI_PROPERTY_PARAM_VENC_H264_VUI_TIMING_INFO; + vui->enable = in->enable; + vui->fixed_framerate = in->fixed_framerate; + vui->time_scale = in->time_scale; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*vui); + break; + } + case HFI_PROPERTY_CONFIG_VPE_DEINTERLACE: { + struct hfi_enable *in = pdata, *en = prop_data; + + pkt->data[0] = HFI_PROPERTY_CONFIG_VPE_DEINTERLACE; + en->enable = in->enable; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en); + break; + } + case HFI_PROPERTY_PARAM_VENC_H264_GENERATE_AUDNAL: { + struct hfi_enable *in = pdata, *en = prop_data; + + pkt->data[0] = HFI_PROPERTY_PARAM_VENC_H264_GENERATE_AUDNAL; + en->enable = in->enable; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en); + break; + } + case HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE: { + struct hfi_buffer_alloc_mode *in = pdata, *mode = prop_data; + + pkt->data[0] = HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE; + mode->type = in->type; + mode->mode = in->mode; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*mode); + break; + } + case HFI_PROPERTY_PARAM_VDEC_FRAME_ASSEMBLY: { + struct hfi_enable *in = pdata, *en = prop_data; + + pkt->data[0] = HFI_PROPERTY_PARAM_VDEC_FRAME_ASSEMBLY; + en->enable = in->enable; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en); + break; + } + case HFI_PROPERTY_PARAM_VENC_H264_VUI_BITSTREAM_RESTRC: { + struct hfi_enable *in = pdata, *en = prop_data; + + pkt->data[0] = + HFI_PROPERTY_PARAM_VENC_H264_VUI_BITSTREAM_RESTRC; + en->enable = in->enable; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en); + break; + } + case HFI_PROPERTY_PARAM_VENC_PRESERVE_TEXT_QUALITY: { + struct hfi_enable *in = pdata, *en = prop_data; + + pkt->data[0] = HFI_PROPERTY_PARAM_VENC_PRESERVE_TEXT_QUALITY; + en->enable = in->enable; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en); + break; + } + case HFI_PROPERTY_PARAM_VDEC_SCS_THRESHOLD: { + struct hfi_scs_threshold *thres = prop_data; + u32 *in = pdata; + + pkt->data[0] = HFI_PROPERTY_PARAM_VDEC_SCS_THRESHOLD; + thres->threshold_value = *in; + 
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*thres); + break; + } + case HFI_PROPERTY_PARAM_MVC_BUFFER_LAYOUT: { + struct hfi_mvc_buffer_layout_descp_type *in = pdata; + struct hfi_mvc_buffer_layout_descp_type *mvc = prop_data; + + switch (in->layout_type) { + case HFI_MVC_BUFFER_LAYOUT_TOP_BOTTOM: + case HFI_MVC_BUFFER_LAYOUT_SEQ: + break; + default: + ret = -EINVAL; + break; + } + + pkt->data[0] = HFI_PROPERTY_PARAM_MVC_BUFFER_LAYOUT; + mvc->layout_type = in->layout_type; + mvc->bright_view_first = in->bright_view_first; + mvc->ngap = in->ngap; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*mvc); + break; + } + case HFI_PROPERTY_PARAM_VENC_LTRMODE: { + struct hfi_ltr_mode *in = pdata, *ltr = prop_data; + + switch (in->ltr_mode) { + case HFI_LTR_MODE_DISABLE: + case HFI_LTR_MODE_MANUAL: + case HFI_LTR_MODE_PERIODIC: + break; + default: + ret = -EINVAL; + break; + } + + pkt->data[0] = HFI_PROPERTY_PARAM_VENC_LTRMODE; + ltr->ltr_mode = in->ltr_mode; + ltr->ltr_count = in->ltr_count; + ltr->trust_mode = in->trust_mode; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*ltr); + break; + } + case HFI_PROPERTY_CONFIG_VENC_USELTRFRAME: { + struct hfi_ltr_use *in = pdata, *ltr_use = prop_data; + + pkt->data[0] = HFI_PROPERTY_CONFIG_VENC_USELTRFRAME; + ltr_use->frames = in->frames; + ltr_use->ref_ltr = in->ref_ltr; + ltr_use->use_constrnt = in->use_constrnt; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*ltr_use); + break; + } + case HFI_PROPERTY_CONFIG_VENC_MARKLTRFRAME: { + struct hfi_ltr_mark *in = pdata, *ltr_mark = prop_data; + + pkt->data[0] = HFI_PROPERTY_CONFIG_VENC_MARKLTRFRAME; + ltr_mark->mark_frame = in->mark_frame; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*ltr_mark); + break; + } + case HFI_PROPERTY_PARAM_VENC_HIER_P_MAX_NUM_ENH_LAYER: { + u32 *in = pdata; + + pkt->data[0] = HFI_PROPERTY_PARAM_VENC_HIER_P_MAX_NUM_ENH_LAYER; + pkt->data[1] = *in; + pkt->shdr.hdr.size += sizeof(u32) * 2; + break; + } + case HFI_PROPERTY_CONFIG_VENC_HIER_P_ENH_LAYER: { + u32 *in = 
pdata; + + pkt->data[0] = HFI_PROPERTY_CONFIG_VENC_HIER_P_ENH_LAYER; + pkt->data[1] = *in; + pkt->shdr.hdr.size += sizeof(u32) * 2; + break; + } + case HFI_PROPERTY_PARAM_VENC_DISABLE_RC_TIMESTAMP: { + struct hfi_enable *in = pdata, *en = prop_data; + + pkt->data[0] = HFI_PROPERTY_PARAM_VENC_DISABLE_RC_TIMESTAMP; + en->enable = in->enable; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en); + break; + } + case HFI_PROPERTY_PARAM_VENC_INITIAL_QP: { + struct hfi_initial_quantization *in = pdata, *quant = prop_data; + + pkt->data[0] = HFI_PROPERTY_PARAM_VENC_INITIAL_QP; + quant->init_qp_enable = in->init_qp_enable; + quant->qp_i = in->qp_i; + quant->qp_p = in->qp_p; + quant->qp_b = in->qp_b; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*quant); + break; + } + case HFI_PROPERTY_PARAM_VPE_COLOR_SPACE_CONVERSION: { + struct hfi_vpe_color_space_conversion *in = pdata; + struct hfi_vpe_color_space_conversion *csc = prop_data; + + pkt->data[0] = HFI_PROPERTY_PARAM_VPE_COLOR_SPACE_CONVERSION; + memcpy(csc->csc_matrix, in->csc_matrix, + sizeof(csc->csc_matrix)); + memcpy(csc->csc_bias, in->csc_bias, sizeof(csc->csc_bias)); + memcpy(csc->csc_limit, in->csc_limit, sizeof(csc->csc_limit)); + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*csc); + break; + } + case HFI_PROPERTY_PARAM_VENC_VPX_ERROR_RESILIENCE_MODE: { + struct hfi_enable *in = pdata, *en = prop_data; + + pkt->data[0] = + HFI_PROPERTY_PARAM_VENC_VPX_ERROR_RESILIENCE_MODE; + en->enable = in->enable; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en); + break; + } + case HFI_PROPERTY_PARAM_VENC_H264_NAL_SVC_EXT: { + struct hfi_enable *in = pdata, *en = prop_data; + + pkt->data[0] = HFI_PROPERTY_PARAM_VENC_H264_NAL_SVC_EXT; + en->enable = in->enable; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en); + break; + } + case HFI_PROPERTY_CONFIG_VENC_PERF_MODE: { + u32 *in = pdata; + + pkt->data[0] = HFI_PROPERTY_CONFIG_VENC_PERF_MODE; + pkt->data[1] = *in; + pkt->shdr.hdr.size += sizeof(u32) * 2; + break; + } + case 
HFI_PROPERTY_PARAM_VENC_HIER_B_MAX_NUM_ENH_LAYER: { + u32 *in = pdata; + + pkt->data[0] = HFI_PROPERTY_PARAM_VENC_HIER_B_MAX_NUM_ENH_LAYER; + pkt->data[1] = *in; + pkt->shdr.hdr.size += sizeof(u32) * 2; + break; + } + case HFI_PROPERTY_PARAM_VDEC_NONCP_OUTPUT2: { + struct hfi_enable *in = pdata, *en = prop_data; + + pkt->data[0] = HFI_PROPERTY_PARAM_VDEC_NONCP_OUTPUT2; + en->enable = in->enable; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en); + break; + } + case HFI_PROPERTY_PARAM_VENC_HIER_P_HYBRID_MODE: { + struct hfi_hybrid_hierp *in = pdata, *hierp = prop_data; + + pkt->data[0] = HFI_PROPERTY_PARAM_VENC_HIER_P_HYBRID_MODE; + hierp->layers = in->layers; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*hierp); + break; + } + + /* FOLLOWING PROPERTIES ARE NOT IMPLEMENTED IN CORE YET */ + case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS: + case HFI_PROPERTY_CONFIG_PRIORITY: + case HFI_PROPERTY_CONFIG_BATCH_INFO: + case HFI_PROPERTY_SYS_IDLE_INDICATOR: + case HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SUPPORTED: + case HFI_PROPERTY_PARAM_INTERLACE_FORMAT_SUPPORTED: + case HFI_PROPERTY_PARAM_CHROMA_SITE: + case HFI_PROPERTY_PARAM_PROPERTIES_SUPPORTED: + case HFI_PROPERTY_PARAM_PROFILE_LEVEL_SUPPORTED: + case HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED: + case HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SUPPORTED: + case HFI_PROPERTY_PARAM_MULTI_VIEW_FORMAT: + case HFI_PROPERTY_PARAM_MAX_SEQUENCE_HEADER_SIZE: + case HFI_PROPERTY_PARAM_CODEC_SUPPORTED: + case HFI_PROPERTY_PARAM_VDEC_MULTI_VIEW_SELECT: + case HFI_PROPERTY_PARAM_VDEC_MB_QUANTIZATION: + case HFI_PROPERTY_PARAM_VDEC_NUM_CONCEALED_MB: + case HFI_PROPERTY_PARAM_VDEC_H264_ENTROPY_SWITCHING: + case HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_INFO: + default: + return -EINVAL; + } + + return ret; +} + +static int +pkt_session_get_property_3xx(struct hfi_session_get_property_pkt *pkt, + void *cookie, u32 ptype) +{ + int ret = 0; + + if (!pkt || !cookie) + return -EINVAL; + + pkt->shdr.hdr.size = sizeof(struct 
hfi_session_get_property_pkt); + pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_GET_PROPERTY; + pkt->shdr.session_id = hash32_ptr(cookie); + pkt->num_properties = 1; + + switch (ptype) { + case HFI_PROPERTY_CONFIG_VDEC_ENTROPY: + pkt->data[0] = HFI_PROPERTY_CONFIG_VDEC_ENTROPY; + break; + default: + ret = pkt_session_get_property_1x(pkt, cookie, ptype); + break; + } + + return ret; +} + +static int +pkt_session_set_property_3xx(struct hfi_session_set_property_pkt *pkt, + void *cookie, u32 ptype, void *pdata) +{ + void *prop_data; + int ret = 0; + + if (!pkt || !cookie || !pdata) + return -EINVAL; + + prop_data = &pkt->data[1]; + + pkt->shdr.hdr.size = sizeof(*pkt); + pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_SET_PROPERTY; + pkt->shdr.session_id = hash32_ptr(cookie); + pkt->num_properties = 1; + + /* + * Any session set property which is different in 3XX packetization + * should be added as a new case below. All unchanged session set + * properties will be handled in the default case. + */ + switch (ptype) { + case HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM: { + struct hfi_multi_stream *in = pdata; + struct hfi_multi_stream_3x *multi = prop_data; + + pkt->data[0] = HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM; + multi->buffer_type = in->buffer_type; + multi->enable = in->enable; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*multi); + break; + } + case HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH: { + struct hfi_intra_refresh *in = pdata; + struct hfi_intra_refresh_3x *intra = prop_data; + + switch (in->mode) { + case HFI_INTRA_REFRESH_NONE: + case HFI_INTRA_REFRESH_ADAPTIVE: + case HFI_INTRA_REFRESH_CYCLIC: + case HFI_INTRA_REFRESH_CYCLIC_ADAPTIVE: + case HFI_INTRA_REFRESH_RANDOM: + break; + default: + ret = -EINVAL; + break; + } + + pkt->data[0] = HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH; + intra->mode = in->mode; + intra->mbs = in->cir_mbs; + pkt->shdr.hdr.size += sizeof(u32) + sizeof(*intra); + break; + } + case HFI_PROPERTY_PARAM_VDEC_CONTINUE_DATA_TRANSFER: + /* for 3xx fw version 
session_continue is used */ + break; + default: + ret = pkt_session_set_property_1x(pkt, cookie, ptype, pdata); + break; + } + + return ret; +} + +int pkt_session_get_property(struct hfi_session_get_property_pkt *pkt, + void *cookie, u32 ptype) +{ + if (hfi_ver == HFI_VERSION_1XX) + return pkt_session_get_property_1x(pkt, cookie, ptype); + + return pkt_session_get_property_3xx(pkt, cookie, ptype); +} + +int pkt_session_set_property(struct hfi_session_set_property_pkt *pkt, + void *cookie, u32 ptype, void *pdata) +{ + if (hfi_ver == HFI_VERSION_1XX) + return pkt_session_set_property_1x(pkt, cookie, ptype, pdata); + + return pkt_session_set_property_3xx(pkt, cookie, ptype, pdata); +} + +void pkt_set_version(enum hfi_version version) +{ + hfi_ver = version; +} diff --git a/drivers/media/platform/qcom/venus/hfi_cmds.h b/drivers/media/platform/qcom/venus/hfi_cmds.h new file mode 100644 index 000000000000..f7617cf59914 --- /dev/null +++ b/drivers/media/platform/qcom/venus/hfi_cmds.h @@ -0,0 +1,304 @@ +/* + * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * Copyright (C) 2017 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#ifndef __VENUS_HFI_CMDS_H__ +#define __VENUS_HFI_CMDS_H__ + +#include "hfi.h" + +/* commands */ +#define HFI_CMD_SYS_INIT 0x10001 +#define HFI_CMD_SYS_PC_PREP 0x10002 +#define HFI_CMD_SYS_SET_RESOURCE 0x10003 +#define HFI_CMD_SYS_RELEASE_RESOURCE 0x10004 +#define HFI_CMD_SYS_SET_PROPERTY 0x10005 +#define HFI_CMD_SYS_GET_PROPERTY 0x10006 +#define HFI_CMD_SYS_SESSION_INIT 0x10007 +#define HFI_CMD_SYS_SESSION_END 0x10008 +#define HFI_CMD_SYS_SET_BUFFERS 0x10009 +#define HFI_CMD_SYS_TEST_SSR 0x10101 + +#define HFI_CMD_SESSION_SET_PROPERTY 0x11001 +#define HFI_CMD_SESSION_SET_BUFFERS 0x11002 +#define HFI_CMD_SESSION_GET_SEQUENCE_HEADER 0x11003 + +#define HFI_CMD_SYS_SESSION_ABORT 0x210001 +#define HFI_CMD_SYS_PING 0x210002 + +#define HFI_CMD_SESSION_LOAD_RESOURCES 0x211001 +#define HFI_CMD_SESSION_START 0x211002 +#define HFI_CMD_SESSION_STOP 0x211003 +#define HFI_CMD_SESSION_EMPTY_BUFFER 0x211004 +#define HFI_CMD_SESSION_FILL_BUFFER 0x211005 +#define HFI_CMD_SESSION_SUSPEND 0x211006 +#define HFI_CMD_SESSION_RESUME 0x211007 +#define HFI_CMD_SESSION_FLUSH 0x211008 +#define HFI_CMD_SESSION_GET_PROPERTY 0x211009 +#define HFI_CMD_SESSION_PARSE_SEQUENCE_HEADER 0x21100a +#define HFI_CMD_SESSION_RELEASE_BUFFERS 0x21100b +#define HFI_CMD_SESSION_RELEASE_RESOURCES 0x21100c +#define HFI_CMD_SESSION_CONTINUE 0x21100d +#define HFI_CMD_SESSION_SYNC 0x21100e + +/* command packets */ +struct hfi_sys_init_pkt { + struct hfi_pkt_hdr hdr; + u32 arch_type; +}; + +struct hfi_sys_pc_prep_pkt { + struct hfi_pkt_hdr hdr; +}; + +struct hfi_sys_set_resource_pkt { + struct hfi_pkt_hdr hdr; + u32 resource_handle; + u32 resource_type; + u32 resource_data[1]; +}; + +struct hfi_sys_release_resource_pkt { + struct hfi_pkt_hdr hdr; + u32 resource_type; + u32 resource_handle; +}; + +struct hfi_sys_set_property_pkt { + struct hfi_pkt_hdr hdr; + u32 num_properties; + u32 data[1]; +}; + +struct hfi_sys_get_property_pkt { + struct hfi_pkt_hdr hdr; + u32 num_properties; + u32 data[1]; +}; + 
+struct hfi_sys_set_buffers_pkt { + struct hfi_pkt_hdr hdr; + u32 buffer_type; + u32 buffer_size; + u32 num_buffers; + u32 buffer_addr[1]; +}; + +struct hfi_sys_ping_pkt { + struct hfi_pkt_hdr hdr; + u32 client_data; +}; + +struct hfi_session_init_pkt { + struct hfi_session_hdr_pkt shdr; + u32 session_domain; + u32 session_codec; +}; + +struct hfi_session_end_pkt { + struct hfi_session_hdr_pkt shdr; +}; + +struct hfi_session_abort_pkt { + struct hfi_session_hdr_pkt shdr; +}; + +struct hfi_session_set_property_pkt { + struct hfi_session_hdr_pkt shdr; + u32 num_properties; + u32 data[0]; +}; + +struct hfi_session_set_buffers_pkt { + struct hfi_session_hdr_pkt shdr; + u32 buffer_type; + u32 buffer_size; + u32 extradata_size; + u32 min_buffer_size; + u32 num_buffers; + u32 buffer_info[1]; +}; + +struct hfi_session_get_sequence_header_pkt { + struct hfi_session_hdr_pkt shdr; + u32 buffer_len; + u32 packet_buffer; +}; + +struct hfi_session_load_resources_pkt { + struct hfi_session_hdr_pkt shdr; +}; + +struct hfi_session_start_pkt { + struct hfi_session_hdr_pkt shdr; +}; + +struct hfi_session_stop_pkt { + struct hfi_session_hdr_pkt shdr; +}; + +struct hfi_session_empty_buffer_compressed_pkt { + struct hfi_session_hdr_pkt shdr; + u32 time_stamp_hi; + u32 time_stamp_lo; + u32 flags; + u32 mark_target; + u32 mark_data; + u32 offset; + u32 alloc_len; + u32 filled_len; + u32 input_tag; + u32 packet_buffer; + u32 extradata_buffer; + u32 data[1]; +}; + +struct hfi_session_empty_buffer_uncompressed_plane0_pkt { + struct hfi_session_hdr_pkt shdr; + u32 view_id; + u32 time_stamp_hi; + u32 time_stamp_lo; + u32 flags; + u32 mark_target; + u32 mark_data; + u32 alloc_len; + u32 filled_len; + u32 offset; + u32 input_tag; + u32 packet_buffer; + u32 extradata_buffer; + u32 data[1]; +}; + +struct hfi_session_empty_buffer_uncompressed_plane1_pkt { + u32 flags; + u32 alloc_len; + u32 filled_len; + u32 offset; + u32 packet_buffer2; + u32 data[1]; +}; + +struct 
hfi_session_empty_buffer_uncompressed_plane2_pkt { + u32 flags; + u32 alloc_len; + u32 filled_len; + u32 offset; + u32 packet_buffer3; + u32 data[1]; +}; + +struct hfi_session_fill_buffer_pkt { + struct hfi_session_hdr_pkt shdr; + u32 stream_id; + u32 offset; + u32 alloc_len; + u32 filled_len; + u32 output_tag; + u32 packet_buffer; + u32 extradata_buffer; + u32 data[1]; +}; + +struct hfi_session_flush_pkt { + struct hfi_session_hdr_pkt shdr; + u32 flush_type; +}; + +struct hfi_session_suspend_pkt { + struct hfi_session_hdr_pkt shdr; +}; + +struct hfi_session_resume_pkt { + struct hfi_session_hdr_pkt shdr; +}; + +struct hfi_session_get_property_pkt { + struct hfi_session_hdr_pkt shdr; + u32 num_properties; + u32 data[1]; +}; + +struct hfi_session_release_buffer_pkt { + struct hfi_session_hdr_pkt shdr; + u32 buffer_type; + u32 buffer_size; + u32 extradata_size; + u32 response_req; + u32 num_buffers; + u32 buffer_info[1]; +}; + +struct hfi_session_release_resources_pkt { + struct hfi_session_hdr_pkt shdr; +}; + +struct hfi_session_parse_sequence_header_pkt { + struct hfi_session_hdr_pkt shdr; + u32 header_len; + u32 packet_buffer; +}; + +struct hfi_sfr { + u32 buf_size; + u8 data[1]; +}; + +struct hfi_sys_test_ssr_pkt { + struct hfi_pkt_hdr hdr; + u32 trigger_type; +}; + +void pkt_set_version(enum hfi_version version); + +void pkt_sys_init(struct hfi_sys_init_pkt *pkt, u32 arch_type); +void pkt_sys_pc_prep(struct hfi_sys_pc_prep_pkt *pkt); +void pkt_sys_idle_indicator(struct hfi_sys_set_property_pkt *pkt, u32 enable); +void pkt_sys_power_control(struct hfi_sys_set_property_pkt *pkt, u32 enable); +int pkt_sys_set_resource(struct hfi_sys_set_resource_pkt *pkt, u32 id, u32 size, + u32 addr, void *cookie); +int pkt_sys_unset_resource(struct hfi_sys_release_resource_pkt *pkt, u32 id, + u32 size, void *cookie); +void pkt_sys_debug_config(struct hfi_sys_set_property_pkt *pkt, u32 mode, + u32 config); +void pkt_sys_coverage_config(struct hfi_sys_set_property_pkt *pkt, u32 
mode); +void pkt_sys_ping(struct hfi_sys_ping_pkt *pkt, u32 cookie); +void pkt_sys_image_version(struct hfi_sys_get_property_pkt *pkt); +int pkt_sys_ssr_cmd(struct hfi_sys_test_ssr_pkt *pkt, u32 trigger_type); +int pkt_session_init(struct hfi_session_init_pkt *pkt, void *cookie, + u32 session_type, u32 codec); +void pkt_session_cmd(struct hfi_session_pkt *pkt, u32 pkt_type, void *cookie); +int pkt_session_set_buffers(struct hfi_session_set_buffers_pkt *pkt, + void *cookie, struct hfi_buffer_desc *bd); +int pkt_session_unset_buffers(struct hfi_session_release_buffer_pkt *pkt, + void *cookie, struct hfi_buffer_desc *bd); +int pkt_session_etb_decoder(struct hfi_session_empty_buffer_compressed_pkt *pkt, + void *cookie, struct hfi_frame_data *input_frame); +int pkt_session_etb_encoder( + struct hfi_session_empty_buffer_uncompressed_plane0_pkt *pkt, + void *cookie, struct hfi_frame_data *input_frame); +int pkt_session_ftb(struct hfi_session_fill_buffer_pkt *pkt, + void *cookie, struct hfi_frame_data *output_frame); +int pkt_session_parse_seq_header( + struct hfi_session_parse_sequence_header_pkt *pkt, + void *cookie, u32 seq_hdr, u32 seq_hdr_len); +int pkt_session_get_seq_hdr(struct hfi_session_get_sequence_header_pkt *pkt, + void *cookie, u32 seq_hdr, u32 seq_hdr_len); +int pkt_session_flush(struct hfi_session_flush_pkt *pkt, void *cookie, + u32 flush_mode); +int pkt_session_get_property(struct hfi_session_get_property_pkt *pkt, + void *cookie, u32 ptype); +int pkt_session_set_property(struct hfi_session_set_property_pkt *pkt, + void *cookie, u32 ptype, void *pdata); + +#endif diff --git a/drivers/media/platform/qcom/venus/hfi_helper.h b/drivers/media/platform/qcom/venus/hfi_helper.h new file mode 100644 index 000000000000..8d282dba9e57 --- /dev/null +++ b/drivers/media/platform/qcom/venus/hfi_helper.h @@ -0,0 +1,1050 @@ +/* + * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * Copyright (C) 2017 Linaro Ltd. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef __VENUS_HFI_HELPER_H__ +#define __VENUS_HFI_HELPER_H__ + +#define HFI_DOMAIN_BASE_COMMON 0 + +#define HFI_DOMAIN_BASE_VDEC 0x1000000 +#define HFI_DOMAIN_BASE_VENC 0x2000000 +#define HFI_DOMAIN_BASE_VPE 0x3000000 + +#define HFI_VIDEO_ARCH_OX 0x1 + +#define HFI_ARCH_COMMON_OFFSET 0 +#define HFI_ARCH_OX_OFFSET 0x200000 + +#define HFI_OX_BASE 0x1000000 + +#define HFI_CMD_START_OFFSET 0x10000 +#define HFI_MSG_START_OFFSET 0x20000 + +#define HFI_ERR_NONE 0x0 +#define HFI_ERR_SYS_FATAL 0x1 +#define HFI_ERR_SYS_INVALID_PARAMETER 0x2 +#define HFI_ERR_SYS_VERSION_MISMATCH 0x3 +#define HFI_ERR_SYS_INSUFFICIENT_RESOURCES 0x4 +#define HFI_ERR_SYS_MAX_SESSIONS_REACHED 0x5 +#define HFI_ERR_SYS_UNSUPPORTED_CODEC 0x6 +#define HFI_ERR_SYS_SESSION_IN_USE 0x7 +#define HFI_ERR_SYS_SESSION_ID_OUT_OF_RANGE 0x8 +#define HFI_ERR_SYS_UNSUPPORTED_DOMAIN 0x9 + +#define HFI_ERR_SESSION_FATAL 0x1001 +#define HFI_ERR_SESSION_INVALID_PARAMETER 0x1002 +#define HFI_ERR_SESSION_BAD_POINTER 0x1003 +#define HFI_ERR_SESSION_INVALID_SESSION_ID 0x1004 +#define HFI_ERR_SESSION_INVALID_STREAM_ID 0x1005 +#define HFI_ERR_SESSION_INCORRECT_STATE_OPERATION 0x1006 +#define HFI_ERR_SESSION_UNSUPPORTED_PROPERTY 0x1007 +#define HFI_ERR_SESSION_UNSUPPORTED_SETTING 0x1008 +#define HFI_ERR_SESSION_INSUFFICIENT_RESOURCES 0x1009 +#define HFI_ERR_SESSION_STREAM_CORRUPT_OUTPUT_STALLED 0x100a +#define HFI_ERR_SESSION_STREAM_CORRUPT 0x100b +#define HFI_ERR_SESSION_ENC_OVERFLOW 0x100c +#define HFI_ERR_SESSION_UNSUPPORTED_STREAM 0x100d +#define 
HFI_ERR_SESSION_CMDSIZE 0x100e +#define HFI_ERR_SESSION_UNSUPPORT_CMD 0x100f +#define HFI_ERR_SESSION_UNSUPPORT_BUFFERTYPE 0x1010 +#define HFI_ERR_SESSION_BUFFERCOUNT_TOOSMALL 0x1011 +#define HFI_ERR_SESSION_INVALID_SCALE_FACTOR 0x1012 +#define HFI_ERR_SESSION_UPSCALE_NOT_SUPPORTED 0x1013 + +#define HFI_EVENT_SYS_ERROR 0x1 +#define HFI_EVENT_SESSION_ERROR 0x2 + +#define HFI_EVENT_DATA_SEQUENCE_CHANGED_SUFFICIENT_BUF_RESOURCES 0x1000001 +#define HFI_EVENT_DATA_SEQUENCE_CHANGED_INSUFFICIENT_BUF_RESOURCES 0x1000002 +#define HFI_EVENT_SESSION_SEQUENCE_CHANGED 0x1000003 +#define HFI_EVENT_SESSION_PROPERTY_CHANGED 0x1000004 +#define HFI_EVENT_SESSION_LTRUSE_FAILED 0x1000005 +#define HFI_EVENT_RELEASE_BUFFER_REFERENCE 0x1000006 + +#define HFI_BUFFERFLAG_EOS 0x00000001 +#define HFI_BUFFERFLAG_STARTTIME 0x00000002 +#define HFI_BUFFERFLAG_DECODEONLY 0x00000004 +#define HFI_BUFFERFLAG_DATACORRUPT 0x00000008 +#define HFI_BUFFERFLAG_ENDOFFRAME 0x00000010 +#define HFI_BUFFERFLAG_SYNCFRAME 0x00000020 +#define HFI_BUFFERFLAG_EXTRADATA 0x00000040 +#define HFI_BUFFERFLAG_CODECCONFIG 0x00000080 +#define HFI_BUFFERFLAG_TIMESTAMPINVALID 0x00000100 +#define HFI_BUFFERFLAG_READONLY 0x00000200 +#define HFI_BUFFERFLAG_ENDOFSUBFRAME 0x00000400 +#define HFI_BUFFERFLAG_EOSEQ 0x00200000 +#define HFI_BUFFERFLAG_MBAFF 0x08000000 +#define HFI_BUFFERFLAG_VPE_YUV_601_709_CSC_CLAMP 0x10000000 +#define HFI_BUFFERFLAG_DROP_FRAME 0x20000000 +#define HFI_BUFFERFLAG_TEI 0x40000000 +#define HFI_BUFFERFLAG_DISCONTINUITY 0x80000000 + +#define HFI_ERR_SESSION_EMPTY_BUFFER_DONE_OUTPUT_PENDING 0x1001001 +#define HFI_ERR_SESSION_SAME_STATE_OPERATION 0x1001002 +#define HFI_ERR_SESSION_SYNC_FRAME_NOT_DETECTED 0x1001003 +#define HFI_ERR_SESSION_START_CODE_NOT_FOUND 0x1001004 + +#define HFI_FLUSH_INPUT 0x1000001 +#define HFI_FLUSH_OUTPUT 0x1000002 +#define HFI_FLUSH_OUTPUT2 0x1000003 +#define HFI_FLUSH_ALL 0x1000004 + +#define HFI_EXTRADATA_NONE 0x00000000 +#define HFI_EXTRADATA_MB_QUANTIZATION 0x00000001 +#define 
HFI_EXTRADATA_INTERLACE_VIDEO 0x00000002 +#define HFI_EXTRADATA_VC1_FRAMEDISP 0x00000003 +#define HFI_EXTRADATA_VC1_SEQDISP 0x00000004 +#define HFI_EXTRADATA_TIMESTAMP 0x00000005 +#define HFI_EXTRADATA_S3D_FRAME_PACKING 0x00000006 +#define HFI_EXTRADATA_FRAME_RATE 0x00000007 +#define HFI_EXTRADATA_PANSCAN_WINDOW 0x00000008 +#define HFI_EXTRADATA_RECOVERY_POINT_SEI 0x00000009 +#define HFI_EXTRADATA_MPEG2_SEQDISP 0x0000000d +#define HFI_EXTRADATA_STREAM_USERDATA 0x0000000e +#define HFI_EXTRADATA_FRAME_QP 0x0000000f +#define HFI_EXTRADATA_FRAME_BITS_INFO 0x00000010 +#define HFI_EXTRADATA_MULTISLICE_INFO 0x7f100000 +#define HFI_EXTRADATA_NUM_CONCEALED_MB 0x7f100001 +#define HFI_EXTRADATA_INDEX 0x7f100002 +#define HFI_EXTRADATA_METADATA_LTR 0x7f100004 +#define HFI_EXTRADATA_METADATA_FILLER 0x7fe00002 + +#define HFI_INDEX_EXTRADATA_INPUT_CROP 0x0700000e +#define HFI_INDEX_EXTRADATA_DIGITAL_ZOOM 0x07000010 +#define HFI_INDEX_EXTRADATA_ASPECT_RATIO 0x7f100003 + +#define HFI_INTERLACE_FRAME_PROGRESSIVE 0x01 +#define HFI_INTERLACE_INTERLEAVE_FRAME_TOPFIELDFIRST 0x02 +#define HFI_INTERLACE_INTERLEAVE_FRAME_BOTTOMFIELDFIRST 0x04 +#define HFI_INTERLACE_FRAME_TOPFIELDFIRST 0x08 +#define HFI_INTERLACE_FRAME_BOTTOMFIELDFIRST 0x10 + +/* + * HFI_PROPERTY_PARAM_OX_START + * HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + 0x1000 + */ +#define HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL 0x201001 +#define HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO 0x201002 +#define HFI_PROPERTY_PARAM_INTERLACE_FORMAT_SUPPORTED 0x201003 +#define HFI_PROPERTY_PARAM_CHROMA_SITE 0x201004 +#define HFI_PROPERTY_PARAM_EXTRA_DATA_HEADER_CONFIG 0x201005 +#define HFI_PROPERTY_PARAM_INDEX_EXTRADATA 0x201006 +#define HFI_PROPERTY_PARAM_DIVX_FORMAT 0x201007 +#define HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE 0x201008 +#define HFI_PROPERTY_PARAM_S3D_FRAME_PACKING_EXTRADATA 0x201009 +#define HFI_PROPERTY_PARAM_ERR_DETECTION_CODE_EXTRADATA 0x20100a +#define HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE_SUPPORTED 0x20100b 
+#define HFI_PROPERTY_PARAM_BUFFER_SIZE_ACTUAL 0x20100c +#define HFI_PROPERTY_PARAM_BUFFER_DISPLAY_HOLD_COUNT_ACTUAL 0x20100d + +/* + * HFI_PROPERTY_CONFIG_OX_START + * HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + 0x2000 + */ +#define HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS 0x202001 +#define HFI_PROPERTY_CONFIG_REALTIME 0x202002 +#define HFI_PROPERTY_CONFIG_PRIORITY 0x202003 +#define HFI_PROPERTY_CONFIG_BATCH_INFO 0x202004 + +/* + * HFI_PROPERTY_PARAM_VDEC_OX_START \ + * HFI_DOMAIN_BASE_VDEC + HFI_ARCH_OX_OFFSET + 0x3000 + */ +#define HFI_PROPERTY_PARAM_VDEC_CONTINUE_DATA_TRANSFER 0x1203001 +#define HFI_PROPERTY_PARAM_VDEC_DISPLAY_PICTURE_BUFFER_COUNT 0x1203002 +#define HFI_PROPERTY_PARAM_VDEC_MULTI_VIEW_SELECT 0x1203003 +#define HFI_PROPERTY_PARAM_VDEC_PICTURE_TYPE_DECODE 0x1203004 +#define HFI_PROPERTY_PARAM_VDEC_OUTPUT_ORDER 0x1203005 +#define HFI_PROPERTY_PARAM_VDEC_MB_QUANTIZATION 0x1203006 +#define HFI_PROPERTY_PARAM_VDEC_NUM_CONCEALED_MB 0x1203007 +#define HFI_PROPERTY_PARAM_VDEC_H264_ENTROPY_SWITCHING 0x1203008 +#define HFI_PROPERTY_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO 0x1203009 +#define HFI_PROPERTY_PARAM_VDEC_FRAME_RATE_EXTRADATA 0x120300a +#define HFI_PROPERTY_PARAM_VDEC_PANSCAN_WNDW_EXTRADATA 0x120300b +#define HFI_PROPERTY_PARAM_VDEC_RECOVERY_POINT_SEI_EXTRADATA 0x120300c +#define HFI_PROPERTY_PARAM_VDEC_THUMBNAIL_MODE 0x120300d +#define HFI_PROPERTY_PARAM_VDEC_FRAME_ASSEMBLY 0x120300e +#define HFI_PROPERTY_PARAM_VDEC_VC1_FRAMEDISP_EXTRADATA 0x1203011 +#define HFI_PROPERTY_PARAM_VDEC_VC1_SEQDISP_EXTRADATA 0x1203012 +#define HFI_PROPERTY_PARAM_VDEC_TIMESTAMP_EXTRADATA 0x1203013 +#define HFI_PROPERTY_PARAM_VDEC_INTERLACE_VIDEO_EXTRADATA 0x1203014 +#define HFI_PROPERTY_PARAM_VDEC_AVC_SESSION_SELECT 0x1203015 +#define HFI_PROPERTY_PARAM_VDEC_MPEG2_SEQDISP_EXTRADATA 0x1203016 +#define HFI_PROPERTY_PARAM_VDEC_STREAM_USERDATA_EXTRADATA 0x1203017 +#define HFI_PROPERTY_PARAM_VDEC_FRAME_QP_EXTRADATA 0x1203018 +#define 
HFI_PROPERTY_PARAM_VDEC_FRAME_BITS_INFO_EXTRADATA 0x1203019 +#define HFI_PROPERTY_PARAM_VDEC_SCS_THRESHOLD 0x120301a + +/* + * HFI_PROPERTY_CONFIG_VDEC_OX_START + * HFI_DOMAIN_BASE_VDEC + HFI_ARCH_OX_OFFSET + 0x0000 + */ +#define HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER 0x1200001 +#define HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP_REPORTING 0x1200002 +#define HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP 0x1200003 + +#define HFI_PROPERTY_CONFIG_VDEC_ENTROPY 0x1204004 + +/* + * HFI_PROPERTY_PARAM_VENC_OX_START + * HFI_DOMAIN_BASE_VENC + HFI_ARCH_OX_OFFSET + 0x5000 + */ +#define HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_INFO 0x2205001 +#define HFI_PROPERTY_PARAM_VENC_H264_IDR_S3D_FRAME_PACKING_NAL 0x2205002 +#define HFI_PROPERTY_PARAM_VENC_LTR_INFO 0x2205003 +#define HFI_PROPERTY_PARAM_VENC_MBI_DUMPING 0x2205005 + +/* + * HFI_PROPERTY_CONFIG_VENC_OX_START + * HFI_DOMAIN_BASE_VENC + HFI_ARCH_OX_OFFSET + 0x6000 + */ +#define HFI_PROPERTY_CONFIG_VENC_FRAME_QP 0x2206001 + +/* + * HFI_PROPERTY_PARAM_VPE_OX_START + * HFI_DOMAIN_BASE_VPE + HFI_ARCH_OX_OFFSET + 0x7000 + */ +#define HFI_PROPERTY_PARAM_VPE_COLOR_SPACE_CONVERSION 0x3207001 + +#define HFI_PROPERTY_CONFIG_VPE_OX_START \ + (HFI_DOMAIN_BASE_VPE + HFI_ARCH_OX_OFFSET + 0x8000) + +#define HFI_CHROMA_SITE_0 0x1000001 +#define HFI_CHROMA_SITE_1 0x1000002 +#define HFI_CHROMA_SITE_2 0x1000003 +#define HFI_CHROMA_SITE_3 0x1000004 +#define HFI_CHROMA_SITE_4 0x1000005 +#define HFI_CHROMA_SITE_5 0x1000006 + +#define HFI_PRIORITY_LOW 10 +#define HFI_PRIOIRTY_MEDIUM 20 +#define HFI_PRIORITY_HIGH 30 + +#define HFI_OUTPUT_ORDER_DISPLAY 0x1000001 +#define HFI_OUTPUT_ORDER_DECODE 0x1000002 + +#define HFI_RATE_CONTROL_OFF 0x1000001 +#define HFI_RATE_CONTROL_VBR_VFR 0x1000002 +#define HFI_RATE_CONTROL_VBR_CFR 0x1000003 +#define HFI_RATE_CONTROL_CBR_VFR 0x1000004 +#define HFI_RATE_CONTROL_CBR_CFR 0x1000005 + +#define HFI_VIDEO_CODEC_H264 0x00000002 +#define HFI_VIDEO_CODEC_H263 0x00000004 +#define HFI_VIDEO_CODEC_MPEG1 0x00000008 +#define 
HFI_VIDEO_CODEC_MPEG2 0x00000010 +#define HFI_VIDEO_CODEC_MPEG4 0x00000020 +#define HFI_VIDEO_CODEC_DIVX_311 0x00000040 +#define HFI_VIDEO_CODEC_DIVX 0x00000080 +#define HFI_VIDEO_CODEC_VC1 0x00000100 +#define HFI_VIDEO_CODEC_SPARK 0x00000200 +#define HFI_VIDEO_CODEC_VP8 0x00001000 +#define HFI_VIDEO_CODEC_HEVC 0x00002000 +#define HFI_VIDEO_CODEC_VP9 0x00004000 +#define HFI_VIDEO_CODEC_HEVC_HYBRID 0x80000000 + +#define HFI_H264_PROFILE_BASELINE 0x00000001 +#define HFI_H264_PROFILE_MAIN 0x00000002 +#define HFI_H264_PROFILE_HIGH 0x00000004 +#define HFI_H264_PROFILE_STEREO_HIGH 0x00000008 +#define HFI_H264_PROFILE_MULTIVIEW_HIGH 0x00000010 +#define HFI_H264_PROFILE_CONSTRAINED_BASE 0x00000020 +#define HFI_H264_PROFILE_CONSTRAINED_HIGH 0x00000040 + +#define HFI_H264_LEVEL_1 0x00000001 +#define HFI_H264_LEVEL_1b 0x00000002 +#define HFI_H264_LEVEL_11 0x00000004 +#define HFI_H264_LEVEL_12 0x00000008 +#define HFI_H264_LEVEL_13 0x00000010 +#define HFI_H264_LEVEL_2 0x00000020 +#define HFI_H264_LEVEL_21 0x00000040 +#define HFI_H264_LEVEL_22 0x00000080 +#define HFI_H264_LEVEL_3 0x00000100 +#define HFI_H264_LEVEL_31 0x00000200 +#define HFI_H264_LEVEL_32 0x00000400 +#define HFI_H264_LEVEL_4 0x00000800 +#define HFI_H264_LEVEL_41 0x00001000 +#define HFI_H264_LEVEL_42 0x00002000 +#define HFI_H264_LEVEL_5 0x00004000 +#define HFI_H264_LEVEL_51 0x00008000 +#define HFI_H264_LEVEL_52 0x00010000 + +#define HFI_H263_PROFILE_BASELINE 0x00000001 + +#define HFI_H263_LEVEL_10 0x00000001 +#define HFI_H263_LEVEL_20 0x00000002 +#define HFI_H263_LEVEL_30 0x00000004 +#define HFI_H263_LEVEL_40 0x00000008 +#define HFI_H263_LEVEL_45 0x00000010 +#define HFI_H263_LEVEL_50 0x00000020 +#define HFI_H263_LEVEL_60 0x00000040 +#define HFI_H263_LEVEL_70 0x00000080 + +#define HFI_MPEG2_PROFILE_SIMPLE 0x00000001 +#define HFI_MPEG2_PROFILE_MAIN 0x00000002 +#define HFI_MPEG2_PROFILE_422 0x00000004 +#define HFI_MPEG2_PROFILE_SNR 0x00000008 +#define HFI_MPEG2_PROFILE_SPATIAL 0x00000010 +#define 
HFI_MPEG2_PROFILE_HIGH 0x00000020 + +#define HFI_MPEG2_LEVEL_LL 0x00000001 +#define HFI_MPEG2_LEVEL_ML 0x00000002 +#define HFI_MPEG2_LEVEL_H14 0x00000004 +#define HFI_MPEG2_LEVEL_HL 0x00000008 + +#define HFI_MPEG4_PROFILE_SIMPLE 0x00000001 +#define HFI_MPEG4_PROFILE_ADVANCEDSIMPLE 0x00000002 + +#define HFI_MPEG4_LEVEL_0 0x00000001 +#define HFI_MPEG4_LEVEL_0b 0x00000002 +#define HFI_MPEG4_LEVEL_1 0x00000004 +#define HFI_MPEG4_LEVEL_2 0x00000008 +#define HFI_MPEG4_LEVEL_3 0x00000010 +#define HFI_MPEG4_LEVEL_4 0x00000020 +#define HFI_MPEG4_LEVEL_4a 0x00000040 +#define HFI_MPEG4_LEVEL_5 0x00000080 +#define HFI_MPEG4_LEVEL_6 0x00000100 +#define HFI_MPEG4_LEVEL_7 0x00000200 +#define HFI_MPEG4_LEVEL_8 0x00000400 +#define HFI_MPEG4_LEVEL_9 0x00000800 +#define HFI_MPEG4_LEVEL_3b 0x00001000 + +#define HFI_VC1_PROFILE_SIMPLE 0x00000001 +#define HFI_VC1_PROFILE_MAIN 0x00000002 +#define HFI_VC1_PROFILE_ADVANCED 0x00000004 + +#define HFI_VC1_LEVEL_LOW 0x00000001 +#define HFI_VC1_LEVEL_MEDIUM 0x00000002 +#define HFI_VC1_LEVEL_HIGH 0x00000004 +#define HFI_VC1_LEVEL_0 0x00000008 +#define HFI_VC1_LEVEL_1 0x00000010 +#define HFI_VC1_LEVEL_2 0x00000020 +#define HFI_VC1_LEVEL_3 0x00000040 +#define HFI_VC1_LEVEL_4 0x00000080 + +#define HFI_VPX_PROFILE_SIMPLE 0x00000001 +#define HFI_VPX_PROFILE_ADVANCED 0x00000002 +#define HFI_VPX_PROFILE_VERSION_0 0x00000004 +#define HFI_VPX_PROFILE_VERSION_1 0x00000008 +#define HFI_VPX_PROFILE_VERSION_2 0x00000010 +#define HFI_VPX_PROFILE_VERSION_3 0x00000020 + +#define HFI_DIVX_FORMAT_4 0x1 +#define HFI_DIVX_FORMAT_5 0x2 +#define HFI_DIVX_FORMAT_6 0x3 + +#define HFI_DIVX_PROFILE_QMOBILE 0x00000001 +#define HFI_DIVX_PROFILE_MOBILE 0x00000002 +#define HFI_DIVX_PROFILE_MT 0x00000004 +#define HFI_DIVX_PROFILE_HT 0x00000008 +#define HFI_DIVX_PROFILE_HD 0x00000010 + +#define HFI_HEVC_PROFILE_MAIN 0x00000001 +#define HFI_HEVC_PROFILE_MAIN10 0x00000002 +#define HFI_HEVC_PROFILE_MAIN_STILL_PIC 0x00000004 + +#define HFI_HEVC_LEVEL_1 0x00000001 +#define 
HFI_HEVC_LEVEL_2 0x00000002 +#define HFI_HEVC_LEVEL_21 0x00000004 +#define HFI_HEVC_LEVEL_3 0x00000008 +#define HFI_HEVC_LEVEL_31 0x00000010 +#define HFI_HEVC_LEVEL_4 0x00000020 +#define HFI_HEVC_LEVEL_41 0x00000040 +#define HFI_HEVC_LEVEL_5 0x00000080 +#define HFI_HEVC_LEVEL_51 0x00000100 +#define HFI_HEVC_LEVEL_52 0x00000200 +#define HFI_HEVC_LEVEL_6 0x00000400 +#define HFI_HEVC_LEVEL_61 0x00000800 +#define HFI_HEVC_LEVEL_62 0x00001000 + +#define HFI_HEVC_TIER_MAIN 0x1 +#define HFI_HEVC_TIER_HIGH0 0x2 + +#define HFI_BUFFER_INPUT 0x1 +#define HFI_BUFFER_OUTPUT 0x2 +#define HFI_BUFFER_OUTPUT2 0x3 +#define HFI_BUFFER_INTERNAL_PERSIST 0x4 +#define HFI_BUFFER_INTERNAL_PERSIST_1 0x5 +#define HFI_BUFFER_INTERNAL_SCRATCH 0x1000001 +#define HFI_BUFFER_EXTRADATA_INPUT 0x1000002 +#define HFI_BUFFER_EXTRADATA_OUTPUT 0x1000003 +#define HFI_BUFFER_EXTRADATA_OUTPUT2 0x1000004 +#define HFI_BUFFER_INTERNAL_SCRATCH_1 0x1000005 +#define HFI_BUFFER_INTERNAL_SCRATCH_2 0x1000006 + +#define HFI_BUFFER_TYPE_MAX 11 + +#define HFI_BUFFER_MODE_STATIC 0x1000001 +#define HFI_BUFFER_MODE_RING 0x1000002 +#define HFI_BUFFER_MODE_DYNAMIC 0x1000003 + +#define HFI_VENC_PERFMODE_MAX_QUALITY 0x1 +#define HFI_VENC_PERFMODE_POWER_SAVE 0x2 + +/* + * HFI_PROPERTY_SYS_COMMON_START + * HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x0000 + */ +#define HFI_PROPERTY_SYS_DEBUG_CONFIG 0x1 +#define HFI_PROPERTY_SYS_RESOURCE_OCMEM_REQUIREMENT_INFO 0x2 +#define HFI_PROPERTY_SYS_CONFIG_VCODEC_CLKFREQ 0x3 +#define HFI_PROPERTY_SYS_IDLE_INDICATOR 0x4 +#define HFI_PROPERTY_SYS_CODEC_POWER_PLANE_CTRL 0x5 +#define HFI_PROPERTY_SYS_IMAGE_VERSION 0x6 +#define HFI_PROPERTY_SYS_CONFIG_COVERAGE 0x7 + +/* + * HFI_PROPERTY_PARAM_COMMON_START + * HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x1000 + */ +#define HFI_PROPERTY_PARAM_FRAME_SIZE 0x1001 +#define HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO 0x1002 +#define HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT 0x1003 +#define 
HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SUPPORTED 0x1004 +#define HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT 0x1005 +#define HFI_PROPERTY_PARAM_PROFILE_LEVEL_SUPPORTED 0x1006 +#define HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED 0x1007 +#define HFI_PROPERTY_PARAM_PROPERTIES_SUPPORTED 0x1008 +#define HFI_PROPERTY_PARAM_CODEC_SUPPORTED 0x1009 +#define HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SUPPORTED 0x100a +#define HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SELECT 0x100b +#define HFI_PROPERTY_PARAM_MULTI_VIEW_FORMAT 0x100c +#define HFI_PROPERTY_PARAM_MAX_SEQUENCE_HEADER_SIZE 0x100d +#define HFI_PROPERTY_PARAM_CODEC_MASK_SUPPORTED 0x100e +#define HFI_PROPERTY_PARAM_MVC_BUFFER_LAYOUT 0x100f +#define HFI_PROPERTY_PARAM_MAX_SESSIONS_SUPPORTED 0x1010 + +/* + * HFI_PROPERTY_CONFIG_COMMON_START + * HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x2000 + */ +#define HFI_PROPERTY_CONFIG_FRAME_RATE 0x2001 + +/* + * HFI_PROPERTY_PARAM_VDEC_COMMON_START + * HFI_DOMAIN_BASE_VDEC + HFI_ARCH_COMMON_OFFSET + 0x3000 + */ +#define HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM 0x1003001 +#define HFI_PROPERTY_PARAM_VDEC_CONCEAL_COLOR 0x1003002 +#define HFI_PROPERTY_PARAM_VDEC_NONCP_OUTPUT2 0x1003003 + +/* + * HFI_PROPERTY_CONFIG_VDEC_COMMON_START + * HFI_DOMAIN_BASE_VDEC + HFI_ARCH_COMMON_OFFSET + 0x4000 + */ + +/* + * HFI_PROPERTY_PARAM_VENC_COMMON_START + * HFI_DOMAIN_BASE_VENC + HFI_ARCH_COMMON_OFFSET + 0x5000 + */ +#define HFI_PROPERTY_PARAM_VENC_SLICE_DELIVERY_MODE 0x2005001 +#define HFI_PROPERTY_PARAM_VENC_H264_ENTROPY_CONTROL 0x2005002 +#define HFI_PROPERTY_PARAM_VENC_H264_DEBLOCK_CONTROL 0x2005003 +#define HFI_PROPERTY_PARAM_VENC_RATE_CONTROL 0x2005004 +#define HFI_PROPERTY_PARAM_VENC_H264_PICORDER_CNT_TYPE 0x2005005 +#define HFI_PROPERTY_PARAM_VENC_SESSION_QP 0x2005006 +#define HFI_PROPERTY_PARAM_VENC_MPEG4_AC_PREDICTION 0x2005007 +#define HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE 0x2005008 +#define HFI_PROPERTY_PARAM_VENC_MPEG4_TIME_RESOLUTION 0x2005009 +#define 
HFI_PROPERTY_PARAM_VENC_MPEG4_SHORT_HEADER 0x200500a +#define HFI_PROPERTY_PARAM_VENC_MPEG4_HEADER_EXTENSION 0x200500b +#define HFI_PROPERTY_PARAM_VENC_OPEN_GOP 0x200500c +#define HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH 0x200500d +#define HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_CONTROL 0x200500e +#define HFI_PROPERTY_PARAM_VENC_VBV_HRD_BUF_SIZE 0x200500f +#define HFI_PROPERTY_PARAM_VENC_QUALITY_VS_SPEED 0x2005010 +#define HFI_PROPERTY_PARAM_VENC_ADVANCED 0x2005012 +#define HFI_PROPERTY_PARAM_VENC_H264_SPS_ID 0x2005014 +#define HFI_PROPERTY_PARAM_VENC_H264_PPS_ID 0x2005015 +#define HFI_PROPERTY_PARAM_VENC_H264_GENERATE_AUDNAL 0x2005016 +#define HFI_PROPERTY_PARAM_VENC_ASPECT_RATIO 0x2005017 +#define HFI_PROPERTY_PARAM_VENC_NUMREF 0x2005018 +#define HFI_PROPERTY_PARAM_VENC_MULTIREF_P 0x2005019 +#define HFI_PROPERTY_PARAM_VENC_H264_NAL_SVC_EXT 0x200501b +#define HFI_PROPERTY_PARAM_VENC_LTRMODE 0x200501c +#define HFI_PROPERTY_PARAM_VENC_VIDEO_FULL_RANGE 0x200501d +#define HFI_PROPERTY_PARAM_VENC_H264_VUI_TIMING_INFO 0x200501e +#define HFI_PROPERTY_PARAM_VENC_VC1_PERF_CFG 0x200501f +#define HFI_PROPERTY_PARAM_VENC_MAX_NUM_B_FRAMES 0x2005020 +#define HFI_PROPERTY_PARAM_VENC_H264_VUI_BITSTREAM_RESTRC 0x2005021 +#define HFI_PROPERTY_PARAM_VENC_PRESERVE_TEXT_QUALITY 0x2005023 +#define HFI_PROPERTY_PARAM_VENC_HIER_P_MAX_NUM_ENH_LAYER 0x2005026 +#define HFI_PROPERTY_PARAM_VENC_DISABLE_RC_TIMESTAMP 0x2005027 +#define HFI_PROPERTY_PARAM_VENC_INITIAL_QP 0x2005028 +#define HFI_PROPERTY_PARAM_VENC_VPX_ERROR_RESILIENCE_MODE 0x2005029 +#define HFI_PROPERTY_PARAM_VENC_HIER_B_MAX_NUM_ENH_LAYER 0x200502c +#define HFI_PROPERTY_PARAM_VENC_HIER_P_HYBRID_MODE 0x200502f + +/* + * HFI_PROPERTY_CONFIG_VENC_COMMON_START + * HFI_DOMAIN_BASE_VENC + HFI_ARCH_COMMON_OFFSET + 0x6000 + */ +#define HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE 0x2006001 +#define HFI_PROPERTY_CONFIG_VENC_IDR_PERIOD 0x2006002 +#define HFI_PROPERTY_CONFIG_VENC_INTRA_PERIOD 0x2006003 +#define 
HFI_PROPERTY_CONFIG_VENC_REQUEST_SYNC_FRAME 0x2006004 +#define HFI_PROPERTY_CONFIG_VENC_SLICE_SIZE 0x2006005 +#define HFI_PROPERTY_CONFIG_VENC_MAX_BITRATE 0x2006007 +#define HFI_PROPERTY_CONFIG_VENC_SYNC_FRAME_SEQUENCE_HEADER 0x2006008 +#define HFI_PROPERTY_CONFIG_VENC_MARKLTRFRAME 0x2006009 +#define HFI_PROPERTY_CONFIG_VENC_USELTRFRAME 0x200600a +#define HFI_PROPERTY_CONFIG_VENC_HIER_P_ENH_LAYER 0x200600b +#define HFI_PROPERTY_CONFIG_VENC_LTRPERIOD 0x200600c +#define HFI_PROPERTY_CONFIG_VENC_PERF_MODE 0x200600e + +/* + * HFI_PROPERTY_PARAM_VPE_COMMON_START + * HFI_DOMAIN_BASE_VPE + HFI_ARCH_COMMON_OFFSET + 0x7000 + */ + +/* + * HFI_PROPERTY_CONFIG_VPE_COMMON_START + * HFI_DOMAIN_BASE_VPE + HFI_ARCH_COMMON_OFFSET + 0x8000 + */ +#define HFI_PROPERTY_CONFIG_VPE_DEINTERLACE 0x3008001 +#define HFI_PROPERTY_CONFIG_VPE_OPERATIONS 0x3008002 + +enum hfi_version { + HFI_VERSION_1XX, + HFI_VERSION_3XX, +}; + +struct hfi_buffer_info { + u32 buffer_addr; + u32 extradata_addr; +}; + +struct hfi_bitrate { + u32 bitrate; + u32 layer_id; +}; + +#define HFI_CAPABILITY_FRAME_WIDTH 0x01 +#define HFI_CAPABILITY_FRAME_HEIGHT 0x02 +#define HFI_CAPABILITY_MBS_PER_FRAME 0x03 +#define HFI_CAPABILITY_MBS_PER_SECOND 0x04 +#define HFI_CAPABILITY_FRAMERATE 0x05 +#define HFI_CAPABILITY_SCALE_X 0x06 +#define HFI_CAPABILITY_SCALE_Y 0x07 +#define HFI_CAPABILITY_BITRATE 0x08 +#define HFI_CAPABILITY_BFRAME 0x09 +#define HFI_CAPABILITY_PEAKBITRATE 0x0a +#define HFI_CAPABILITY_HIER_P_NUM_ENH_LAYERS 0x10 +#define HFI_CAPABILITY_ENC_LTR_COUNT 0x11 +#define HFI_CAPABILITY_CP_OUTPUT2_THRESH 0x12 +#define HFI_CAPABILITY_HIER_B_NUM_ENH_LAYERS 0x13 +#define HFI_CAPABILITY_LCU_SIZE 0x14 +#define HFI_CAPABILITY_HIER_P_HYBRID_NUM_ENH_LAYERS 0x15 +#define HFI_CAPABILITY_MBS_PER_SECOND_POWERSAVE 0x16 + +struct hfi_capability { + u32 capability_type; + u32 min; + u32 max; + u32 step_size; +}; + +struct hfi_capabilities { + u32 num_capabilities; + struct hfi_capability data[1]; +}; + +#define HFI_DEBUG_MSG_LOW 0x01 
+#define HFI_DEBUG_MSG_MEDIUM 0x02 +#define HFI_DEBUG_MSG_HIGH 0x04 +#define HFI_DEBUG_MSG_ERROR 0x08 +#define HFI_DEBUG_MSG_FATAL 0x10 +#define HFI_DEBUG_MSG_PERF 0x20 + +#define HFI_DEBUG_MODE_QUEUE 0x01 +#define HFI_DEBUG_MODE_QDSS 0x02 + +struct hfi_debug_config { + u32 config; + u32 mode; +}; + +struct hfi_enable { + u32 enable; +}; + +#define HFI_H264_DB_MODE_DISABLE 0x1 +#define HFI_H264_DB_MODE_SKIP_SLICE_BOUNDARY 0x2 +#define HFI_H264_DB_MODE_ALL_BOUNDARY 0x3 + +struct hfi_h264_db_control { + u32 mode; + u32 slice_alpha_offset; + u32 slice_beta_offset; +}; + +#define HFI_H264_ENTROPY_CAVLC 0x1 +#define HFI_H264_ENTROPY_CABAC 0x2 + +#define HFI_H264_CABAC_MODEL_0 0x1 +#define HFI_H264_CABAC_MODEL_1 0x2 +#define HFI_H264_CABAC_MODEL_2 0x3 + +struct hfi_h264_entropy_control { + u32 entropy_mode; + u32 cabac_model; +}; + +struct hfi_framerate { + u32 buffer_type; + u32 framerate; +}; + +#define HFI_INTRA_REFRESH_NONE 0x1 +#define HFI_INTRA_REFRESH_CYCLIC 0x2 +#define HFI_INTRA_REFRESH_ADAPTIVE 0x3 +#define HFI_INTRA_REFRESH_CYCLIC_ADAPTIVE 0x4 +#define HFI_INTRA_REFRESH_RANDOM 0x5 + +struct hfi_intra_refresh { + u32 mode; + u32 air_mbs; + u32 air_ref; + u32 cir_mbs; +}; + +struct hfi_intra_refresh_3x { + u32 mode; + u32 mbs; +}; + +struct hfi_idr_period { + u32 idr_period; +}; + +struct hfi_operations_type { + u32 rotation; + u32 flip; +}; + +struct hfi_max_num_b_frames { + u32 max_num_b_frames; +}; + +struct hfi_vc1e_perf_cfg_type { + u32 search_range_x_subsampled[3]; + u32 search_range_y_subsampled[3]; +}; + +struct hfi_conceal_color { + u32 conceal_color; +}; + +struct hfi_intra_period { + u32 pframes; + u32 bframes; +}; + +struct hfi_mpeg4_header_extension { + u32 header_extension; +}; + +struct hfi_mpeg4_time_resolution { + u32 time_increment_resolution; +}; + +struct hfi_multi_stream { + u32 buffer_type; + u32 enable; + u32 width; + u32 height; +}; + +struct hfi_multi_stream_3x { + u32 buffer_type; + u32 enable; +}; + +struct hfi_multi_view_format { + 
u32 views; + u32 view_order[1]; +}; + +#define HFI_MULTI_SLICE_OFF 0x1 +#define HFI_MULTI_SLICE_BY_MB_COUNT 0x2 +#define HFI_MULTI_SLICE_BY_BYTE_COUNT 0x3 +#define HFI_MULTI_SLICE_GOB 0x4 + +struct hfi_multi_slice_control { + u32 multi_slice; + u32 slice_size; +}; + +#define HFI_NAL_FORMAT_STARTCODES 0x01 +#define HFI_NAL_FORMAT_ONE_NAL_PER_BUFFER 0x02 +#define HFI_NAL_FORMAT_ONE_BYTE_LENGTH 0x04 +#define HFI_NAL_FORMAT_TWO_BYTE_LENGTH 0x08 +#define HFI_NAL_FORMAT_FOUR_BYTE_LENGTH 0x10 + +struct hfi_nal_stream_format { + u32 format; +}; + +struct hfi_nal_stream_format_select { + u32 format; +}; + +#define HFI_PICTURE_TYPE_I 0x01 +#define HFI_PICTURE_TYPE_P 0x02 +#define HFI_PICTURE_TYPE_B 0x04 +#define HFI_PICTURE_TYPE_IDR 0x08 + +struct hfi_profile_level { + u32 profile; + u32 level; +}; + +#define HFI_MAX_PROFILE_COUNT 16 + +struct hfi_profile_level_supported { + u32 profile_count; + struct hfi_profile_level profile_level[1]; +}; + +struct hfi_quality_vs_speed { + u32 quality_vs_speed; +}; + +struct hfi_quantization { + u32 qp_i; + u32 qp_p; + u32 qp_b; + u32 layer_id; +}; + +struct hfi_initial_quantization { + u32 qp_i; + u32 qp_p; + u32 qp_b; + u32 init_qp_enable; +}; + +struct hfi_quantization_range { + u32 min_qp; + u32 max_qp; + u32 layer_id; +}; + +#define HFI_LTR_MODE_DISABLE 0x0 +#define HFI_LTR_MODE_MANUAL 0x1 +#define HFI_LTR_MODE_PERIODIC 0x2 + +struct hfi_ltr_mode { + u32 ltr_mode; + u32 ltr_count; + u32 trust_mode; +}; + +struct hfi_ltr_use { + u32 ref_ltr; + u32 use_constrnt; + u32 frames; +}; + +struct hfi_ltr_mark { + u32 mark_frame; +}; + +struct hfi_framesize { + u32 buffer_type; + u32 width; + u32 height; +}; + +struct hfi_h264_vui_timing_info { + u32 enable; + u32 fixed_framerate; + u32 time_scale; +}; + +#define HFI_COLOR_FORMAT_MONOCHROME 0x01 +#define HFI_COLOR_FORMAT_NV12 0x02 +#define HFI_COLOR_FORMAT_NV21 0x03 +#define HFI_COLOR_FORMAT_NV12_4x4TILE 0x04 +#define HFI_COLOR_FORMAT_NV21_4x4TILE 0x05 +#define HFI_COLOR_FORMAT_YUYV 0x06 
+#define HFI_COLOR_FORMAT_YVYU 0x07 +#define HFI_COLOR_FORMAT_UYVY 0x08 +#define HFI_COLOR_FORMAT_VYUY 0x09 +#define HFI_COLOR_FORMAT_RGB565 0x0a +#define HFI_COLOR_FORMAT_BGR565 0x0b +#define HFI_COLOR_FORMAT_RGB888 0x0c +#define HFI_COLOR_FORMAT_BGR888 0x0d +#define HFI_COLOR_FORMAT_YUV444 0x0e +#define HFI_COLOR_FORMAT_RGBA8888 0x10 + +#define HFI_COLOR_FORMAT_UBWC_BASE 0x8000 +#define HFI_COLOR_FORMAT_10_BIT_BASE 0x4000 + +#define HFI_COLOR_FORMAT_YUV420_TP10 0x4002 +#define HFI_COLOR_FORMAT_NV12_UBWC 0x8002 +#define HFI_COLOR_FORMAT_YUV420_TP10_UBWC 0xc002 +#define HFI_COLOR_FORMAT_RGBA8888_UBWC 0x8010 + +struct hfi_uncompressed_format_select { + u32 buffer_type; + u32 format; +}; + +struct hfi_uncompressed_format_supported { + u32 buffer_type; + u32 format_entries; + u32 format_info[1]; +}; + +struct hfi_uncompressed_plane_actual { + int actual_stride; + u32 actual_plane_buffer_height; +}; + +struct hfi_uncompressed_plane_actual_info { + u32 buffer_type; + u32 num_planes; + struct hfi_uncompressed_plane_actual plane_format[1]; +}; + +struct hfi_uncompressed_plane_constraints { + u32 stride_multiples; + u32 max_stride; + u32 min_plane_buffer_height_multiple; + u32 buffer_alignment; +}; + +struct hfi_uncompressed_plane_info { + u32 format; + u32 num_planes; + struct hfi_uncompressed_plane_constraints plane_format[1]; +}; + +struct hfi_uncompressed_plane_actual_constraints_info { + u32 buffer_type; + u32 num_planes; + struct hfi_uncompressed_plane_constraints plane_format[1]; +}; + +struct hfi_codec_supported { + u32 dec_codecs; + u32 enc_codecs; +}; + +struct hfi_properties_supported { + u32 num_properties; + u32 properties[1]; +}; + +struct hfi_max_sessions_supported { + u32 max_sessions; +}; + +#define HFI_MAX_MATRIX_COEFFS 9 +#define HFI_MAX_BIAS_COEFFS 3 +#define HFI_MAX_LIMIT_COEFFS 6 + +struct hfi_vpe_color_space_conversion { + u32 csc_matrix[HFI_MAX_MATRIX_COEFFS]; + u32 csc_bias[HFI_MAX_BIAS_COEFFS]; + u32 csc_limit[HFI_MAX_LIMIT_COEFFS]; +}; + +#define 
HFI_ROTATE_NONE 0x1 +#define HFI_ROTATE_90 0x2 +#define HFI_ROTATE_180 0x3 +#define HFI_ROTATE_270 0x4 + +#define HFI_FLIP_NONE 0x1 +#define HFI_FLIP_HORIZONTAL 0x2 +#define HFI_FLIP_VERTICAL 0x3 + +struct hfi_operations { + u32 rotate; + u32 flip; +}; + +#define HFI_RESOURCE_OCMEM 0x1 + +struct hfi_resource_ocmem { + u32 size; + u32 mem; +}; + +struct hfi_resource_ocmem_requirement { + u32 session_domain; + u32 width; + u32 height; + u32 size; +}; + +struct hfi_resource_ocmem_requirement_info { + u32 num_entries; + struct hfi_resource_ocmem_requirement requirements[1]; +}; + +struct hfi_property_sys_image_version_info_type { + u32 string_size; + u8 str_image_version[1]; +}; + +struct hfi_codec_mask_supported { + u32 codecs; + u32 video_domains; +}; + +struct hfi_seq_header_info { + u32 max_hader_len; +}; + +struct hfi_aspect_ratio { + u32 aspect_width; + u32 aspect_height; +}; + +#define HFI_MVC_BUFFER_LAYOUT_TOP_BOTTOM 0 +#define HFI_MVC_BUFFER_LAYOUT_SIDEBYSIDE 1 +#define HFI_MVC_BUFFER_LAYOUT_SEQ 2 + +struct hfi_mvc_buffer_layout_descp_type { + u32 layout_type; + u32 bright_view_first; + u32 ngap; +}; + +struct hfi_scs_threshold { + u32 threshold_value; +}; + +#define HFI_TEST_SSR_SW_ERR_FATAL 0x1 +#define HFI_TEST_SSR_SW_DIV_BY_ZERO 0x2 +#define HFI_TEST_SSR_HW_WDOG_IRQ 0x3 + +struct hfi_buffer_alloc_mode { + u32 type; + u32 mode; +}; + +struct hfi_index_extradata_config { + u32 enable; + u32 index_extra_data_id; +}; + +struct hfi_extradata_header { + u32 size; + u32 version; + u32 port_index; + u32 type; + u32 data_size; + u8 data[1]; +}; + +struct hfi_batch_info { + u32 input_batch_count; + u32 output_batch_count; +}; + +struct hfi_buffer_count_actual { + u32 type; + u32 count_actual; +}; + +struct hfi_buffer_size_actual { + u32 type; + u32 size; +}; + +struct hfi_buffer_display_hold_count_actual { + u32 type; + u32 hold_count; +}; + +struct hfi_buffer_requirements { + u32 type; + u32 size; + u32 region_size; + u32 hold_count; + u32 count_min; + u32 
count_actual; + u32 contiguous; + u32 alignment; +}; + +struct hfi_data_payload { + u32 size; + u8 data[1]; +}; + +struct hfi_enable_picture { + u32 picture_type; +}; + +struct hfi_display_picture_buffer_count { + int enable; + u32 count; +}; + +struct hfi_extra_data_header_config { + u32 type; + u32 buffer_type; + u32 version; + u32 port_index; + u32 client_extra_data_id; +}; + +struct hfi_interlace_format_supported { + u32 buffer_type; + u32 format; +}; + +struct hfi_buffer_alloc_mode_supported { + u32 buffer_type; + u32 num_entries; + u32 data[1]; +}; + +struct hfi_mb_error_map { + u32 error_map_size; + u8 error_map[1]; +}; + +struct hfi_metadata_pass_through { + int enable; + u32 size; +}; + +struct hfi_multi_view_select { + u32 view_index; +}; + +struct hfi_hybrid_hierp { + u32 layers; +}; + +struct hfi_pkt_hdr { + u32 size; + u32 pkt_type; +}; + +struct hfi_session_hdr_pkt { + struct hfi_pkt_hdr hdr; + u32 session_id; +}; + +struct hfi_session_pkt { + struct hfi_session_hdr_pkt shdr; +}; + +#endif diff --git a/drivers/media/platform/qcom/venus/hfi_msgs.c b/drivers/media/platform/qcom/venus/hfi_msgs.c new file mode 100644 index 000000000000..a681ae5381d6 --- /dev/null +++ b/drivers/media/platform/qcom/venus/hfi_msgs.c @@ -0,0 +1,1053 @@ +/* + * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * Copyright (C) 2017 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ *
+ */
+#include <linux/hash.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <media/videobuf2-v4l2.h>
+
+#include "core.h"
+#include "hfi.h"
+#include "hfi_helper.h"
+#include "hfi_msgs.h"
+
+/*
+ * Handle a sequence-changed event: walk the property payload appended to
+ * the event packet, pick up the new frame size and/or profile/level, and
+ * forward the result to the instance's event handler.  inst->error is set
+ * to HFI_ERR_NONE on success or to a session error code on a malformed
+ * packet.
+ */
+static void event_seq_changed(struct venus_core *core, struct venus_inst *inst,
+			      struct hfi_msg_event_notify_pkt *pkt)
+{
+	struct hfi_event_data event = {0};
+	int num_properties_changed;
+	struct hfi_framesize *frame_sz;
+	struct hfi_profile_level *profile_level;
+	u8 *data_ptr;
+	u32 ptype;
+
+	inst->error = HFI_ERR_NONE;
+
+	switch (pkt->event_data1) {
+	case HFI_EVENT_DATA_SEQUENCE_CHANGED_SUFFICIENT_BUF_RESOURCES:
+	case HFI_EVENT_DATA_SEQUENCED_CHANGED_INSUFFICIENT_BUF_RESOURCES:
+		break;
+	default:
+		inst->error = HFI_ERR_SESSION_INVALID_PARAMETER;
+		goto done;
+	}
+
+	event.event_type = pkt->event_data1;
+
+	num_properties_changed = pkt->event_data2;
+	if (!num_properties_changed) {
+		inst->error = HFI_ERR_SESSION_INSUFFICIENT_RESOURCES;
+		goto done;
+	}
+
+	data_ptr = (u8 *)&pkt->ext_event_data[0];
+	do {
+		ptype = *((u32 *)data_ptr);
+		switch (ptype) {
+		case HFI_PROPERTY_PARAM_FRAME_SIZE:
+			data_ptr += sizeof(u32);
+			frame_sz = (struct hfi_framesize *)data_ptr;
+			event.width = frame_sz->width;
+			event.height = frame_sz->height;
+			/*
+			 * Advance by the size of the struct, not the size of
+			 * the pointer; sizeof(frame_sz) would leave data_ptr
+			 * misaligned for any following property.
+			 */
+			data_ptr += sizeof(*frame_sz);
+			break;
+		case HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT:
+			data_ptr += sizeof(u32);
+			profile_level = (struct hfi_profile_level *)data_ptr;
+			event.profile = profile_level->profile;
+			event.level = profile_level->level;
+			/* same fix: skip the struct, not the pointer */
+			data_ptr += sizeof(*profile_level);
+			break;
+		default:
+			break;
+		}
+		num_properties_changed--;
+	} while (num_properties_changed > 0);
+
+done:
+	inst->ops->event_notify(inst, EVT_SYS_EVENT_CHANGE, &event);
+}
+
+static void event_release_buffer_ref(struct venus_core *core,
+				     struct venus_inst *inst,
+				     struct hfi_msg_event_notify_pkt *pkt)
+{
+	struct hfi_event_data event = {0};
+	struct hfi_msg_event_release_buffer_ref_pkt *data;
+
+	data = (struct 
hfi_msg_event_release_buffer_ref_pkt *) + pkt->ext_event_data; + + event.event_type = HFI_EVENT_RELEASE_BUFFER_REFERENCE; + event.packet_buffer = data->packet_buffer; + event.extradata_buffer = data->extradata_buffer; + event.tag = data->output_tag; + + inst->error = HFI_ERR_NONE; + inst->ops->event_notify(inst, EVT_SYS_EVENT_CHANGE, &event); +} + +static void event_sys_error(struct venus_core *core, u32 event, + struct hfi_msg_event_notify_pkt *pkt) +{ + if (pkt) + dev_dbg(core->dev, + "sys error (session id:%x, data1:%x, data2:%x)\n", + pkt->shdr.session_id, pkt->event_data1, + pkt->event_data2); + + core->core_ops->event_notify(core, event); +} + +static void +event_session_error(struct venus_core *core, struct venus_inst *inst, + struct hfi_msg_event_notify_pkt *pkt) +{ + struct device *dev = core->dev; + + dev_dbg(dev, "session error: event id:%x, session id:%x\n", + pkt->event_data1, pkt->shdr.session_id); + + if (!inst) + return; + + switch (pkt->event_data1) { + /* non fatal session errors */ + case HFI_ERR_SESSION_INVALID_SCALE_FACTOR: + case HFI_ERR_SESSION_UNSUPPORT_BUFFERTYPE: + case HFI_ERR_SESSION_UNSUPPORTED_SETTING: + case HFI_ERR_SESSION_UPSCALE_NOT_SUPPORTED: + inst->error = HFI_ERR_NONE; + break; + default: + dev_err(dev, "session error: event id:%x (%x), session id:%x\n", + pkt->event_data1, pkt->event_data2, + pkt->shdr.session_id); + + inst->error = pkt->event_data1; + inst->ops->event_notify(inst, EVT_SESSION_ERROR, NULL); + break; + } +} + +static void hfi_event_notify(struct venus_core *core, struct venus_inst *inst, + void *packet) +{ + struct hfi_msg_event_notify_pkt *pkt = packet; + + if (!packet) + return; + + switch (pkt->event_id) { + case HFI_EVENT_SYS_ERROR: + event_sys_error(core, EVT_SYS_ERROR, pkt); + break; + case HFI_EVENT_SESSION_ERROR: + event_session_error(core, inst, pkt); + break; + case HFI_EVENT_SESSION_SEQUENCE_CHANGED: + event_seq_changed(core, inst, pkt); + break; + case HFI_EVENT_RELEASE_BUFFER_REFERENCE: + 
event_release_buffer_ref(core, inst, pkt); + break; + case HFI_EVENT_SESSION_PROPERTY_CHANGED: + break; + default: + break; + } +} + +static void hfi_sys_init_done(struct venus_core *core, struct venus_inst *inst, + void *packet) +{ + struct hfi_msg_sys_init_done_pkt *pkt = packet; + u32 rem_bytes, read_bytes = 0, num_properties; + u32 error, ptype; + u8 *data; + + error = pkt->error_type; + if (error != HFI_ERR_NONE) + goto err_no_prop; + + num_properties = pkt->num_properties; + + if (!num_properties) { + error = HFI_ERR_SYS_INVALID_PARAMETER; + goto err_no_prop; + } + + rem_bytes = pkt->hdr.size - sizeof(*pkt) + sizeof(u32); + + if (!rem_bytes) { + /* missing property data */ + error = HFI_ERR_SYS_INSUFFICIENT_RESOURCES; + goto err_no_prop; + } + + data = (u8 *)&pkt->data[0]; + + if (core->res->hfi_version == HFI_VERSION_3XX) + goto err_no_prop; + + while (num_properties && rem_bytes >= sizeof(u32)) { + ptype = *((u32 *)data); + data += sizeof(u32); + + switch (ptype) { + case HFI_PROPERTY_PARAM_CODEC_SUPPORTED: { + struct hfi_codec_supported *prop; + + prop = (struct hfi_codec_supported *)data; + + if (rem_bytes < sizeof(*prop)) { + error = HFI_ERR_SYS_INSUFFICIENT_RESOURCES; + break; + } + + read_bytes += sizeof(*prop) + sizeof(u32); + core->dec_codecs = prop->dec_codecs; + core->enc_codecs = prop->enc_codecs; + break; + } + case HFI_PROPERTY_PARAM_MAX_SESSIONS_SUPPORTED: { + struct hfi_max_sessions_supported *prop; + + if (rem_bytes < sizeof(*prop)) { + error = HFI_ERR_SYS_INSUFFICIENT_RESOURCES; + break; + } + + prop = (struct hfi_max_sessions_supported *)data; + read_bytes += sizeof(*prop) + sizeof(u32); + core->max_sessions_supported = prop->max_sessions; + break; + } + default: + error = HFI_ERR_SYS_INVALID_PARAMETER; + break; + } + + if (error) + break; + + rem_bytes -= read_bytes; + data += read_bytes; + num_properties--; + } + +err_no_prop: + core->error = error; + complete(&core->done); +} + +static void +sys_get_prop_image_version(struct device *dev, 
+ struct hfi_msg_sys_property_info_pkt *pkt) +{ + int req_bytes; + + req_bytes = pkt->hdr.size - sizeof(*pkt); + + if (req_bytes < 128 || !pkt->data[1] || pkt->num_properties > 1) + /* bad packet */ + return; + + dev_dbg(dev, "F/W version: %s\n", (u8 *)&pkt->data[1]); +} + +static void hfi_sys_property_info(struct venus_core *core, + struct venus_inst *inst, void *packet) +{ + struct hfi_msg_sys_property_info_pkt *pkt = packet; + struct device *dev = core->dev; + + if (!pkt->num_properties) { + dev_dbg(dev, "%s: no properties\n", __func__); + return; + } + + switch (pkt->data[0]) { + case HFI_PROPERTY_SYS_IMAGE_VERSION: + sys_get_prop_image_version(dev, pkt); + break; + default: + dev_dbg(dev, "%s: unknown property data\n", __func__); + break; + } +} + +static void hfi_sys_rel_resource_done(struct venus_core *core, + struct venus_inst *inst, + void *packet) +{ + struct hfi_msg_sys_release_resource_done_pkt *pkt = packet; + + core->error = pkt->error_type; + complete(&core->done); +} + +static void hfi_sys_ping_done(struct venus_core *core, struct venus_inst *inst, + void *packet) +{ + struct hfi_msg_sys_ping_ack_pkt *pkt = packet; + + core->error = HFI_ERR_NONE; + + if (pkt->client_data != 0xbeef) + core->error = HFI_ERR_SYS_FATAL; + + complete(&core->done); +} + +static void hfi_sys_idle_done(struct venus_core *core, struct venus_inst *inst, + void *packet) +{ + dev_dbg(core->dev, "sys idle\n"); +} + +static void hfi_sys_pc_prepare_done(struct venus_core *core, + struct venus_inst *inst, void *packet) +{ + struct hfi_msg_sys_pc_prep_done_pkt *pkt = packet; + + dev_dbg(core->dev, "pc prepare done (error %x)\n", pkt->error_type); +} + +static void +hfi_copy_cap_prop(struct hfi_capability *in, struct venus_inst *inst) +{ + if (!in || !inst) + return; + + switch (in->capability_type) { + case HFI_CAPABILITY_FRAME_WIDTH: + inst->cap_width = *in; + break; + case HFI_CAPABILITY_FRAME_HEIGHT: + inst->cap_height = *in; + break; + case HFI_CAPABILITY_MBS_PER_FRAME: + 
inst->cap_mbs_per_frame = *in; + break; + case HFI_CAPABILITY_MBS_PER_SECOND: + inst->cap_mbs_per_sec = *in; + break; + case HFI_CAPABILITY_FRAMERATE: + inst->cap_framerate = *in; + break; + case HFI_CAPABILITY_SCALE_X: + inst->cap_scale_x = *in; + break; + case HFI_CAPABILITY_SCALE_Y: + inst->cap_scale_y = *in; + break; + case HFI_CAPABILITY_BITRATE: + inst->cap_bitrate = *in; + break; + case HFI_CAPABILITY_HIER_P_NUM_ENH_LAYERS: + inst->cap_hier_p = *in; + break; + case HFI_CAPABILITY_ENC_LTR_COUNT: + inst->cap_ltr_count = *in; + break; + case HFI_CAPABILITY_CP_OUTPUT2_THRESH: + inst->cap_secure_output2_threshold = *in; + break; + default: + break; + } +} + +static unsigned int +session_get_prop_profile_level(struct hfi_msg_session_property_info_pkt *pkt, + struct hfi_profile_level *profile_level) +{ + struct hfi_profile_level *hfi; + u32 req_bytes; + + req_bytes = pkt->shdr.hdr.size - sizeof(*pkt); + + if (!req_bytes || req_bytes % sizeof(struct hfi_profile_level)) + /* bad packet */ + return HFI_ERR_SESSION_INVALID_PARAMETER; + + hfi = (struct hfi_profile_level *)&pkt->data[1]; + profile_level->profile = hfi->profile; + profile_level->level = hfi->level; + + return HFI_ERR_NONE; +} + +static unsigned int +session_get_prop_buf_req(struct hfi_msg_session_property_info_pkt *pkt, + struct hfi_buffer_requirements *bufreq) +{ + struct hfi_buffer_requirements *buf_req; + u32 req_bytes; + unsigned int idx = 0; + + req_bytes = pkt->shdr.hdr.size - sizeof(*pkt); + + if (!req_bytes || req_bytes % sizeof(*buf_req) || !pkt->data[1]) + /* bad packet */ + return HFI_ERR_SESSION_INVALID_PARAMETER; + + buf_req = (struct hfi_buffer_requirements *)&pkt->data[1]; + if (!buf_req) + return HFI_ERR_SESSION_INVALID_PARAMETER; + + while (req_bytes) { + memcpy(&bufreq[idx], buf_req, sizeof(*bufreq)); + idx++; + + if (idx > HFI_BUFFER_TYPE_MAX) + return HFI_ERR_SESSION_INVALID_PARAMETER; + + req_bytes -= sizeof(struct hfi_buffer_requirements); + buf_req++; + } + + return HFI_ERR_NONE; +} 
+ +static void hfi_session_prop_info(struct venus_core *core, + struct venus_inst *inst, void *packet) +{ + struct hfi_msg_session_property_info_pkt *pkt = packet; + struct device *dev = core->dev; + union hfi_get_property *hprop = &inst->hprop; + unsigned int error = HFI_ERR_NONE; + + if (!pkt->num_properties) { + error = HFI_ERR_SESSION_INVALID_PARAMETER; + dev_err(dev, "%s: no properties\n", __func__); + goto done; + } + + switch (pkt->data[0]) { + case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS: + memset(hprop->bufreq, 0, sizeof(hprop->bufreq)); + error = session_get_prop_buf_req(pkt, hprop->bufreq); + break; + case HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT: + memset(&hprop->profile_level, 0, sizeof(hprop->profile_level)); + error = session_get_prop_profile_level(pkt, + &hprop->profile_level); + break; + case HFI_PROPERTY_CONFIG_VDEC_ENTROPY: + break; + default: + dev_dbg(dev, "%s: unknown property id:%x\n", __func__, + pkt->data[0]); + return; + } + +done: + inst->error = error; + complete(&inst->done); +} + +static u32 init_done_read_prop(struct venus_core *core, struct venus_inst *inst, + struct hfi_msg_session_init_done_pkt *pkt) +{ + struct device *dev = core->dev; + u32 rem_bytes, num_props; + u32 ptype, next_offset = 0; + u32 err; + u8 *data; + + rem_bytes = pkt->shdr.hdr.size - sizeof(*pkt) + sizeof(u32); + if (!rem_bytes) { + dev_err(dev, "%s: missing property info\n", __func__); + return HFI_ERR_SESSION_INSUFFICIENT_RESOURCES; + } + + err = pkt->error_type; + if (err) + return err; + + data = (u8 *)&pkt->data[0]; + num_props = pkt->num_properties; + + while (err == HFI_ERR_NONE && num_props && rem_bytes >= sizeof(u32)) { + ptype = *((u32 *)data); + next_offset = sizeof(u32); + + switch (ptype) { + case HFI_PROPERTY_PARAM_CODEC_MASK_SUPPORTED: { + struct hfi_codec_mask_supported *masks = + (struct hfi_codec_mask_supported *) + (data + next_offset); + + next_offset += sizeof(*masks); + num_props--; + break; + } + case 
HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED: { + struct hfi_capabilities *caps; + struct hfi_capability *cap; + u32 num_caps; + + if ((rem_bytes - next_offset) < sizeof(*cap)) { + err = HFI_ERR_SESSION_INVALID_PARAMETER; + break; + } + + caps = (struct hfi_capabilities *)(data + next_offset); + + num_caps = caps->num_capabilities; + cap = &caps->data[0]; + next_offset += sizeof(u32); + + while (num_caps && + (rem_bytes - next_offset) >= sizeof(u32)) { + hfi_copy_cap_prop(cap, inst); + cap++; + next_offset += sizeof(*cap); + num_caps--; + } + num_props--; + break; + } + case HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SUPPORTED: { + struct hfi_uncompressed_format_supported *prop = + (struct hfi_uncompressed_format_supported *) + (data + next_offset); + u32 num_fmt_entries; + u8 *fmt; + struct hfi_uncompressed_plane_info *inf; + + if ((rem_bytes - next_offset) < sizeof(*prop)) { + err = HFI_ERR_SESSION_INVALID_PARAMETER; + break; + } + + num_fmt_entries = prop->format_entries; + next_offset = sizeof(*prop) - sizeof(u32); + fmt = (u8 *)&prop->format_info[0]; + + dev_dbg(dev, "uncomm format support num entries:%u\n", + num_fmt_entries); + + while (num_fmt_entries) { + struct hfi_uncompressed_plane_constraints *cnts; + u32 bytes_to_skip; + + inf = (struct hfi_uncompressed_plane_info *)fmt; + + if ((rem_bytes - next_offset) < sizeof(*inf)) { + err = HFI_ERR_SESSION_INVALID_PARAMETER; + break; + } + + dev_dbg(dev, "plane info: fmt:%x, planes:%x\n", + inf->format, inf->num_planes); + + cnts = &inf->plane_format[0]; + dev_dbg(dev, "%u %u %u %u\n", + cnts->stride_multiples, + cnts->max_stride, + cnts->min_plane_buffer_height_multiple, + cnts->buffer_alignment); + + bytes_to_skip = sizeof(*inf) - sizeof(*cnts) + + inf->num_planes * sizeof(*cnts); + + fmt += bytes_to_skip; + next_offset += bytes_to_skip; + num_fmt_entries--; + } + num_props--; + break; + } + case HFI_PROPERTY_PARAM_PROPERTIES_SUPPORTED: { + struct hfi_properties_supported *prop = + (struct hfi_properties_supported *) + 
(data + next_offset); + + next_offset += sizeof(*prop) - sizeof(u32) + + prop->num_properties * sizeof(u32); + num_props--; + break; + } + case HFI_PROPERTY_PARAM_PROFILE_LEVEL_SUPPORTED: { + struct hfi_profile_level_supported *prop = + (struct hfi_profile_level_supported *) + (data + next_offset); + struct hfi_profile_level *pl; + unsigned int prop_count = 0; + unsigned int count = 0; + u8 *ptr; + + ptr = (u8 *)&prop->profile_level[0]; + prop_count = prop->profile_count; + + if (prop_count > HFI_MAX_PROFILE_COUNT) + prop_count = HFI_MAX_PROFILE_COUNT; + + while (prop_count) { + ptr++; + pl = (struct hfi_profile_level *)ptr; + + inst->pl[count].profile = pl->profile; + inst->pl[count].level = pl->level; + prop_count--; + count++; + ptr += sizeof(*pl) / sizeof(u32); + } + + inst->pl_count = count; + next_offset += sizeof(*prop) - sizeof(*pl) + + prop->profile_count * sizeof(*pl); + + num_props--; + break; + } + case HFI_PROPERTY_PARAM_INTERLACE_FORMAT_SUPPORTED: { + next_offset += + sizeof(struct hfi_interlace_format_supported); + num_props--; + break; + } + case HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SUPPORTED: { + struct hfi_nal_stream_format *nal = + (struct hfi_nal_stream_format *) + (data + next_offset); + dev_dbg(dev, "NAL format: %x\n", nal->format); + next_offset += sizeof(*nal); + num_props--; + break; + } + case HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SELECT: { + next_offset += sizeof(u32); + num_props--; + break; + } + case HFI_PROPERTY_PARAM_MAX_SEQUENCE_HEADER_SIZE: { + u32 *max_seq_sz = (u32 *)(data + next_offset); + + dev_dbg(dev, "max seq header sz: %x\n", *max_seq_sz); + next_offset += sizeof(u32); + num_props--; + break; + } + case HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH: { + next_offset += sizeof(struct hfi_intra_refresh); + num_props--; + break; + } + case HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE_SUPPORTED: { + struct hfi_buffer_alloc_mode_supported *prop = + (struct hfi_buffer_alloc_mode_supported *) + (data + next_offset); + unsigned int i; + + for (i = 
0; i < prop->num_entries; i++) { + if (prop->buffer_type == HFI_BUFFER_OUTPUT || + prop->buffer_type == HFI_BUFFER_OUTPUT2) { + switch (prop->data[i]) { + case HFI_BUFFER_MODE_STATIC: + inst->cap_bufs_mode_static = 1; + break; + case HFI_BUFFER_MODE_DYNAMIC: + inst->cap_bufs_mode_dynamic = 1; + break; + default: + break; + } + } + } + next_offset += sizeof(*prop) - + sizeof(u32) + prop->num_entries * sizeof(u32); + num_props--; + break; + } + default: + dev_dbg(dev, "%s: default case %#x\n", __func__, ptype); + break; + } + + rem_bytes -= next_offset; + data += next_offset; + } + + return err; +} + +static void hfi_session_init_done(struct venus_core *core, + struct venus_inst *inst, void *packet) +{ + struct hfi_msg_session_init_done_pkt *pkt = packet; + unsigned int error; + + error = pkt->error_type; + if (error != HFI_ERR_NONE) + goto done; + + if (core->res->hfi_version != HFI_VERSION_1XX) + goto done; + + error = init_done_read_prop(core, inst, pkt); + +done: + inst->error = error; + complete(&inst->done); +} + +static void hfi_session_load_res_done(struct venus_core *core, + struct venus_inst *inst, void *packet) +{ + struct hfi_msg_session_load_resources_done_pkt *pkt = packet; + + inst->error = pkt->error_type; + complete(&inst->done); +} + +static void hfi_session_flush_done(struct venus_core *core, + struct venus_inst *inst, void *packet) +{ + struct hfi_msg_session_flush_done_pkt *pkt = packet; + + inst->error = pkt->error_type; + complete(&inst->done); +} + +static void hfi_session_etb_done(struct venus_core *core, + struct venus_inst *inst, void *packet) +{ + struct hfi_msg_session_empty_buffer_done_pkt *pkt = packet; + + inst->error = pkt->error_type; + inst->ops->buf_done(inst, HFI_BUFFER_INPUT, pkt->input_tag, + pkt->filled_len, pkt->offset, 0, 0, 0); +} + +static void hfi_session_ftb_done(struct venus_core *core, + struct venus_inst *inst, void *packet) +{ + u32 session_type = inst->session_type; + u64 timestamp_us = 0; + u32 timestamp_hi = 0, 
timestamp_lo = 0; + unsigned int error; + u32 flags = 0, hfi_flags = 0, offset = 0, filled_len = 0; + u32 pic_type = 0, buffer_type = 0, output_tag = -1; + + if (session_type == VIDC_SESSION_TYPE_ENC) { + struct hfi_msg_session_fbd_compressed_pkt *pkt = packet; + + timestamp_hi = pkt->time_stamp_hi; + timestamp_lo = pkt->time_stamp_lo; + hfi_flags = pkt->flags; + offset = pkt->offset; + filled_len = pkt->filled_len; + pic_type = pkt->picture_type; + output_tag = pkt->output_tag; + buffer_type = HFI_BUFFER_OUTPUT; + + error = pkt->error_type; + } else if (session_type == VIDC_SESSION_TYPE_DEC) { + struct hfi_msg_session_fbd_uncompressed_plane0_pkt *pkt = + packet; + + timestamp_hi = pkt->time_stamp_hi; + timestamp_lo = pkt->time_stamp_lo; + hfi_flags = pkt->flags; + offset = pkt->offset; + filled_len = pkt->filled_len; + pic_type = pkt->picture_type; + output_tag = pkt->output_tag; + + if (pkt->stream_id == 0) + buffer_type = HFI_BUFFER_OUTPUT; + else if (pkt->stream_id == 1) + buffer_type = HFI_BUFFER_OUTPUT2; + + error = pkt->error_type; + } else { + error = HFI_ERR_SESSION_INVALID_PARAMETER; + } + + if (buffer_type != HFI_BUFFER_OUTPUT) + goto done; + + if (hfi_flags & HFI_BUFFERFLAG_EOS) + flags |= V4L2_BUF_FLAG_LAST; + + switch (pic_type) { + case HFI_PICTURE_IDR: + case HFI_PICTURE_I: + flags |= V4L2_BUF_FLAG_KEYFRAME; + break; + case HFI_PICTURE_P: + flags |= V4L2_BUF_FLAG_PFRAME; + break; + case HFI_PICTURE_B: + flags |= V4L2_BUF_FLAG_BFRAME; + break; + case HFI_FRAME_NOTCODED: + case HFI_UNUSED_PICT: + case HFI_FRAME_YUV: + default: + break; + } + + if (!(hfi_flags & HFI_BUFFERFLAG_TIMESTAMPINVALID) && filled_len) { + timestamp_us = timestamp_hi; + timestamp_us = (timestamp_us << 32) | timestamp_lo; + } + +done: + inst->error = error; + inst->ops->buf_done(inst, buffer_type, output_tag, filled_len, + offset, flags, hfi_flags, timestamp_us); +} + +static void hfi_session_start_done(struct venus_core *core, + struct venus_inst *inst, void *packet) +{ + struct 
hfi_msg_session_start_done_pkt *pkt = packet; + + inst->error = pkt->error_type; + complete(&inst->done); +} + +static void hfi_session_stop_done(struct venus_core *core, + struct venus_inst *inst, void *packet) +{ + struct hfi_msg_session_stop_done_pkt *pkt = packet; + + inst->error = pkt->error_type; + complete(&inst->done); +} + +static void hfi_session_rel_res_done(struct venus_core *core, + struct venus_inst *inst, void *packet) +{ + struct hfi_msg_session_release_resources_done_pkt *pkt = packet; + + inst->error = pkt->error_type; + complete(&inst->done); +} + +static void hfi_session_rel_buf_done(struct venus_core *core, + struct venus_inst *inst, void *packet) +{ + struct hfi_msg_session_release_buffers_done_pkt *pkt = packet; + + inst->error = pkt->error_type; + complete(&inst->done); +} + +static void hfi_session_end_done(struct venus_core *core, + struct venus_inst *inst, void *packet) +{ + struct hfi_msg_session_end_done_pkt *pkt = packet; + + inst->error = pkt->error_type; + complete(&inst->done); +} + +static void hfi_session_abort_done(struct venus_core *core, + struct venus_inst *inst, void *packet) +{ + struct hfi_msg_sys_session_abort_done_pkt *pkt = packet; + + inst->error = pkt->error_type; + complete(&inst->done); +} + +static void hfi_session_get_seq_hdr_done(struct venus_core *core, + struct venus_inst *inst, void *packet) +{ + struct hfi_msg_session_get_sequence_hdr_done_pkt *pkt = packet; + + inst->error = pkt->error_type; + complete(&inst->done); +} + +struct hfi_done_handler { + u32 pkt; + u32 pkt_sz; + u32 pkt_sz2; + void (*done)(struct venus_core *, struct venus_inst *, void *); + bool is_sys_pkt; +}; + +static const struct hfi_done_handler handlers[] = { + {.pkt = HFI_MSG_EVENT_NOTIFY, + .pkt_sz = sizeof(struct hfi_msg_event_notify_pkt), + .done = hfi_event_notify, + }, + {.pkt = HFI_MSG_SYS_INIT, + .pkt_sz = sizeof(struct hfi_msg_sys_init_done_pkt), + .done = hfi_sys_init_done, + .is_sys_pkt = true, + }, + {.pkt = 
HFI_MSG_SYS_PROPERTY_INFO, + .pkt_sz = sizeof(struct hfi_msg_sys_property_info_pkt), + .done = hfi_sys_property_info, + .is_sys_pkt = true, + }, + {.pkt = HFI_MSG_SYS_RELEASE_RESOURCE, + .pkt_sz = sizeof(struct hfi_msg_sys_release_resource_done_pkt), + .done = hfi_sys_rel_resource_done, + .is_sys_pkt = true, + }, + {.pkt = HFI_MSG_SYS_PING_ACK, + .pkt_sz = sizeof(struct hfi_msg_sys_ping_ack_pkt), + .done = hfi_sys_ping_done, + .is_sys_pkt = true, + }, + {.pkt = HFI_MSG_SYS_IDLE, + .pkt_sz = sizeof(struct hfi_msg_sys_idle_pkt), + .done = hfi_sys_idle_done, + .is_sys_pkt = true, + }, + {.pkt = HFI_MSG_SYS_PC_PREP, + .pkt_sz = sizeof(struct hfi_msg_sys_pc_prep_done_pkt), + .done = hfi_sys_pc_prepare_done, + .is_sys_pkt = true, + }, + {.pkt = HFI_MSG_SYS_SESSION_INIT, + .pkt_sz = sizeof(struct hfi_msg_session_init_done_pkt), + .done = hfi_session_init_done, + }, + {.pkt = HFI_MSG_SYS_SESSION_END, + .pkt_sz = sizeof(struct hfi_msg_session_end_done_pkt), + .done = hfi_session_end_done, + }, + {.pkt = HFI_MSG_SESSION_LOAD_RESOURCES, + .pkt_sz = sizeof(struct hfi_msg_session_load_resources_done_pkt), + .done = hfi_session_load_res_done, + }, + {.pkt = HFI_MSG_SESSION_START, + .pkt_sz = sizeof(struct hfi_msg_session_start_done_pkt), + .done = hfi_session_start_done, + }, + {.pkt = HFI_MSG_SESSION_STOP, + .pkt_sz = sizeof(struct hfi_msg_session_stop_done_pkt), + .done = hfi_session_stop_done, + }, + {.pkt = HFI_MSG_SYS_SESSION_ABORT, + .pkt_sz = sizeof(struct hfi_msg_sys_session_abort_done_pkt), + .done = hfi_session_abort_done, + }, + {.pkt = HFI_MSG_SESSION_EMPTY_BUFFER, + .pkt_sz = sizeof(struct hfi_msg_session_empty_buffer_done_pkt), + .done = hfi_session_etb_done, + }, + {.pkt = HFI_MSG_SESSION_FILL_BUFFER, + .pkt_sz = sizeof(struct hfi_msg_session_fbd_uncompressed_plane0_pkt), + .pkt_sz2 = sizeof(struct hfi_msg_session_fbd_compressed_pkt), + .done = hfi_session_ftb_done, + }, + {.pkt = HFI_MSG_SESSION_FLUSH, + .pkt_sz = sizeof(struct hfi_msg_session_flush_done_pkt), + 
.done = hfi_session_flush_done, + }, + {.pkt = HFI_MSG_SESSION_PROPERTY_INFO, + .pkt_sz = sizeof(struct hfi_msg_session_property_info_pkt), + .done = hfi_session_prop_info, + }, + {.pkt = HFI_MSG_SESSION_RELEASE_RESOURCES, + .pkt_sz = sizeof(struct hfi_msg_session_release_resources_done_pkt), + .done = hfi_session_rel_res_done, + }, + {.pkt = HFI_MSG_SESSION_GET_SEQUENCE_HEADER, + .pkt_sz = sizeof(struct hfi_msg_session_get_sequence_hdr_done_pkt), + .done = hfi_session_get_seq_hdr_done, + }, + {.pkt = HFI_MSG_SESSION_RELEASE_BUFFERS, + .pkt_sz = sizeof(struct hfi_msg_session_release_buffers_done_pkt), + .done = hfi_session_rel_buf_done, + }, +}; + +void hfi_process_watchdog_timeout(struct venus_core *core) +{ + event_sys_error(core, EVT_SYS_WATCHDOG_TIMEOUT, NULL); +} + +static struct venus_inst *to_instance(struct venus_core *core, u32 session_id) +{ + struct venus_inst *inst; + + mutex_lock(&core->lock); + list_for_each_entry(inst, &core->instances, list) + if (hash32_ptr(inst) == session_id) { + mutex_unlock(&core->lock); + return inst; + } + mutex_unlock(&core->lock); + + return NULL; +} + +u32 hfi_process_msg_packet(struct venus_core *core, struct hfi_pkt_hdr *hdr) +{ + const struct hfi_done_handler *handler; + struct device *dev = core->dev; + struct venus_inst *inst; + bool found = false; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(handlers); i++) { + handler = &handlers[i]; + if (handler->pkt != hdr->pkt_type) + continue; + found = true; + break; + } + + if (!found) + return hdr->pkt_type; + + if (hdr->size && hdr->size < handler->pkt_sz && + hdr->size < handler->pkt_sz2) { + dev_err(dev, "bad packet size (%d should be %d, pkt type:%x)\n", + hdr->size, handler->pkt_sz, hdr->pkt_type); + + return hdr->pkt_type; + } + + if (handler->is_sys_pkt) { + inst = NULL; + } else { + struct hfi_session_pkt *pkt; + + pkt = (struct hfi_session_pkt *)hdr; + inst = to_instance(core, pkt->shdr.session_id); + + if (!inst) + dev_warn(dev, "no valid instance(pkt 
session_id:%x, pkt:%x)\n", + pkt->shdr.session_id, + handler ? handler->pkt : 0); + + /* + * Event of type HFI_EVENT_SYS_ERROR will not have any session + * associated with it + */ + if (!inst && hdr->pkt_type != HFI_MSG_EVENT_NOTIFY) { + dev_err(dev, "got invalid session id:%x\n", + pkt->shdr.session_id); + goto invalid_session; + } + } + + handler->done(core, inst, hdr); + +invalid_session: + return hdr->pkt_type; +} diff --git a/drivers/media/platform/qcom/venus/hfi_msgs.h b/drivers/media/platform/qcom/venus/hfi_msgs.h new file mode 100644 index 000000000000..14d9a3979b14 --- /dev/null +++ b/drivers/media/platform/qcom/venus/hfi_msgs.h @@ -0,0 +1,283 @@ +/* + * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * Copyright (C) 2017 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
 *
 */
#ifndef __VENUS_HFI_MSGS_H__
#define __VENUS_HFI_MSGS_H__

/*
 * HFI (Host Firmware Interface) message identifiers and packet layouts for
 * messages sent by the Venus firmware to the host. Each struct below mirrors
 * the exact wire layout of one firmware->host packet, so field order and the
 * trailing [0]/[1] array sizes are part of the protocol and must not change.
 */

/* message calls */
#define HFI_MSG_SYS_INIT			0x20001
#define HFI_MSG_SYS_PC_PREP			0x20002
#define HFI_MSG_SYS_RELEASE_RESOURCE		0x20003
#define HFI_MSG_SYS_DEBUG			0x20004
#define HFI_MSG_SYS_SESSION_INIT		0x20006
#define HFI_MSG_SYS_SESSION_END			0x20007
#define HFI_MSG_SYS_IDLE			0x20008
#define HFI_MSG_SYS_COV				0x20009
#define HFI_MSG_SYS_PROPERTY_INFO		0x2000a

#define HFI_MSG_EVENT_NOTIFY			0x21001
#define HFI_MSG_SESSION_GET_SEQUENCE_HEADER	0x21002

#define HFI_MSG_SYS_PING_ACK			0x220002
#define HFI_MSG_SYS_SESSION_ABORT		0x220004

#define HFI_MSG_SESSION_LOAD_RESOURCES		0x221001
#define HFI_MSG_SESSION_START			0x221002
#define HFI_MSG_SESSION_STOP			0x221003
#define HFI_MSG_SESSION_SUSPEND			0x221004
#define HFI_MSG_SESSION_RESUME			0x221005
#define HFI_MSG_SESSION_FLUSH			0x221006
#define HFI_MSG_SESSION_EMPTY_BUFFER		0x221007
#define HFI_MSG_SESSION_FILL_BUFFER		0x221008
#define HFI_MSG_SESSION_PROPERTY_INFO		0x221009
#define HFI_MSG_SESSION_RELEASE_RESOURCES	0x22100a
#define HFI_MSG_SESSION_PARSE_SEQUENCE_HEADER	0x22100b
#define HFI_MSG_SESSION_RELEASE_BUFFERS		0x22100c

/* picture_type values reported in fill-buffer-done packets */
#define HFI_PICTURE_I				0x00000001
#define HFI_PICTURE_P				0x00000002
#define HFI_PICTURE_B				0x00000004
#define HFI_PICTURE_IDR				0x00000008
#define HFI_FRAME_NOTCODED			0x7f002000
#define HFI_FRAME_YUV				0x7f004000
#define HFI_UNUSED_PICT				0x10000000

/* message packets */

/* asynchronous event from firmware; meaning of data fields depends on event_id */
struct hfi_msg_event_notify_pkt {
	struct hfi_session_hdr_pkt shdr;
	u32 event_id;
	u32 event_data1;
	u32 event_data2;
	u32 ext_event_data[1];	/* variable-length extra data (protocol-sized placeholder) */
};

struct hfi_msg_event_release_buffer_ref_pkt {
	u32 packet_buffer;
	u32 extradata_buffer;
	u32 output_tag;
};

/*
 * "done" packets below acknowledge a previously issued command; error_type
 * carries the HFI error code for the command (0 on success).
 */
struct hfi_msg_sys_init_done_pkt {
	struct hfi_pkt_hdr hdr;
	u32 error_type;
	u32 num_properties;
	u32 data[1];		/* num_properties property id/value pairs follow */
};

struct hfi_msg_sys_pc_prep_done_pkt {
	struct hfi_pkt_hdr hdr;
	u32 error_type;
};

struct hfi_msg_sys_release_resource_done_pkt {
	struct hfi_pkt_hdr hdr;
	u32 resource_handle;
	u32 error_type;
};

struct hfi_msg_session_init_done_pkt {
	struct hfi_session_hdr_pkt shdr;
	u32 error_type;
	u32 num_properties;
	u32 data[1];
};

struct hfi_msg_session_end_done_pkt {
	struct hfi_session_hdr_pkt shdr;
	u32 error_type;
};

struct hfi_msg_session_get_sequence_hdr_done_pkt {
	struct hfi_session_hdr_pkt shdr;
	u32 error_type;
	u32 header_len;
	u32 sequence_header;
};

struct hfi_msg_sys_session_abort_done_pkt {
	struct hfi_session_hdr_pkt shdr;
	u32 error_type;
};

struct hfi_msg_sys_idle_pkt {
	struct hfi_pkt_hdr hdr;
};

struct hfi_msg_sys_ping_ack_pkt {
	struct hfi_pkt_hdr hdr;
	u32 client_data;	/* echoed back from the ping request */
};

struct hfi_msg_sys_property_info_pkt {
	struct hfi_pkt_hdr hdr;
	u32 num_properties;
	u32 data[1];
};

struct hfi_msg_session_load_resources_done_pkt {
	struct hfi_session_hdr_pkt shdr;
	u32 error_type;
};

struct hfi_msg_session_start_done_pkt {
	struct hfi_session_hdr_pkt shdr;
	u32 error_type;
};

struct hfi_msg_session_stop_done_pkt {
	struct hfi_session_hdr_pkt shdr;
	u32 error_type;
};

struct hfi_msg_session_suspend_done_pkt {
	struct hfi_session_hdr_pkt shdr;
	u32 error_type;
};

struct hfi_msg_session_resume_done_pkt {
	struct hfi_session_hdr_pkt shdr;
	u32 error_type;
};

struct hfi_msg_session_flush_done_pkt {
	struct hfi_session_hdr_pkt shdr;
	u32 error_type;
	u32 flush_type;
};

/* completion of an EMPTY_BUFFER (input/bitstream) submission */
struct hfi_msg_session_empty_buffer_done_pkt {
	struct hfi_session_hdr_pkt shdr;
	u32 error_type;
	u32 offset;
	u32 filled_len;
	u32 input_tag;
	u32 packet_buffer;
	u32 extradata_buffer;
	u32 data[0];
};

/* fill-buffer-done for a compressed (encoder output) buffer */
struct hfi_msg_session_fbd_compressed_pkt {
	struct hfi_session_hdr_pkt shdr;
	u32 time_stamp_hi;
	u32 time_stamp_lo;
	u32 error_type;
	u32 flags;
	u32 mark_target;
	u32 mark_data;
	u32 stats;
	u32 offset;
	u32 alloc_len;
	u32 filled_len;
	u32 input_tag;
	u32 output_tag;
	u32 picture_type;	/* HFI_PICTURE_* / HFI_FRAME_* flags */
	u32 packet_buffer;
	u32 extradata_buffer;
	u32 data[0];
};

/* fill-buffer-done for an uncompressed (decoder output) buffer, plane 0 */
struct hfi_msg_session_fbd_uncompressed_plane0_pkt {
	struct hfi_session_hdr_pkt shdr;
	u32 stream_id;
	u32 view_id;
	u32 error_type;
	u32 time_stamp_hi;
	u32 time_stamp_lo;
	u32 flags;
	u32 mark_target;
	u32 mark_data;
	u32 stats;
	u32 alloc_len;
	u32 filled_len;
	u32 offset;
	u32 frame_width;
	u32 frame_height;
	u32 start_x_coord;
	u32 start_y_coord;
	u32 input_tag;
	u32 input_tag2;
	u32 output_tag;
	u32 picture_type;
	u32 packet_buffer;
	u32 extradata_buffer;
	u32 data[0];
};

/* optional continuation of the uncompressed FBD packet for plane 1 */
struct hfi_msg_session_fbd_uncompressed_plane1_pkt {
	u32 flags;
	u32 alloc_len;
	u32 filled_len;
	u32 offset;
	u32 packet_buffer2;
	u32 data[0];
};

/* optional continuation of the uncompressed FBD packet for plane 2 */
struct hfi_msg_session_fbd_uncompressed_plane2_pkt {
	u32 flags;
	u32 alloc_len;
	u32 filled_len;
	u32 offset;
	u32 packet_buffer3;
	u32 data[0];
};

struct hfi_msg_session_parse_sequence_header_done_pkt {
	struct hfi_session_hdr_pkt shdr;
	u32 error_type;
	u32 num_properties;
	u32 data[1];
};

struct hfi_msg_session_property_info_pkt {
	struct hfi_session_hdr_pkt shdr;
	u32 num_properties;
	u32 data[1];
};

struct hfi_msg_session_release_resources_done_pkt {
	struct hfi_session_hdr_pkt shdr;
	u32 error_type;
};

struct hfi_msg_session_release_buffers_done_pkt {
	struct hfi_session_hdr_pkt shdr;
	u32 error_type;
	u32 num_buffers;
	u32 buffer_info[1];	/* num_buffers buffer addresses follow */
};

/* firmware debug-log message (read from the debug queue) */
struct hfi_msg_sys_debug_pkt {
	struct hfi_pkt_hdr hdr;
	u32 msg_type;
	u32 msg_size;
	u32 time_stamp_hi;
	u32 time_stamp_lo;
	u8 msg_data[1];		/* NUL-terminated log text, msg_size bytes */
};

/* firmware code-coverage dump packet */
struct hfi_msg_sys_coverage_pkt {
	struct hfi_pkt_hdr hdr;
	u32 msg_size;
	u32 time_stamp_hi;
	u32 time_stamp_lo;
	u8 msg_data[1];
};

struct venus_core;
struct hfi_pkt_hdr;

void hfi_process_watchdog_timeout(struct venus_core *core);
u32 hfi_process_msg_packet(struct venus_core *core, struct hfi_pkt_hdr *hdr);

#endif
diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c new file mode
100644 index 000000000000..1caae8feaa36 --- /dev/null +++ b/drivers/media/platform/qcom/venus/hfi_venus.c @@ -0,0 +1,1572 @@ +/* + * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * Copyright (C) 2017 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/interrupt.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/qcom_scm.h> +#include <linux/slab.h> + +#include "core.h" +#include "hfi_cmds.h" +#include "hfi_msgs.h" +#include "hfi_venus.h" +#include "hfi_venus_io.h" + +#define HFI_MASK_QHDR_TX_TYPE 0xff000000 +#define HFI_MASK_QHDR_RX_TYPE 0x00ff0000 +#define HFI_MASK_QHDR_PRI_TYPE 0x0000ff00 +#define HFI_MASK_QHDR_ID_TYPE 0x000000ff + +#define HFI_HOST_TO_CTRL_CMD_Q 0 +#define HFI_CTRL_TO_HOST_MSG_Q 1 +#define HFI_CTRL_TO_HOST_DBG_Q 2 +#define HFI_MASK_QHDR_STATUS 0x000000ff + +#define IFACEQ_NUM 3 +#define IFACEQ_CMD_IDX 0 +#define IFACEQ_MSG_IDX 1 +#define IFACEQ_DBG_IDX 2 +#define IFACEQ_MAX_BUF_COUNT 50 +#define IFACEQ_MAX_PARALLEL_CLNTS 16 +#define IFACEQ_DFLT_QHDR 0x01010000 + +#define POLL_INTERVAL_US 50 + +#define IFACEQ_MAX_PKT_SIZE 1024 +#define IFACEQ_MED_PKT_SIZE 768 +#define IFACEQ_MIN_PKT_SIZE 8 +#define IFACEQ_VAR_SMALL_PKT_SIZE 100 +#define IFACEQ_VAR_LARGE_PKT_SIZE 512 +#define IFACEQ_VAR_HUGE_PKT_SIZE (1024 * 12) + +enum tzbsp_video_state { + TZBSP_VIDEO_STATE_SUSPEND = 0, + TZBSP_VIDEO_STATE_RESUME +}; + +struct hfi_queue_table_header { + u32 version; + u32 size; + u32 
qhdr0_offset; + u32 qhdr_size; + u32 num_q; + u32 num_active_q; +}; + +struct hfi_queue_header { + u32 status; + u32 start_addr; + u32 type; + u32 q_size; + u32 pkt_size; + u32 pkt_drop_cnt; + u32 rx_wm; + u32 tx_wm; + u32 rx_req; + u32 tx_req; + u32 rx_irq_status; + u32 tx_irq_status; + u32 read_idx; + u32 write_idx; +}; + +#define IFACEQ_TABLE_SIZE \ + (sizeof(struct hfi_queue_table_header) + \ + sizeof(struct hfi_queue_header) * IFACEQ_NUM) + +#define IFACEQ_QUEUE_SIZE (IFACEQ_MAX_PKT_SIZE * \ + IFACEQ_MAX_BUF_COUNT * IFACEQ_MAX_PARALLEL_CLNTS) + +#define IFACEQ_GET_QHDR_START_ADDR(ptr, i) \ + (void *)(((ptr) + sizeof(struct hfi_queue_table_header)) + \ + ((i) * sizeof(struct hfi_queue_header))) + +#define QDSS_SIZE SZ_4K +#define SFR_SIZE SZ_4K +#define QUEUE_SIZE \ + (IFACEQ_TABLE_SIZE + (IFACEQ_QUEUE_SIZE * IFACEQ_NUM)) + +#define ALIGNED_QDSS_SIZE ALIGN(QDSS_SIZE, SZ_4K) +#define ALIGNED_SFR_SIZE ALIGN(SFR_SIZE, SZ_4K) +#define ALIGNED_QUEUE_SIZE ALIGN(QUEUE_SIZE, SZ_4K) +#define SHARED_QSIZE ALIGN(ALIGNED_SFR_SIZE + ALIGNED_QUEUE_SIZE + \ + ALIGNED_QDSS_SIZE, SZ_1M) + +struct mem_desc { + dma_addr_t da; /* device address */ + void *kva; /* kernel virtual address */ + u32 size; + unsigned long attrs; +}; + +struct iface_queue { + struct hfi_queue_header *qhdr; + struct mem_desc qmem; +}; + +enum venus_state { + VENUS_STATE_DEINIT = 1, + VENUS_STATE_INIT, +}; + +struct venus_hfi_device { + struct venus_core *core; + u32 irq_status; + u32 last_packet_type; + bool power_enabled; + bool suspended; + enum venus_state state; + /* serialize read / write to the shared memory */ + struct mutex lock; + struct completion pwr_collapse_prep; + struct completion release_resource; + struct mem_desc ifaceq_table; + struct mem_desc sfr; + struct iface_queue queues[IFACEQ_NUM]; + u8 pkt_buf[IFACEQ_VAR_HUGE_PKT_SIZE]; + u8 dbg_buf[IFACEQ_VAR_HUGE_PKT_SIZE]; +}; + +static bool venus_pkt_debug; +static int venus_fw_debug = HFI_DEBUG_MSG_ERROR | HFI_DEBUG_MSG_FATAL; +static bool 
venus_sys_idle_indicator; +static bool venus_fw_low_power_mode = true; +static int venus_hw_rsp_timeout = 1000; +static bool venus_fw_coverage; + +static void venus_set_state(struct venus_hfi_device *hdev, + enum venus_state state) +{ + mutex_lock(&hdev->lock); + hdev->state = state; + mutex_unlock(&hdev->lock); +} + +static bool venus_is_valid_state(struct venus_hfi_device *hdev) +{ + return hdev->state != VENUS_STATE_DEINIT; +} + +static void venus_dump_packet(struct venus_hfi_device *hdev, const void *packet) +{ + size_t pkt_size = *(u32 *)packet; + + if (!venus_pkt_debug) + return; + + print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 1, packet, + pkt_size, true); +} + +static int venus_write_queue(struct venus_hfi_device *hdev, + struct iface_queue *queue, + void *packet, u32 *rx_req) +{ + struct hfi_queue_header *qhdr; + u32 dwords, new_wr_idx; + u32 empty_space, rd_idx, wr_idx, qsize; + u32 *wr_ptr; + + if (!queue->qmem.kva) + return -EINVAL; + + qhdr = queue->qhdr; + if (!qhdr) + return -EINVAL; + + venus_dump_packet(hdev, packet); + + dwords = (*(u32 *)packet) >> 2; + if (!dwords) + return -EINVAL; + + rd_idx = qhdr->read_idx; + wr_idx = qhdr->write_idx; + qsize = qhdr->q_size; + /* ensure rd/wr indices's are read from memory */ + rmb(); + + if (wr_idx >= rd_idx) + empty_space = qsize - (wr_idx - rd_idx); + else + empty_space = rd_idx - wr_idx; + + if (empty_space <= dwords) { + qhdr->tx_req = 1; + /* ensure tx_req is updated in memory */ + wmb(); + return -ENOSPC; + } + + qhdr->tx_req = 0; + /* ensure tx_req is updated in memory */ + wmb(); + + new_wr_idx = wr_idx + dwords; + wr_ptr = (u32 *)(queue->qmem.kva + (wr_idx << 2)); + if (new_wr_idx < qsize) { + memcpy(wr_ptr, packet, dwords << 2); + } else { + size_t len; + + new_wr_idx -= qsize; + len = (dwords - new_wr_idx) << 2; + memcpy(wr_ptr, packet, len); + memcpy(queue->qmem.kva, packet + len, new_wr_idx << 2); + } + + /* make sure packet is written before updating the write index */ + wmb(); + + 
	qhdr->write_idx = new_wr_idx;
	*rx_req = qhdr->rx_req ? 1 : 0;

	/* make sure write index is updated before an interrupt is raised */
	mb();

	return 0;
}

/*
 * Read one packet from a firmware->host shared-memory ring queue into @pkt.
 *
 * All queue indices and sizes in the queue header are in 32-bit words.  A
 * packet whose self-reported size is implausible (larger than
 * IFACEQ_VAR_HUGE_PKT_SIZE, or starting past the queue end) causes the whole
 * queue to be drained (read index snapped to the write index) and -EBADMSG
 * to be returned.  On return *tx_req reports whether the firmware asked to
 * be interrupted once the host has consumed data.
 *
 * Return: 0 on success, -EINVAL on a bad queue or packet header, -ENODATA
 * when the queue is empty, -EBADMSG when a corrupt packet was dropped.
 */
static int venus_read_queue(struct venus_hfi_device *hdev,
			    struct iface_queue *queue, void *pkt, u32 *tx_req)
{
	struct hfi_queue_header *qhdr;
	u32 dwords, new_rd_idx;
	u32 rd_idx, wr_idx, type, qsize;
	u32 *rd_ptr;
	u32 recv_request = 0;
	int ret = 0;

	if (!queue->qmem.kva)
		return -EINVAL;

	qhdr = queue->qhdr;
	if (!qhdr)
		return -EINVAL;

	type = qhdr->type;
	rd_idx = qhdr->read_idx;
	wr_idx = qhdr->write_idx;
	qsize = qhdr->q_size;

	/* make sure data is valid before using it */
	rmb();

	/*
	 * Do not set receive request for debug queue, if set, Venus generates
	 * interrupt for debug messages even when there is no response message
	 * available. In general debug queue will not become full as it is being
	 * emptied out for every interrupt from Venus. Venus will anyway
	 * generates interrupt if it is full.
	 */
	if (type & HFI_CTRL_TO_HOST_MSG_Q)
		recv_request = 1;

	if (rd_idx == wr_idx) {
		/* queue empty: ask firmware to interrupt us when data arrives */
		qhdr->rx_req = recv_request;
		*tx_req = 0;
		/* update rx_req field in memory */
		wmb();
		return -ENODATA;
	}

	/* first word of a packet is its total size in bytes */
	rd_ptr = (u32 *)(queue->qmem.kva + (rd_idx << 2));
	dwords = *rd_ptr >> 2;
	if (!dwords)
		return -EINVAL;

	new_rd_idx = rd_idx + dwords;
	/* sanity-check the packet size before copying; copy in two pieces on wrap */
	if (((dwords << 2) <= IFACEQ_VAR_HUGE_PKT_SIZE) && rd_idx <= qsize) {
		if (new_rd_idx < qsize) {
			memcpy(pkt, rd_ptr, dwords << 2);
		} else {
			size_t len;

			new_rd_idx -= qsize;
			len = (dwords - new_rd_idx) << 2;
			memcpy(pkt, rd_ptr, len);
			memcpy(pkt + len, queue->qmem.kva, new_rd_idx << 2);
		}
	} else {
		/* bad packet received, dropping */
		new_rd_idx = qhdr->write_idx;
		ret = -EBADMSG;
	}

	/* ensure the packet is read before updating read index */
	rmb();

	qhdr->read_idx = new_rd_idx;
	/* ensure updating read index */
	wmb();

	rd_idx = qhdr->read_idx;
	wr_idx = qhdr->write_idx;
	/* ensure rd/wr indices are read from memory */
	rmb();

	/* only re-arm the firmware-side notification once the queue is empty */
	if (rd_idx != wr_idx)
		qhdr->rx_req = 0;
	else
		qhdr->rx_req = recv_request;

	*tx_req = qhdr->tx_req ?
1 : 0; + + /* ensure rx_req is stored to memory and tx_req is loaded from memory */ + mb(); + + venus_dump_packet(hdev, pkt); + + return ret; +} + +static int venus_alloc(struct venus_hfi_device *hdev, struct mem_desc *desc, + u32 size) +{ + struct device *dev = hdev->core->dev; + + desc->attrs = DMA_ATTR_WRITE_COMBINE; + desc->size = ALIGN(size, SZ_4K); + + desc->kva = dma_alloc_attrs(dev, size, &desc->da, GFP_KERNEL, + desc->attrs); + if (!desc->kva) + return -ENOMEM; + + return 0; +} + +static void venus_free(struct venus_hfi_device *hdev, struct mem_desc *mem) +{ + struct device *dev = hdev->core->dev; + + dma_free_attrs(dev, mem->size, mem->kva, mem->da, mem->attrs); +} + +static void venus_writel(struct venus_hfi_device *hdev, u32 reg, u32 value) +{ + writel(value, hdev->core->base + reg); +} + +static u32 venus_readl(struct venus_hfi_device *hdev, u32 reg) +{ + return readl(hdev->core->base + reg); +} + +static void venus_set_registers(struct venus_hfi_device *hdev) +{ + const struct venus_resources *res = hdev->core->res; + const struct reg_val *tbl = res->reg_tbl; + unsigned int count = res->reg_tbl_size; + unsigned int i; + + for (i = 0; i < count; i++) + venus_writel(hdev, tbl[i].reg, tbl[i].value); +} + +static void venus_soft_int(struct venus_hfi_device *hdev) +{ + venus_writel(hdev, CPU_IC_SOFTINT, BIT(CPU_IC_SOFTINT_H2A_SHIFT)); +} + +static int venus_iface_cmdq_write_nolock(struct venus_hfi_device *hdev, + void *pkt) +{ + struct device *dev = hdev->core->dev; + struct hfi_pkt_hdr *cmd_packet; + struct iface_queue *queue; + u32 rx_req; + int ret; + + if (!venus_is_valid_state(hdev)) + return -EINVAL; + + cmd_packet = (struct hfi_pkt_hdr *)pkt; + hdev->last_packet_type = cmd_packet->pkt_type; + + queue = &hdev->queues[IFACEQ_CMD_IDX]; + + ret = venus_write_queue(hdev, queue, pkt, &rx_req); + if (ret) { + dev_err(dev, "write to iface cmd queue failed (%d)\n", ret); + return ret; + } + + if (rx_req) + venus_soft_int(hdev); + + return 0; +} + +static int 
venus_iface_cmdq_write(struct venus_hfi_device *hdev, void *pkt) +{ + int ret; + + mutex_lock(&hdev->lock); + ret = venus_iface_cmdq_write_nolock(hdev, pkt); + mutex_unlock(&hdev->lock); + + return ret; +} + +static int venus_hfi_core_set_resource(struct venus_core *core, u32 id, + u32 size, u32 addr, void *cookie) +{ + struct venus_hfi_device *hdev = to_hfi_priv(core); + struct hfi_sys_set_resource_pkt *pkt; + u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE]; + int ret; + + if (id == VIDC_RESOURCE_NONE) + return 0; + + pkt = (struct hfi_sys_set_resource_pkt *)packet; + + ret = pkt_sys_set_resource(pkt, id, size, addr, cookie); + if (ret) + return ret; + + ret = venus_iface_cmdq_write(hdev, pkt); + if (ret) + return ret; + + return 0; +} + +static int venus_boot_core(struct venus_hfi_device *hdev) +{ + struct device *dev = hdev->core->dev; + static const unsigned int max_tries = 100; + u32 ctrl_status = 0; + unsigned int count = 0; + int ret = 0; + + venus_writel(hdev, VIDC_CTRL_INIT, BIT(VIDC_CTRL_INIT_CTRL_SHIFT)); + venus_writel(hdev, WRAPPER_INTR_MASK, WRAPPER_INTR_MASK_A2HVCODEC_MASK); + venus_writel(hdev, CPU_CS_SCIACMDARG3, 1); + + while (!ctrl_status && count < max_tries) { + ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0); + if ((ctrl_status & CPU_CS_SCIACMDARG0_ERROR_STATUS_MASK) == 4) { + dev_err(dev, "invalid setting for UC_REGION\n"); + ret = -EINVAL; + break; + } + + usleep_range(500, 1000); + count++; + } + + if (count >= max_tries) + ret = -ETIMEDOUT; + + return ret; +} + +static u32 venus_hwversion(struct venus_hfi_device *hdev) +{ + struct device *dev = hdev->core->dev; + u32 ver = venus_readl(hdev, WRAPPER_HW_VERSION); + u32 major, minor, step; + + major = ver & WRAPPER_HW_VERSION_MAJOR_VERSION_MASK; + major = major >> WRAPPER_HW_VERSION_MAJOR_VERSION_SHIFT; + minor = ver & WRAPPER_HW_VERSION_MINOR_VERSION_MASK; + minor = minor >> WRAPPER_HW_VERSION_MINOR_VERSION_SHIFT; + step = ver & WRAPPER_HW_VERSION_STEP_VERSION_MASK; + + dev_dbg(dev, "venus hw 
version %x.%x.%x\n", major, minor, step); + + return major; +} + +static int venus_run(struct venus_hfi_device *hdev) +{ + struct device *dev = hdev->core->dev; + int ret; + + /* + * Re-program all of the registers that get reset as a result of + * regulator_disable() and _enable() + */ + venus_set_registers(hdev); + + venus_writel(hdev, UC_REGION_ADDR, hdev->ifaceq_table.da); + venus_writel(hdev, UC_REGION_SIZE, SHARED_QSIZE); + venus_writel(hdev, CPU_CS_SCIACMDARG2, hdev->ifaceq_table.da); + venus_writel(hdev, CPU_CS_SCIACMDARG1, 0x01); + if (hdev->sfr.da) + venus_writel(hdev, SFR_ADDR, hdev->sfr.da); + + ret = venus_boot_core(hdev); + if (ret) { + dev_err(dev, "failed to reset venus core\n"); + return ret; + } + + venus_hwversion(hdev); + + return 0; +} + +static int venus_halt_axi(struct venus_hfi_device *hdev) +{ + void __iomem *base = hdev->core->base; + struct device *dev = hdev->core->dev; + u32 val; + int ret; + + /* Halt AXI and AXI IMEM VBIF Access */ + val = venus_readl(hdev, VBIF_AXI_HALT_CTRL0); + val |= VBIF_AXI_HALT_CTRL0_HALT_REQ; + venus_writel(hdev, VBIF_AXI_HALT_CTRL0, val); + + /* Request for AXI bus port halt */ + ret = readl_poll_timeout(base + VBIF_AXI_HALT_CTRL1, val, + val & VBIF_AXI_HALT_CTRL1_HALT_ACK, + POLL_INTERVAL_US, + VBIF_AXI_HALT_ACK_TIMEOUT_US); + if (ret) { + dev_err(dev, "AXI bus port halt timeout\n"); + return ret; + } + + return 0; +} + +static int venus_power_off(struct venus_hfi_device *hdev) +{ + int ret; + + if (!hdev->power_enabled) + return 0; + + ret = qcom_scm_set_remote_state(TZBSP_VIDEO_STATE_SUSPEND, 0); + if (ret) + return ret; + + ret = venus_halt_axi(hdev); + if (ret) + return ret; + + hdev->power_enabled = false; + + return 0; +} + +static int venus_power_on(struct venus_hfi_device *hdev) +{ + int ret; + + if (hdev->power_enabled) + return 0; + + ret = qcom_scm_set_remote_state(TZBSP_VIDEO_STATE_RESUME, 0); + if (ret) + goto err; + + ret = venus_run(hdev); + if (ret) + goto err_suspend; + + hdev->power_enabled 
= true; + + return 0; + +err_suspend: + qcom_scm_set_remote_state(TZBSP_VIDEO_STATE_SUSPEND, 0); +err: + hdev->power_enabled = false; + return ret; +} + +static int venus_iface_msgq_read_nolock(struct venus_hfi_device *hdev, + void *pkt) +{ + struct iface_queue *queue; + u32 tx_req; + int ret; + + if (!venus_is_valid_state(hdev)) + return -EINVAL; + + queue = &hdev->queues[IFACEQ_MSG_IDX]; + + ret = venus_read_queue(hdev, queue, pkt, &tx_req); + if (ret) + return ret; + + if (tx_req) + venus_soft_int(hdev); + + return 0; +} + +static int venus_iface_msgq_read(struct venus_hfi_device *hdev, void *pkt) +{ + int ret; + + mutex_lock(&hdev->lock); + ret = venus_iface_msgq_read_nolock(hdev, pkt); + mutex_unlock(&hdev->lock); + + return ret; +} + +static int venus_iface_dbgq_read_nolock(struct venus_hfi_device *hdev, + void *pkt) +{ + struct iface_queue *queue; + u32 tx_req; + int ret; + + ret = venus_is_valid_state(hdev); + if (!ret) + return -EINVAL; + + queue = &hdev->queues[IFACEQ_DBG_IDX]; + + ret = venus_read_queue(hdev, queue, pkt, &tx_req); + if (ret) + return ret; + + if (tx_req) + venus_soft_int(hdev); + + return 0; +} + +static int venus_iface_dbgq_read(struct venus_hfi_device *hdev, void *pkt) +{ + int ret; + + if (!pkt) + return -EINVAL; + + mutex_lock(&hdev->lock); + ret = venus_iface_dbgq_read_nolock(hdev, pkt); + mutex_unlock(&hdev->lock); + + return ret; +} + +static void venus_set_qhdr_defaults(struct hfi_queue_header *qhdr) +{ + qhdr->status = 1; + qhdr->type = IFACEQ_DFLT_QHDR; + qhdr->q_size = IFACEQ_QUEUE_SIZE / 4; + qhdr->pkt_size = 0; + qhdr->rx_wm = 1; + qhdr->tx_wm = 1; + qhdr->rx_req = 1; + qhdr->tx_req = 0; + qhdr->rx_irq_status = 0; + qhdr->tx_irq_status = 0; + qhdr->read_idx = 0; + qhdr->write_idx = 0; +} + +static void venus_interface_queues_release(struct venus_hfi_device *hdev) +{ + mutex_lock(&hdev->lock); + + venus_free(hdev, &hdev->ifaceq_table); + venus_free(hdev, &hdev->sfr); + + memset(hdev->queues, 0, sizeof(hdev->queues)); + 
memset(&hdev->ifaceq_table, 0, sizeof(hdev->ifaceq_table)); + memset(&hdev->sfr, 0, sizeof(hdev->sfr)); + + mutex_unlock(&hdev->lock); +} + +static int venus_interface_queues_init(struct venus_hfi_device *hdev) +{ + struct hfi_queue_table_header *tbl_hdr; + struct iface_queue *queue; + struct hfi_sfr *sfr; + struct mem_desc desc = {0}; + unsigned int offset; + unsigned int i; + int ret; + + ret = venus_alloc(hdev, &desc, ALIGNED_QUEUE_SIZE); + if (ret) + return ret; + + hdev->ifaceq_table.kva = desc.kva; + hdev->ifaceq_table.da = desc.da; + hdev->ifaceq_table.size = IFACEQ_TABLE_SIZE; + offset = hdev->ifaceq_table.size; + + for (i = 0; i < IFACEQ_NUM; i++) { + queue = &hdev->queues[i]; + queue->qmem.da = desc.da + offset; + queue->qmem.kva = desc.kva + offset; + queue->qmem.size = IFACEQ_QUEUE_SIZE; + offset += queue->qmem.size; + queue->qhdr = + IFACEQ_GET_QHDR_START_ADDR(hdev->ifaceq_table.kva, i); + + venus_set_qhdr_defaults(queue->qhdr); + + queue->qhdr->start_addr = queue->qmem.da; + + if (i == IFACEQ_CMD_IDX) + queue->qhdr->type |= HFI_HOST_TO_CTRL_CMD_Q; + else if (i == IFACEQ_MSG_IDX) + queue->qhdr->type |= HFI_CTRL_TO_HOST_MSG_Q; + else if (i == IFACEQ_DBG_IDX) + queue->qhdr->type |= HFI_CTRL_TO_HOST_DBG_Q; + } + + tbl_hdr = hdev->ifaceq_table.kva; + tbl_hdr->version = 0; + tbl_hdr->size = IFACEQ_TABLE_SIZE; + tbl_hdr->qhdr0_offset = sizeof(struct hfi_queue_table_header); + tbl_hdr->qhdr_size = sizeof(struct hfi_queue_header); + tbl_hdr->num_q = IFACEQ_NUM; + tbl_hdr->num_active_q = IFACEQ_NUM; + + /* + * Set receive request to zero on debug queue as there is no + * need of interrupt from video hardware for debug messages + */ + queue = &hdev->queues[IFACEQ_DBG_IDX]; + queue->qhdr->rx_req = 0; + + ret = venus_alloc(hdev, &desc, ALIGNED_SFR_SIZE); + if (ret) { + hdev->sfr.da = 0; + } else { + hdev->sfr.da = desc.da; + hdev->sfr.kva = desc.kva; + hdev->sfr.size = ALIGNED_SFR_SIZE; + sfr = hdev->sfr.kva; + sfr->buf_size = ALIGNED_SFR_SIZE; + } + + /* ensure 
table and queue header structs are settled in memory */ + wmb(); + + return 0; +} + +static int venus_sys_set_debug(struct venus_hfi_device *hdev, u32 debug) +{ + struct hfi_sys_set_property_pkt *pkt; + u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE]; + int ret; + + pkt = (struct hfi_sys_set_property_pkt *)packet; + + pkt_sys_debug_config(pkt, HFI_DEBUG_MODE_QUEUE, debug); + + ret = venus_iface_cmdq_write(hdev, pkt); + if (ret) + return ret; + + return 0; +} + +static int venus_sys_set_coverage(struct venus_hfi_device *hdev, u32 mode) +{ + struct hfi_sys_set_property_pkt *pkt; + u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE]; + int ret; + + pkt = (struct hfi_sys_set_property_pkt *)packet; + + pkt_sys_coverage_config(pkt, mode); + + ret = venus_iface_cmdq_write(hdev, pkt); + if (ret) + return ret; + + return 0; +} + +static int venus_sys_set_idle_message(struct venus_hfi_device *hdev, + bool enable) +{ + struct hfi_sys_set_property_pkt *pkt; + u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE]; + int ret; + + if (!enable) + return 0; + + pkt = (struct hfi_sys_set_property_pkt *)packet; + + pkt_sys_idle_indicator(pkt, enable); + + ret = venus_iface_cmdq_write(hdev, pkt); + if (ret) + return ret; + + return 0; +} + +static int venus_sys_set_power_control(struct venus_hfi_device *hdev, + bool enable) +{ + struct hfi_sys_set_property_pkt *pkt; + u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE]; + int ret; + + pkt = (struct hfi_sys_set_property_pkt *)packet; + + pkt_sys_power_control(pkt, enable); + + ret = venus_iface_cmdq_write(hdev, pkt); + if (ret) + return ret; + + return 0; +} + +static int venus_get_queue_size(struct venus_hfi_device *hdev, + unsigned int index) +{ + struct hfi_queue_header *qhdr; + + if (index >= IFACEQ_NUM) + return -EINVAL; + + qhdr = hdev->queues[index].qhdr; + if (!qhdr) + return -EINVAL; + + return abs(qhdr->read_idx - qhdr->write_idx); +} + +static int venus_sys_set_default_properties(struct venus_hfi_device *hdev) +{ + struct device *dev = hdev->core->dev; + int ret; + + ret = 
venus_sys_set_debug(hdev, venus_fw_debug); + if (ret) + dev_warn(dev, "setting fw debug msg ON failed (%d)\n", ret); + + ret = venus_sys_set_idle_message(hdev, venus_sys_idle_indicator); + if (ret) + dev_warn(dev, "setting idle response ON failed (%d)\n", ret); + + ret = venus_sys_set_power_control(hdev, venus_fw_low_power_mode); + if (ret) + dev_warn(dev, "setting hw power collapse ON failed (%d)\n", + ret); + + return ret; +} + +static int venus_session_cmd(struct venus_inst *inst, u32 pkt_type) +{ + struct venus_hfi_device *hdev = to_hfi_priv(inst->core); + struct hfi_session_pkt pkt; + + pkt_session_cmd(&pkt, pkt_type, inst); + + return venus_iface_cmdq_write(hdev, &pkt); +} + +static void venus_flush_debug_queue(struct venus_hfi_device *hdev) +{ + struct device *dev = hdev->core->dev; + void *packet = hdev->dbg_buf; + + while (!venus_iface_dbgq_read(hdev, packet)) { + struct hfi_msg_sys_coverage_pkt *pkt = packet; + + if (pkt->hdr.pkt_type != HFI_MSG_SYS_COV) { + struct hfi_msg_sys_debug_pkt *pkt = packet; + + dev_dbg(dev, "%s", pkt->msg_data); + } + } +} + +static int venus_prepare_power_collapse(struct venus_hfi_device *hdev, + bool wait) +{ + unsigned long timeout = msecs_to_jiffies(venus_hw_rsp_timeout); + struct hfi_sys_pc_prep_pkt pkt; + int ret; + + init_completion(&hdev->pwr_collapse_prep); + + pkt_sys_pc_prep(&pkt); + + ret = venus_iface_cmdq_write(hdev, &pkt); + if (ret) + return ret; + + if (!wait) + return 0; + + ret = wait_for_completion_timeout(&hdev->pwr_collapse_prep, timeout); + if (!ret) { + venus_flush_debug_queue(hdev); + return -ETIMEDOUT; + } + + return 0; +} + +static int venus_are_queues_empty(struct venus_hfi_device *hdev) +{ + int ret1, ret2; + + ret1 = venus_get_queue_size(hdev, IFACEQ_MSG_IDX); + if (ret1 < 0) + return ret1; + + ret2 = venus_get_queue_size(hdev, IFACEQ_CMD_IDX); + if (ret2 < 0) + return ret2; + + if (!ret1 && !ret2) + return 1; + + return 0; +} + +static void venus_sfr_print(struct venus_hfi_device *hdev) +{ + 
struct device *dev = hdev->core->dev; + struct hfi_sfr *sfr = hdev->sfr.kva; + void *p; + + if (!sfr) + return; + + p = memchr(sfr->data, '\0', sfr->buf_size); + /* + * SFR isn't guaranteed to be NULL terminated since SYS_ERROR indicates + * that Venus is in the process of crashing. + */ + if (!p) + sfr->data[sfr->buf_size - 1] = '\0'; + + dev_err_ratelimited(dev, "SFR message from FW: %s\n", sfr->data); +} + +static void venus_process_msg_sys_error(struct venus_hfi_device *hdev, + void *packet) +{ + struct hfi_msg_event_notify_pkt *event_pkt = packet; + + if (event_pkt->event_id != HFI_EVENT_SYS_ERROR) + return; + + venus_set_state(hdev, VENUS_STATE_DEINIT); + + /* + * Once SYS_ERROR received from HW, it is safe to halt the AXI. + * With SYS_ERROR, Venus FW may have crashed and HW might be + * active and causing unnecessary transactions. Hence it is + * safe to stop all AXI transactions from venus subsystem. + */ + venus_halt_axi(hdev); + venus_sfr_print(hdev); +} + +static irqreturn_t venus_isr_thread(struct venus_core *core) +{ + struct venus_hfi_device *hdev = to_hfi_priv(core); + const struct venus_resources *res; + void *pkt; + u32 msg_ret; + + if (!hdev) + return IRQ_NONE; + + res = hdev->core->res; + pkt = hdev->pkt_buf; + + if (hdev->irq_status & WRAPPER_INTR_STATUS_A2HWD_MASK) { + venus_sfr_print(hdev); + hfi_process_watchdog_timeout(core); + } + + while (!venus_iface_msgq_read(hdev, pkt)) { + msg_ret = hfi_process_msg_packet(core, pkt); + switch (msg_ret) { + case HFI_MSG_EVENT_NOTIFY: + venus_process_msg_sys_error(hdev, pkt); + break; + case HFI_MSG_SYS_INIT: + venus_hfi_core_set_resource(core, res->vmem_id, + res->vmem_size, + res->vmem_addr, + hdev); + break; + case HFI_MSG_SYS_RELEASE_RESOURCE: + complete(&hdev->release_resource); + break; + case HFI_MSG_SYS_PC_PREP: + complete(&hdev->pwr_collapse_prep); + break; + default: + break; + } + } + + venus_flush_debug_queue(hdev); + + return IRQ_HANDLED; +} + +static irqreturn_t venus_isr(struct venus_core 
*core) +{ + struct venus_hfi_device *hdev = to_hfi_priv(core); + u32 status; + + if (!hdev) + return IRQ_NONE; + + status = venus_readl(hdev, WRAPPER_INTR_STATUS); + + if (status & WRAPPER_INTR_STATUS_A2H_MASK || + status & WRAPPER_INTR_STATUS_A2HWD_MASK || + status & CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK) + hdev->irq_status = status; + + venus_writel(hdev, CPU_CS_A2HSOFTINTCLR, 1); + venus_writel(hdev, WRAPPER_INTR_CLEAR, status); + + return IRQ_WAKE_THREAD; +} + +static int venus_core_init(struct venus_core *core) +{ + struct venus_hfi_device *hdev = to_hfi_priv(core); + struct device *dev = core->dev; + struct hfi_sys_get_property_pkt version_pkt; + struct hfi_sys_init_pkt pkt; + int ret; + + pkt_sys_init(&pkt, HFI_VIDEO_ARCH_OX); + + venus_set_state(hdev, VENUS_STATE_INIT); + + ret = venus_iface_cmdq_write(hdev, &pkt); + if (ret) + return ret; + + pkt_sys_image_version(&version_pkt); + + ret = venus_iface_cmdq_write(hdev, &version_pkt); + if (ret) + dev_warn(dev, "failed to send image version pkt to fw\n"); + + return 0; +} + +static int venus_core_deinit(struct venus_core *core) +{ + struct venus_hfi_device *hdev = to_hfi_priv(core); + + venus_set_state(hdev, VENUS_STATE_DEINIT); + hdev->suspended = true; + hdev->power_enabled = false; + + return 0; +} + +static int venus_core_ping(struct venus_core *core, u32 cookie) +{ + struct venus_hfi_device *hdev = to_hfi_priv(core); + struct hfi_sys_ping_pkt pkt; + + pkt_sys_ping(&pkt, cookie); + + return venus_iface_cmdq_write(hdev, &pkt); +} + +static int venus_core_trigger_ssr(struct venus_core *core, u32 trigger_type) +{ + struct venus_hfi_device *hdev = to_hfi_priv(core); + struct hfi_sys_test_ssr_pkt pkt; + int ret; + + ret = pkt_sys_ssr_cmd(&pkt, trigger_type); + if (ret) + return ret; + + return venus_iface_cmdq_write(hdev, &pkt); +} + +static int venus_session_init(struct venus_inst *inst, u32 session_type, + u32 codec) +{ + struct venus_hfi_device *hdev = to_hfi_priv(inst->core); + struct hfi_session_init_pkt 
pkt; + int ret; + + ret = venus_sys_set_default_properties(hdev); + if (ret) + return ret; + + ret = pkt_session_init(&pkt, inst, session_type, codec); + if (ret) + goto err; + + ret = venus_iface_cmdq_write(hdev, &pkt); + if (ret) + goto err; + + return 0; + +err: + venus_flush_debug_queue(hdev); + return ret; +} + +static int venus_session_end(struct venus_inst *inst) +{ + struct venus_hfi_device *hdev = to_hfi_priv(inst->core); + struct device *dev = hdev->core->dev; + + if (venus_fw_coverage) { + if (venus_sys_set_coverage(hdev, venus_fw_coverage)) + dev_warn(dev, "fw coverage msg ON failed\n"); + } + + return venus_session_cmd(inst, HFI_CMD_SYS_SESSION_END); +} + +static int venus_session_abort(struct venus_inst *inst) +{ + struct venus_hfi_device *hdev = to_hfi_priv(inst->core); + + venus_flush_debug_queue(hdev); + + return venus_session_cmd(inst, HFI_CMD_SYS_SESSION_ABORT); +} + +static int venus_session_flush(struct venus_inst *inst, u32 flush_mode) +{ + struct venus_hfi_device *hdev = to_hfi_priv(inst->core); + struct hfi_session_flush_pkt pkt; + int ret; + + ret = pkt_session_flush(&pkt, inst, flush_mode); + if (ret) + return ret; + + return venus_iface_cmdq_write(hdev, &pkt); +} + +static int venus_session_start(struct venus_inst *inst) +{ + return venus_session_cmd(inst, HFI_CMD_SESSION_START); +} + +static int venus_session_stop(struct venus_inst *inst) +{ + return venus_session_cmd(inst, HFI_CMD_SESSION_STOP); +} + +static int venus_session_continue(struct venus_inst *inst) +{ + return venus_session_cmd(inst, HFI_CMD_SESSION_CONTINUE); +} + +static int venus_session_etb(struct venus_inst *inst, + struct hfi_frame_data *in_frame) +{ + struct venus_hfi_device *hdev = to_hfi_priv(inst->core); + u32 session_type = inst->session_type; + int ret; + + if (session_type == VIDC_SESSION_TYPE_DEC) { + struct hfi_session_empty_buffer_compressed_pkt pkt; + + ret = pkt_session_etb_decoder(&pkt, inst, in_frame); + if (ret) + return ret; + + ret = 
venus_iface_cmdq_write(hdev, &pkt); + } else if (session_type == VIDC_SESSION_TYPE_ENC) { + struct hfi_session_empty_buffer_uncompressed_plane0_pkt pkt; + + ret = pkt_session_etb_encoder(&pkt, inst, in_frame); + if (ret) + return ret; + + ret = venus_iface_cmdq_write(hdev, &pkt); + } else { + ret = -EINVAL; + } + + return ret; +} + +static int venus_session_ftb(struct venus_inst *inst, + struct hfi_frame_data *out_frame) +{ + struct venus_hfi_device *hdev = to_hfi_priv(inst->core); + struct hfi_session_fill_buffer_pkt pkt; + int ret; + + ret = pkt_session_ftb(&pkt, inst, out_frame); + if (ret) + return ret; + + return venus_iface_cmdq_write(hdev, &pkt); +} + +static int venus_session_set_buffers(struct venus_inst *inst, + struct hfi_buffer_desc *bd) +{ + struct venus_hfi_device *hdev = to_hfi_priv(inst->core); + struct hfi_session_set_buffers_pkt *pkt; + u8 packet[IFACEQ_VAR_LARGE_PKT_SIZE]; + int ret; + + if (bd->buffer_type == HFI_BUFFER_INPUT) + return 0; + + pkt = (struct hfi_session_set_buffers_pkt *)packet; + + ret = pkt_session_set_buffers(pkt, inst, bd); + if (ret) + return ret; + + return venus_iface_cmdq_write(hdev, pkt); +} + +static int venus_session_unset_buffers(struct venus_inst *inst, + struct hfi_buffer_desc *bd) +{ + struct venus_hfi_device *hdev = to_hfi_priv(inst->core); + struct hfi_session_release_buffer_pkt *pkt; + u8 packet[IFACEQ_VAR_LARGE_PKT_SIZE]; + int ret; + + if (bd->buffer_type == HFI_BUFFER_INPUT) + return 0; + + pkt = (struct hfi_session_release_buffer_pkt *)packet; + + ret = pkt_session_unset_buffers(pkt, inst, bd); + if (ret) + return ret; + + return venus_iface_cmdq_write(hdev, pkt); +} + +static int venus_session_load_res(struct venus_inst *inst) +{ + return venus_session_cmd(inst, HFI_CMD_SESSION_LOAD_RESOURCES); +} + +static int venus_session_release_res(struct venus_inst *inst) +{ + return venus_session_cmd(inst, HFI_CMD_SESSION_RELEASE_RESOURCES); +} + +static int venus_session_parse_seq_hdr(struct venus_inst *inst, u32 
seq_hdr, + u32 seq_hdr_len) +{ + struct venus_hfi_device *hdev = to_hfi_priv(inst->core); + struct hfi_session_parse_sequence_header_pkt *pkt; + u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE]; + int ret; + + pkt = (struct hfi_session_parse_sequence_header_pkt *)packet; + + ret = pkt_session_parse_seq_header(pkt, inst, seq_hdr, seq_hdr_len); + if (ret) + return ret; + + ret = venus_iface_cmdq_write(hdev, pkt); + if (ret) + return ret; + + return 0; +} + +static int venus_session_get_seq_hdr(struct venus_inst *inst, u32 seq_hdr, + u32 seq_hdr_len) +{ + struct venus_hfi_device *hdev = to_hfi_priv(inst->core); + struct hfi_session_get_sequence_header_pkt *pkt; + u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE]; + int ret; + + pkt = (struct hfi_session_get_sequence_header_pkt *)packet; + + ret = pkt_session_get_seq_hdr(pkt, inst, seq_hdr, seq_hdr_len); + if (ret) + return ret; + + return venus_iface_cmdq_write(hdev, pkt); +} + +static int venus_session_set_property(struct venus_inst *inst, u32 ptype, + void *pdata) +{ + struct venus_hfi_device *hdev = to_hfi_priv(inst->core); + struct hfi_session_set_property_pkt *pkt; + u8 packet[IFACEQ_VAR_LARGE_PKT_SIZE]; + int ret; + + pkt = (struct hfi_session_set_property_pkt *)packet; + + ret = pkt_session_set_property(pkt, inst, ptype, pdata); + if (ret) + return ret; + + return venus_iface_cmdq_write(hdev, pkt); +} + +static int venus_session_get_property(struct venus_inst *inst, u32 ptype) +{ + struct venus_hfi_device *hdev = to_hfi_priv(inst->core); + struct hfi_session_get_property_pkt pkt; + int ret; + + ret = pkt_session_get_property(&pkt, inst, ptype); + if (ret) + return ret; + + return venus_iface_cmdq_write(hdev, &pkt); +} + +static int venus_resume(struct venus_core *core) +{ + struct venus_hfi_device *hdev = to_hfi_priv(core); + int ret = 0; + + mutex_lock(&hdev->lock); + + if (!hdev->suspended) + goto unlock; + + ret = venus_power_on(hdev); + +unlock: + if (!ret) + hdev->suspended = false; + + mutex_unlock(&hdev->lock); + + return ret; 
+} + +static int venus_suspend_1xx(struct venus_core *core) +{ + struct venus_hfi_device *hdev = to_hfi_priv(core); + struct device *dev = core->dev; + u32 ctrl_status; + int ret; + + if (!hdev->power_enabled || hdev->suspended) + return 0; + + mutex_lock(&hdev->lock); + ret = venus_is_valid_state(hdev); + mutex_unlock(&hdev->lock); + + if (!ret) { + dev_err(dev, "bad state, cannot suspend\n"); + return -EINVAL; + } + + ret = venus_prepare_power_collapse(hdev, true); + if (ret) { + dev_err(dev, "prepare for power collapse fail (%d)\n", ret); + return ret; + } + + mutex_lock(&hdev->lock); + + if (hdev->last_packet_type != HFI_CMD_SYS_PC_PREP) { + mutex_unlock(&hdev->lock); + return -EINVAL; + } + + ret = venus_are_queues_empty(hdev); + if (ret < 0 || !ret) { + mutex_unlock(&hdev->lock); + return -EINVAL; + } + + ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0); + if (!(ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)) { + mutex_unlock(&hdev->lock); + return -EINVAL; + } + + ret = venus_power_off(hdev); + if (ret) { + mutex_unlock(&hdev->lock); + return ret; + } + + hdev->suspended = true; + + mutex_unlock(&hdev->lock); + + return 0; +} + +static int venus_suspend_3xx(struct venus_core *core) +{ + struct venus_hfi_device *hdev = to_hfi_priv(core); + struct device *dev = core->dev; + u32 ctrl_status, wfi_status; + int ret; + int cnt = 100; + + if (!hdev->power_enabled || hdev->suspended) + return 0; + + mutex_lock(&hdev->lock); + ret = venus_is_valid_state(hdev); + mutex_unlock(&hdev->lock); + + if (!ret) { + dev_err(dev, "bad state, cannot suspend\n"); + return -EINVAL; + } + + ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0); + if (!(ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)) { + wfi_status = venus_readl(hdev, WRAPPER_CPU_STATUS); + ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0); + + ret = venus_prepare_power_collapse(hdev, false); + if (ret) { + dev_err(dev, "prepare for power collapse fail (%d)\n", + ret); + return ret; + } + + cnt = 100; + while (cnt--) { + 
wfi_status = venus_readl(hdev, WRAPPER_CPU_STATUS); + ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0); + if (ctrl_status & CPU_CS_SCIACMDARG0_PC_READY && + wfi_status & BIT(0)) + break; + usleep_range(1000, 1500); + } + } + + mutex_lock(&hdev->lock); + + ret = venus_power_off(hdev); + if (ret) { + dev_err(dev, "venus_power_off (%d)\n", ret); + mutex_unlock(&hdev->lock); + return ret; + } + + hdev->suspended = true; + + mutex_unlock(&hdev->lock); + + return 0; +} + +static int venus_suspend(struct venus_core *core) +{ + if (core->res->hfi_version == HFI_VERSION_3XX) + return venus_suspend_3xx(core); + + return venus_suspend_1xx(core); +} + +static const struct hfi_ops venus_hfi_ops = { + .core_init = venus_core_init, + .core_deinit = venus_core_deinit, + .core_ping = venus_core_ping, + .core_trigger_ssr = venus_core_trigger_ssr, + + .session_init = venus_session_init, + .session_end = venus_session_end, + .session_abort = venus_session_abort, + .session_flush = venus_session_flush, + .session_start = venus_session_start, + .session_stop = venus_session_stop, + .session_continue = venus_session_continue, + .session_etb = venus_session_etb, + .session_ftb = venus_session_ftb, + .session_set_buffers = venus_session_set_buffers, + .session_unset_buffers = venus_session_unset_buffers, + .session_load_res = venus_session_load_res, + .session_release_res = venus_session_release_res, + .session_parse_seq_hdr = venus_session_parse_seq_hdr, + .session_get_seq_hdr = venus_session_get_seq_hdr, + .session_set_property = venus_session_set_property, + .session_get_property = venus_session_get_property, + + .resume = venus_resume, + .suspend = venus_suspend, + + .isr = venus_isr, + .isr_thread = venus_isr_thread, +}; + +void venus_hfi_destroy(struct venus_core *core) +{ + struct venus_hfi_device *hdev = to_hfi_priv(core); + + venus_interface_queues_release(hdev); + mutex_destroy(&hdev->lock); + kfree(hdev); + core->priv = NULL; + core->ops = NULL; +} + +int 
venus_hfi_create(struct venus_core *core) +{ + struct venus_hfi_device *hdev; + int ret; + + hdev = kzalloc(sizeof(*hdev), GFP_KERNEL); + if (!hdev) + return -ENOMEM; + + mutex_init(&hdev->lock); + + hdev->core = core; + hdev->suspended = true; + core->priv = hdev; + core->ops = &venus_hfi_ops; + core->core_caps = ENC_ROTATION_CAPABILITY | ENC_SCALING_CAPABILITY | + ENC_DEINTERLACE_CAPABILITY | + DEC_MULTI_STREAM_CAPABILITY; + + ret = venus_interface_queues_init(hdev); + if (ret) + goto err_kfree; + + return 0; + +err_kfree: + kfree(hdev); + core->priv = NULL; + core->ops = NULL; + return ret; +} diff --git a/drivers/media/platform/qcom/venus/hfi_venus.h b/drivers/media/platform/qcom/venus/hfi_venus.h new file mode 100644 index 000000000000..885923354033 --- /dev/null +++ b/drivers/media/platform/qcom/venus/hfi_venus.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * Copyright (C) 2017 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef __VENUS_HFI_VENUS_H__ +#define __VENUS_HFI_VENUS_H__ + +struct venus_core; + +void venus_hfi_destroy(struct venus_core *core); +int venus_hfi_create(struct venus_core *core); + +#endif diff --git a/drivers/media/platform/qcom/venus/hfi_venus_io.h b/drivers/media/platform/qcom/venus/hfi_venus_io.h new file mode 100644 index 000000000000..98cc350113ab --- /dev/null +++ b/drivers/media/platform/qcom/venus/hfi_venus_io.h @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * Copyright (C) 2017 Linaro Ltd. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef __VENUS_HFI_VENUS_IO_H__ +#define __VENUS_HFI_VENUS_IO_H__ + +#define VBIF_BASE 0x80000 + +#define VBIF_AXI_HALT_CTRL0 (VBIF_BASE + 0x208) +#define VBIF_AXI_HALT_CTRL1 (VBIF_BASE + 0x20c) + +#define VBIF_AXI_HALT_CTRL0_HALT_REQ BIT(0) +#define VBIF_AXI_HALT_CTRL1_HALT_ACK BIT(0) +#define VBIF_AXI_HALT_ACK_TIMEOUT_US 500000 + +#define CPU_BASE 0xc0000 +#define CPU_CS_BASE (CPU_BASE + 0x12000) +#define CPU_IC_BASE (CPU_BASE + 0x1f000) + +#define CPU_CS_A2HSOFTINTCLR (CPU_CS_BASE + 0x1c) + +#define VIDC_CTRL_INIT (CPU_CS_BASE + 0x48) +#define VIDC_CTRL_INIT_RESERVED_BITS31_1_MASK 0xfffffffe +#define VIDC_CTRL_INIT_RESERVED_BITS31_1_SHIFT 1 +#define VIDC_CTRL_INIT_CTRL_MASK 0x1 +#define VIDC_CTRL_INIT_CTRL_SHIFT 0 + +/* HFI control status */ +#define CPU_CS_SCIACMDARG0 (CPU_CS_BASE + 0x4c) +#define CPU_CS_SCIACMDARG0_MASK 0xff +#define CPU_CS_SCIACMDARG0_SHIFT 0x0 +#define CPU_CS_SCIACMDARG0_ERROR_STATUS_MASK 0xfe +#define CPU_CS_SCIACMDARG0_ERROR_STATUS_SHIFT 0x1 +#define CPU_CS_SCIACMDARG0_INIT_STATUS_MASK 0x1 +#define CPU_CS_SCIACMDARG0_INIT_STATUS_SHIFT 0x0 +#define CPU_CS_SCIACMDARG0_PC_READY BIT(8) +#define CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK BIT(30) + +/* HFI queue table info */ +#define CPU_CS_SCIACMDARG1 (CPU_CS_BASE + 0x50) + +/* HFI queue table address */ +#define CPU_CS_SCIACMDARG2 (CPU_CS_BASE + 0x54) + +/* Venus cpu */ +#define CPU_CS_SCIACMDARG3 (CPU_CS_BASE + 0x58) + +#define SFR_ADDR (CPU_CS_BASE + 0x5c) +#define MMAP_ADDR (CPU_CS_BASE + 0x60) +#define UC_REGION_ADDR 
(CPU_CS_BASE + 0x64) +#define UC_REGION_SIZE (CPU_CS_BASE + 0x68) + +#define CPU_IC_SOFTINT (CPU_IC_BASE + 0x18) +#define CPU_IC_SOFTINT_H2A_MASK 0x8000 +#define CPU_IC_SOFTINT_H2A_SHIFT 0xf + +/* Venus wrapper */ +#define WRAPPER_BASE 0x000e0000 + +#define WRAPPER_HW_VERSION (WRAPPER_BASE + 0x00) +#define WRAPPER_HW_VERSION_MAJOR_VERSION_MASK 0x78000000 +#define WRAPPER_HW_VERSION_MAJOR_VERSION_SHIFT 28 +#define WRAPPER_HW_VERSION_MINOR_VERSION_MASK 0xfff0000 +#define WRAPPER_HW_VERSION_MINOR_VERSION_SHIFT 16 +#define WRAPPER_HW_VERSION_STEP_VERSION_MASK 0xffff + +#define WRAPPER_CLOCK_CONFIG (WRAPPER_BASE + 0x04) + +#define WRAPPER_INTR_STATUS (WRAPPER_BASE + 0x0c) +#define WRAPPER_INTR_STATUS_A2HWD_MASK 0x10 +#define WRAPPER_INTR_STATUS_A2HWD_SHIFT 0x4 +#define WRAPPER_INTR_STATUS_A2H_MASK 0x4 +#define WRAPPER_INTR_STATUS_A2H_SHIFT 0x2 + +#define WRAPPER_INTR_MASK (WRAPPER_BASE + 0x10) +#define WRAPPER_INTR_MASK_A2HWD_BASK 0x10 +#define WRAPPER_INTR_MASK_A2HWD_SHIFT 0x4 +#define WRAPPER_INTR_MASK_A2HVCODEC_MASK 0x8 +#define WRAPPER_INTR_MASK_A2HVCODEC_SHIFT 0x3 +#define WRAPPER_INTR_MASK_A2HCPU_MASK 0x4 +#define WRAPPER_INTR_MASK_A2HCPU_SHIFT 0x2 + +#define WRAPPER_INTR_CLEAR (WRAPPER_BASE + 0x14) +#define WRAPPER_INTR_CLEAR_A2HWD_MASK 0x10 +#define WRAPPER_INTR_CLEAR_A2HWD_SHIFT 0x4 +#define WRAPPER_INTR_CLEAR_A2H_MASK 0x4 +#define WRAPPER_INTR_CLEAR_A2H_SHIFT 0x2 + +#define WRAPPER_POWER_STATUS (WRAPPER_BASE + 0x44) +#define WRAPPER_VDEC_VCODEC_POWER_CONTROL (WRAPPER_BASE + 0x48) +#define WRAPPER_VENC_VCODEC_POWER_CONTROL (WRAPPER_BASE + 0x4c) +#define WRAPPER_VDEC_VENC_AHB_BRIDGE_SYNC_RESET (WRAPPER_BASE + 0x64) + +#define WRAPPER_CPU_CLOCK_CONFIG (WRAPPER_BASE + 0x2000) +#define WRAPPER_CPU_AXI_HALT (WRAPPER_BASE + 0x2008) +#define WRAPPER_CPU_AXI_HALT_STATUS (WRAPPER_BASE + 0x200c) + +#define WRAPPER_CPU_CGC_DIS (WRAPPER_BASE + 0x2010) +#define WRAPPER_CPU_STATUS (WRAPPER_BASE + 0x2014) +#define WRAPPER_SW_RESET (WRAPPER_BASE + 0x3000) + +#endif diff --git 
a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c new file mode 100644 index 000000000000..da611a5eb670 --- /dev/null +++ b/drivers/media/platform/qcom/venus/vdec.c @@ -0,0 +1,1171 @@ +/* + * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * Copyright (C) 2017 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include <linux/clk.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/slab.h> +#include <media/v4l2-ioctl.h> +#include <media/v4l2-event.h> +#include <media/v4l2-ctrls.h> +#include <media/v4l2-mem2mem.h> +#include <media/videobuf2-dma-sg.h> + +#include "hfi_venus_io.h" +#include "core.h" +#include "helpers.h" +#include "vdec.h" + +static u32 get_framesize_uncompressed(unsigned int plane, u32 width, u32 height) +{ + u32 y_stride, uv_stride, y_plane; + u32 y_sclines, uv_sclines, uv_plane; + u32 size; + + y_stride = ALIGN(width, 128); + uv_stride = ALIGN(width, 128); + y_sclines = ALIGN(height, 32); + uv_sclines = ALIGN(((height + 1) >> 1), 16); + + y_plane = y_stride * y_sclines; + uv_plane = uv_stride * uv_sclines + SZ_4K; + size = y_plane + uv_plane + SZ_8K; + + return ALIGN(size, SZ_4K); +} + +static u32 get_framesize_compressed(unsigned int width, unsigned int height) +{ + return ((width * height * 3 / 2) / 2) + 128; +} + +/* + * Three resons to keep MPLANE formats (despite that the number of planes + * currently is one): + * - the MPLANE formats allow only one plane to be used + * - the downstream driver use 
MPLANE formats too + * - future firmware versions could add support for >1 planes + */ +static const struct venus_format vdec_formats[] = { + { + .pixfmt = V4L2_PIX_FMT_NV12, + .num_planes = 1, + .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE, + }, { + .pixfmt = V4L2_PIX_FMT_MPEG4, + .num_planes = 1, + .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE, + }, { + .pixfmt = V4L2_PIX_FMT_MPEG2, + .num_planes = 1, + .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE, + }, { + .pixfmt = V4L2_PIX_FMT_H263, + .num_planes = 1, + .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE, + }, { + .pixfmt = V4L2_PIX_FMT_VC1_ANNEX_G, + .num_planes = 1, + .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE, + }, { + .pixfmt = V4L2_PIX_FMT_VC1_ANNEX_L, + .num_planes = 1, + .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE, + }, { + .pixfmt = V4L2_PIX_FMT_H264, + .num_planes = 1, + .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE, + }, { + .pixfmt = V4L2_PIX_FMT_VP8, + .num_planes = 1, + .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE, + }, { + .pixfmt = V4L2_PIX_FMT_VP9, + .num_planes = 1, + .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE, + }, { + .pixfmt = V4L2_PIX_FMT_XVID, + .num_planes = 1, + .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE, + }, +}; + +static const struct venus_format * +find_format(struct venus_inst *inst, u32 pixfmt, u32 type) +{ + const struct venus_format *fmt = vdec_formats; + unsigned int size = ARRAY_SIZE(vdec_formats); + unsigned int i; + + for (i = 0; i < size; i++) { + if (fmt[i].pixfmt == pixfmt) + break; + } + + if (i == size || fmt[i].type != type) + return NULL; + + if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE && + !venus_helper_check_codec(inst, fmt[i].pixfmt)) + return NULL; + + return &fmt[i]; +} + +static const struct venus_format * +find_format_by_index(struct venus_inst *inst, unsigned int index, u32 type) +{ + const struct venus_format *fmt = vdec_formats; + unsigned int size = ARRAY_SIZE(vdec_formats); + unsigned int i, k = 0; + + if (index > size) + return NULL; + + for (i = 0; i < size; i++) { + if (fmt[i].type 
!= type) + continue; + if (k == index) + break; + k++; + } + + if (i == size) + return NULL; + + if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE && + !venus_helper_check_codec(inst, fmt[i].pixfmt)) + return NULL; + + return &fmt[i]; +} + +static const struct venus_format * +vdec_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f) +{ + struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp; + struct v4l2_plane_pix_format *pfmt = pixmp->plane_fmt; + const struct venus_format *fmt; + unsigned int p; + + memset(pfmt[0].reserved, 0, sizeof(pfmt[0].reserved)); + memset(pixmp->reserved, 0, sizeof(pixmp->reserved)); + + fmt = find_format(inst, pixmp->pixelformat, f->type); + if (!fmt) { + if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) + pixmp->pixelformat = V4L2_PIX_FMT_NV12; + else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + pixmp->pixelformat = V4L2_PIX_FMT_H264; + else + return NULL; + fmt = find_format(inst, pixmp->pixelformat, f->type); + pixmp->width = 1280; + pixmp->height = 720; + } + + pixmp->width = clamp(pixmp->width, inst->cap_width.min, + inst->cap_width.max); + pixmp->height = clamp(pixmp->height, inst->cap_height.min, + inst->cap_height.max); + + if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) + pixmp->height = ALIGN(pixmp->height, 32); + + if (pixmp->field == V4L2_FIELD_ANY) + pixmp->field = V4L2_FIELD_NONE; + pixmp->num_planes = fmt->num_planes; + pixmp->flags = 0; + + if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + for (p = 0; p < pixmp->num_planes; p++) { + pfmt[p].sizeimage = + get_framesize_uncompressed(p, pixmp->width, + pixmp->height); + pfmt[p].bytesperline = ALIGN(pixmp->width, 128); + } + } else { + pfmt[0].sizeimage = get_framesize_compressed(pixmp->width, + pixmp->height); + pfmt[0].bytesperline = 0; + } + + return fmt; +} + +static int vdec_try_fmt(struct file *file, void *fh, struct v4l2_format *f) +{ + struct venus_inst *inst = to_inst(file); + + vdec_try_fmt_common(inst, f); + + return 0; +} + +static int 
vdec_g_fmt(struct file *file, void *fh, struct v4l2_format *f) +{ + struct venus_inst *inst = to_inst(file); + const struct venus_format *fmt = NULL; + struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp; + + if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) + fmt = inst->fmt_cap; + else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + fmt = inst->fmt_out; + + if (inst->reconfig) { + struct v4l2_format format = {}; + + inst->out_width = inst->reconfig_width; + inst->out_height = inst->reconfig_height; + inst->reconfig = false; + + format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; + format.fmt.pix_mp.pixelformat = inst->fmt_cap->pixfmt; + format.fmt.pix_mp.width = inst->out_width; + format.fmt.pix_mp.height = inst->out_height; + + vdec_try_fmt_common(inst, &format); + + inst->width = format.fmt.pix_mp.width; + inst->height = format.fmt.pix_mp.height; + } + + pixmp->pixelformat = fmt->pixfmt; + + if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + pixmp->width = inst->width; + pixmp->height = inst->height; + pixmp->colorspace = inst->colorspace; + pixmp->ycbcr_enc = inst->ycbcr_enc; + pixmp->quantization = inst->quantization; + pixmp->xfer_func = inst->xfer_func; + } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + pixmp->width = inst->out_width; + pixmp->height = inst->out_height; + } + + vdec_try_fmt_common(inst, f); + + return 0; +} + +static int vdec_s_fmt(struct file *file, void *fh, struct v4l2_format *f) +{ + struct venus_inst *inst = to_inst(file); + struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp; + struct v4l2_pix_format_mplane orig_pixmp; + const struct venus_format *fmt; + struct v4l2_format format; + u32 pixfmt_out = 0, pixfmt_cap = 0; + + orig_pixmp = *pixmp; + + fmt = vdec_try_fmt_common(inst, f); + + if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + pixfmt_out = pixmp->pixelformat; + pixfmt_cap = inst->fmt_cap->pixfmt; + } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + pixfmt_cap = pixmp->pixelformat; + 
pixfmt_out = inst->fmt_out->pixfmt; + } + + memset(&format, 0, sizeof(format)); + + format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; + format.fmt.pix_mp.pixelformat = pixfmt_out; + format.fmt.pix_mp.width = orig_pixmp.width; + format.fmt.pix_mp.height = orig_pixmp.height; + vdec_try_fmt_common(inst, &format); + + if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + inst->out_width = format.fmt.pix_mp.width; + inst->out_height = format.fmt.pix_mp.height; + inst->colorspace = pixmp->colorspace; + inst->ycbcr_enc = pixmp->ycbcr_enc; + inst->quantization = pixmp->quantization; + inst->xfer_func = pixmp->xfer_func; + } + + memset(&format, 0, sizeof(format)); + + format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; + format.fmt.pix_mp.pixelformat = pixfmt_cap; + format.fmt.pix_mp.width = orig_pixmp.width; + format.fmt.pix_mp.height = orig_pixmp.height; + vdec_try_fmt_common(inst, &format); + + inst->width = format.fmt.pix_mp.width; + inst->height = format.fmt.pix_mp.height; + + if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + inst->fmt_out = fmt; + else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) + inst->fmt_cap = fmt; + + return 0; +} + +static int +vdec_g_selection(struct file *file, void *fh, struct v4l2_selection *s) +{ + struct venus_inst *inst = to_inst(file); + + if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE && + s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) + return -EINVAL; + + switch (s->target) { + case V4L2_SEL_TGT_CROP_BOUNDS: + case V4L2_SEL_TGT_CROP_DEFAULT: + case V4L2_SEL_TGT_CROP: + if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) + return -EINVAL; + s->r.width = inst->out_width; + s->r.height = inst->out_height; + break; + case V4L2_SEL_TGT_COMPOSE_BOUNDS: + case V4L2_SEL_TGT_COMPOSE_PADDED: + if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) + return -EINVAL; + s->r.width = inst->width; + s->r.height = inst->height; + break; + case V4L2_SEL_TGT_COMPOSE_DEFAULT: + case V4L2_SEL_TGT_COMPOSE: + if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) + return -EINVAL; + s->r.width 
= inst->out_width; + s->r.height = inst->out_height; + break; + default: + return -EINVAL; + } + + s->r.top = 0; + s->r.left = 0; + + return 0; +} + +static int +vdec_querycap(struct file *file, void *fh, struct v4l2_capability *cap) +{ + strlcpy(cap->driver, "qcom-venus", sizeof(cap->driver)); + strlcpy(cap->card, "Qualcomm Venus video decoder", sizeof(cap->card)); + strlcpy(cap->bus_info, "platform:qcom-venus", sizeof(cap->bus_info)); + + return 0; +} + +static int vdec_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f) +{ + struct venus_inst *inst = to_inst(file); + const struct venus_format *fmt; + + memset(f->reserved, 0, sizeof(f->reserved)); + + fmt = find_format_by_index(inst, f->index, f->type); + if (!fmt) + return -EINVAL; + + f->pixelformat = fmt->pixfmt; + + return 0; +} + +static int vdec_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a) +{ + struct venus_inst *inst = to_inst(file); + struct v4l2_captureparm *cap = &a->parm.capture; + struct v4l2_fract *timeperframe = &cap->timeperframe; + u64 us_per_frame, fps; + + if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE && + a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + return -EINVAL; + + memset(cap->reserved, 0, sizeof(cap->reserved)); + if (!timeperframe->denominator) + timeperframe->denominator = inst->timeperframe.denominator; + if (!timeperframe->numerator) + timeperframe->numerator = inst->timeperframe.numerator; + cap->readbuffers = 0; + cap->extendedmode = 0; + cap->capability = V4L2_CAP_TIMEPERFRAME; + us_per_frame = timeperframe->numerator * (u64)USEC_PER_SEC; + do_div(us_per_frame, timeperframe->denominator); + + if (!us_per_frame) + return -EINVAL; + + fps = (u64)USEC_PER_SEC; + do_div(fps, us_per_frame); + + inst->fps = fps; + inst->timeperframe = *timeperframe; + + return 0; +} + +static int vdec_enum_framesizes(struct file *file, void *fh, + struct v4l2_frmsizeenum *fsize) +{ + struct venus_inst *inst = to_inst(file); + const struct venus_format *fmt; + + fmt = 
find_format(inst, fsize->pixel_format, + V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE); + if (!fmt) { + fmt = find_format(inst, fsize->pixel_format, + V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE); + if (!fmt) + return -EINVAL; + } + + if (fsize->index) + return -EINVAL; + + fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE; + + fsize->stepwise.min_width = inst->cap_width.min; + fsize->stepwise.max_width = inst->cap_width.max; + fsize->stepwise.step_width = inst->cap_width.step_size; + fsize->stepwise.min_height = inst->cap_height.min; + fsize->stepwise.max_height = inst->cap_height.max; + fsize->stepwise.step_height = inst->cap_height.step_size; + + return 0; +} + +static int vdec_subscribe_event(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub) +{ + switch (sub->type) { + case V4L2_EVENT_EOS: + return v4l2_event_subscribe(fh, sub, 2, NULL); + case V4L2_EVENT_SOURCE_CHANGE: + return v4l2_src_change_event_subscribe(fh, sub); + case V4L2_EVENT_CTRL: + return v4l2_ctrl_subscribe_event(fh, sub); + default: + return -EINVAL; + } +} + +static int +vdec_try_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd) +{ + if (cmd->cmd != V4L2_DEC_CMD_STOP) + return -EINVAL; + + return 0; +} + +static int +vdec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd) +{ + struct venus_inst *inst = to_inst(file); + int ret; + + ret = vdec_try_decoder_cmd(file, fh, cmd); + if (ret) + return ret; + + mutex_lock(&inst->lock); + inst->cmd_stop = true; + mutex_unlock(&inst->lock); + + hfi_session_flush(inst); + + return 0; +} + +static const struct v4l2_ioctl_ops vdec_ioctl_ops = { + .vidioc_querycap = vdec_querycap, + .vidioc_enum_fmt_vid_cap_mplane = vdec_enum_fmt, + .vidioc_enum_fmt_vid_out_mplane = vdec_enum_fmt, + .vidioc_s_fmt_vid_cap_mplane = vdec_s_fmt, + .vidioc_s_fmt_vid_out_mplane = vdec_s_fmt, + .vidioc_g_fmt_vid_cap_mplane = vdec_g_fmt, + .vidioc_g_fmt_vid_out_mplane = vdec_g_fmt, + .vidioc_try_fmt_vid_cap_mplane = vdec_try_fmt, + 
.vidioc_try_fmt_vid_out_mplane = vdec_try_fmt, + .vidioc_g_selection = vdec_g_selection, + .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs, + .vidioc_querybuf = v4l2_m2m_ioctl_querybuf, + .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs, + .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf, + .vidioc_qbuf = v4l2_m2m_ioctl_qbuf, + .vidioc_expbuf = v4l2_m2m_ioctl_expbuf, + .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf, + .vidioc_streamon = v4l2_m2m_ioctl_streamon, + .vidioc_streamoff = v4l2_m2m_ioctl_streamoff, + .vidioc_s_parm = vdec_s_parm, + .vidioc_enum_framesizes = vdec_enum_framesizes, + .vidioc_subscribe_event = vdec_subscribe_event, + .vidioc_unsubscribe_event = v4l2_event_unsubscribe, + .vidioc_try_decoder_cmd = vdec_try_decoder_cmd, + .vidioc_decoder_cmd = vdec_decoder_cmd, +}; + +static int vdec_set_properties(struct venus_inst *inst) +{ + struct vdec_controls *ctr = &inst->controls.dec; + struct venus_core *core = inst->core; + struct hfi_enable en = { .enable = 1 }; + u32 ptype; + int ret; + + if (core->res->hfi_version == HFI_VERSION_1XX) { + ptype = HFI_PROPERTY_PARAM_VDEC_CONTINUE_DATA_TRANSFER; + ret = hfi_session_set_property(inst, ptype, &en); + if (ret) + return ret; + } + + if (core->res->hfi_version == HFI_VERSION_3XX || + inst->cap_bufs_mode_dynamic) { + struct hfi_buffer_alloc_mode mode; + + ptype = HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE; + mode.type = HFI_BUFFER_OUTPUT; + mode.mode = HFI_BUFFER_MODE_DYNAMIC; + + ret = hfi_session_set_property(inst, ptype, &mode); + if (ret) + return ret; + } + + if (ctr->post_loop_deb_mode) { + ptype = HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER; + en.enable = 1; + ret = hfi_session_set_property(inst, ptype, &en); + if (ret) + return ret; + } + + return 0; +} + +static int vdec_init_session(struct venus_inst *inst) +{ + int ret; + + ret = hfi_session_init(inst, inst->fmt_out->pixfmt); + if (ret) + return ret; + + ret = venus_helper_set_input_resolution(inst, inst->out_width, + inst->out_height); + if (ret) + goto deinit; + + 
ret = venus_helper_set_color_format(inst, inst->fmt_cap->pixfmt); + if (ret) + goto deinit; + + return 0; +deinit: + hfi_session_deinit(inst); + return ret; +} + +static int vdec_cap_num_buffers(struct venus_inst *inst, unsigned int *num) +{ + struct hfi_buffer_requirements bufreq; + int ret; + + ret = vdec_init_session(inst); + if (ret) + return ret; + + ret = venus_helper_get_bufreq(inst, HFI_BUFFER_OUTPUT, &bufreq); + + *num = bufreq.count_actual; + + hfi_session_deinit(inst); + + return ret; +} + +static int vdec_queue_setup(struct vb2_queue *q, + unsigned int *num_buffers, unsigned int *num_planes, + unsigned int sizes[], struct device *alloc_devs[]) +{ + struct venus_inst *inst = vb2_get_drv_priv(q); + unsigned int p, num; + int ret = 0; + + if (*num_planes) { + if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE && + *num_planes != inst->fmt_out->num_planes) + return -EINVAL; + + if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE && + *num_planes != inst->fmt_cap->num_planes) + return -EINVAL; + + if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE && + sizes[0] < inst->input_buf_size) + return -EINVAL; + + if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE && + sizes[0] < inst->output_buf_size) + return -EINVAL; + + return 0; + } + + switch (q->type) { + case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: + *num_planes = inst->fmt_out->num_planes; + sizes[0] = get_framesize_compressed(inst->out_width, + inst->out_height); + inst->input_buf_size = sizes[0]; + inst->num_input_bufs = *num_buffers; + + ret = vdec_cap_num_buffers(inst, &num); + if (ret) + break; + + inst->num_output_bufs = num; + break; + case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: + *num_planes = inst->fmt_cap->num_planes; + + ret = vdec_cap_num_buffers(inst, &num); + if (ret) + break; + + *num_buffers = max(*num_buffers, num); + + for (p = 0; p < *num_planes; p++) + sizes[p] = get_framesize_uncompressed(p, inst->width, + inst->height); + + inst->num_output_bufs = *num_buffers; + inst->output_buf_size = sizes[0]; + 
break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int vdec_verify_conf(struct venus_inst *inst) +{ + struct hfi_buffer_requirements bufreq; + int ret; + + if (!inst->num_input_bufs || !inst->num_output_bufs) + return -EINVAL; + + ret = venus_helper_get_bufreq(inst, HFI_BUFFER_OUTPUT, &bufreq); + if (ret) + return ret; + + if (inst->num_output_bufs < bufreq.count_actual || + inst->num_output_bufs < bufreq.count_min) + return -EINVAL; + + ret = venus_helper_get_bufreq(inst, HFI_BUFFER_INPUT, &bufreq); + if (ret) + return ret; + + if (inst->num_input_bufs < bufreq.count_min) + return -EINVAL; + + return 0; +} + +static int vdec_start_streaming(struct vb2_queue *q, unsigned int count) +{ + struct venus_inst *inst = vb2_get_drv_priv(q); + struct venus_core *core = inst->core; + u32 ptype; + int ret; + + mutex_lock(&inst->lock); + + if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + inst->streamon_out = 1; + else + inst->streamon_cap = 1; + + if (!(inst->streamon_out & inst->streamon_cap)) { + mutex_unlock(&inst->lock); + return 0; + } + + venus_helper_init_instance(inst); + + inst->reconfig = false; + inst->sequence_cap = 0; + inst->sequence_out = 0; + inst->cmd_stop = false; + + ret = vdec_init_session(inst); + if (ret) + goto bufs_done; + + ret = vdec_set_properties(inst); + if (ret) + goto deinit_sess; + + if (core->res->hfi_version == HFI_VERSION_3XX) { + struct hfi_buffer_size_actual buf_sz; + + ptype = HFI_PROPERTY_PARAM_BUFFER_SIZE_ACTUAL; + buf_sz.type = HFI_BUFFER_OUTPUT; + buf_sz.size = inst->output_buf_size; + + ret = hfi_session_set_property(inst, ptype, &buf_sz); + if (ret) + goto deinit_sess; + } + + ret = vdec_verify_conf(inst); + if (ret) + goto deinit_sess; + + ret = venus_helper_set_num_bufs(inst, inst->num_input_bufs, + VB2_MAX_FRAME); + if (ret) + goto deinit_sess; + + ret = venus_helper_vb2_start_streaming(inst); + if (ret) + goto deinit_sess; + + mutex_unlock(&inst->lock); + + return 0; + +deinit_sess: + 
hfi_session_deinit(inst); +bufs_done: + venus_helper_buffers_done(inst, VB2_BUF_STATE_QUEUED); + if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + inst->streamon_out = 0; + else + inst->streamon_cap = 0; + mutex_unlock(&inst->lock); + return ret; +} + +static const struct vb2_ops vdec_vb2_ops = { + .queue_setup = vdec_queue_setup, + .buf_init = venus_helper_vb2_buf_init, + .buf_prepare = venus_helper_vb2_buf_prepare, + .start_streaming = vdec_start_streaming, + .stop_streaming = venus_helper_vb2_stop_streaming, + .buf_queue = venus_helper_vb2_buf_queue, +}; + +static void vdec_buf_done(struct venus_inst *inst, unsigned int buf_type, + u32 tag, u32 bytesused, u32 data_offset, u32 flags, + u32 hfi_flags, u64 timestamp_us) +{ + enum vb2_buffer_state state = VB2_BUF_STATE_DONE; + struct vb2_v4l2_buffer *vbuf; + struct vb2_buffer *vb; + unsigned int type; + + if (buf_type == HFI_BUFFER_INPUT) + type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; + else + type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; + + vbuf = venus_helper_find_buf(inst, type, tag); + if (!vbuf) + return; + + vbuf->flags = flags; + vbuf->field = V4L2_FIELD_NONE; + + if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + vb = &vbuf->vb2_buf; + vb->planes[0].bytesused = + max_t(unsigned int, inst->output_buf_size, bytesused); + vb->planes[0].data_offset = data_offset; + vb->timestamp = timestamp_us * NSEC_PER_USEC; + vbuf->sequence = inst->sequence_cap++; + + if (inst->cmd_stop) { + vbuf->flags |= V4L2_BUF_FLAG_LAST; + inst->cmd_stop = false; + } + + if (vbuf->flags & V4L2_BUF_FLAG_LAST) { + const struct v4l2_event ev = { .type = V4L2_EVENT_EOS }; + + v4l2_event_queue_fh(&inst->fh, &ev); + } + } else { + vbuf->sequence = inst->sequence_out++; + } + + if (hfi_flags & HFI_BUFFERFLAG_READONLY) + venus_helper_acquire_buf_ref(vbuf); + + if (hfi_flags & HFI_BUFFERFLAG_DATACORRUPT) + state = VB2_BUF_STATE_ERROR; + + v4l2_m2m_buf_done(vbuf, state); +} + +static void vdec_event_notify(struct venus_inst *inst, u32 event, + 
struct hfi_event_data *data) +{ + struct venus_core *core = inst->core; + struct device *dev = core->dev_dec; + static const struct v4l2_event ev = { + .type = V4L2_EVENT_SOURCE_CHANGE, + .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION }; + + switch (event) { + case EVT_SESSION_ERROR: + inst->session_error = true; + dev_err(dev, "dec: event session error %x\n", inst->error); + break; + case EVT_SYS_EVENT_CHANGE: + switch (data->event_type) { + case HFI_EVENT_DATA_SEQUENCE_CHANGED_SUFFICIENT_BUF_RESOURCES: + hfi_session_continue(inst); + dev_dbg(dev, "event sufficient resources\n"); + break; + case HFI_EVENT_DATA_SEQUENCE_CHANGED_INSUFFICIENT_BUF_RESOURCES: + inst->reconfig_height = data->height; + inst->reconfig_width = data->width; + inst->reconfig = true; + + v4l2_event_queue_fh(&inst->fh, &ev); + + dev_dbg(dev, "event not sufficient resources (%ux%u)\n", + data->width, data->height); + break; + case HFI_EVENT_RELEASE_BUFFER_REFERENCE: + venus_helper_release_buf_ref(inst, data->tag); + break; + default: + break; + } + break; + default: + break; + } +} + +static const struct hfi_inst_ops vdec_hfi_ops = { + .buf_done = vdec_buf_done, + .event_notify = vdec_event_notify, +}; + +static void vdec_inst_init(struct venus_inst *inst) +{ + inst->fmt_out = &vdec_formats[6]; + inst->fmt_cap = &vdec_formats[0]; + inst->width = 1280; + inst->height = ALIGN(720, 32); + inst->out_width = 1280; + inst->out_height = 720; + inst->fps = 30; + inst->timeperframe.numerator = 1; + inst->timeperframe.denominator = 30; + + inst->cap_width.min = 64; + inst->cap_width.max = 1920; + if (inst->core->res->hfi_version == HFI_VERSION_3XX) + inst->cap_width.max = 3840; + inst->cap_width.step_size = 1; + inst->cap_height.min = 64; + inst->cap_height.max = ALIGN(1080, 32); + if (inst->core->res->hfi_version == HFI_VERSION_3XX) + inst->cap_height.max = ALIGN(2160, 32); + inst->cap_height.step_size = 1; + inst->cap_framerate.min = 1; + inst->cap_framerate.max = 30; + 
inst->cap_framerate.step_size = 1; + inst->cap_mbs_per_frame.min = 16; + inst->cap_mbs_per_frame.max = 8160; +} + +static const struct v4l2_m2m_ops vdec_m2m_ops = { + .device_run = venus_helper_m2m_device_run, + .job_abort = venus_helper_m2m_job_abort, +}; + +static int m2m_queue_init(void *priv, struct vb2_queue *src_vq, + struct vb2_queue *dst_vq) +{ + struct venus_inst *inst = priv; + int ret; + + src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; + src_vq->io_modes = VB2_MMAP | VB2_DMABUF; + src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; + src_vq->ops = &vdec_vb2_ops; + src_vq->mem_ops = &vb2_dma_sg_memops; + src_vq->drv_priv = inst; + src_vq->buf_struct_size = sizeof(struct venus_buffer); + src_vq->allow_zero_bytesused = 1; + src_vq->min_buffers_needed = 1; + src_vq->dev = inst->core->dev; + ret = vb2_queue_init(src_vq); + if (ret) + return ret; + + dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; + dst_vq->io_modes = VB2_MMAP | VB2_DMABUF; + dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; + dst_vq->ops = &vdec_vb2_ops; + dst_vq->mem_ops = &vb2_dma_sg_memops; + dst_vq->drv_priv = inst; + dst_vq->buf_struct_size = sizeof(struct venus_buffer); + dst_vq->allow_zero_bytesused = 1; + dst_vq->min_buffers_needed = 1; + dst_vq->dev = inst->core->dev; + ret = vb2_queue_init(dst_vq); + if (ret) { + vb2_queue_release(src_vq); + return ret; + } + + return 0; +} + +static int vdec_open(struct file *file) +{ + struct venus_core *core = video_drvdata(file); + struct venus_inst *inst; + int ret; + + inst = kzalloc(sizeof(*inst), GFP_KERNEL); + if (!inst) + return -ENOMEM; + + INIT_LIST_HEAD(&inst->registeredbufs); + INIT_LIST_HEAD(&inst->internalbufs); + INIT_LIST_HEAD(&inst->list); + mutex_init(&inst->lock); + + inst->core = core; + inst->session_type = VIDC_SESSION_TYPE_DEC; + inst->num_output_bufs = 1; + + venus_helper_init_instance(inst); + + ret = pm_runtime_get_sync(core->dev_dec); + if (ret < 0) + goto err_free_inst; + + ret = vdec_ctrl_init(inst); + 
if (ret) + goto err_put_sync; + + ret = hfi_session_create(inst, &vdec_hfi_ops); + if (ret) + goto err_ctrl_deinit; + + vdec_inst_init(inst); + + /* + * create m2m device for every instance, the m2m context scheduling + * is made by firmware side so we do not need to care about. + */ + inst->m2m_dev = v4l2_m2m_init(&vdec_m2m_ops); + if (IS_ERR(inst->m2m_dev)) { + ret = PTR_ERR(inst->m2m_dev); + goto err_session_destroy; + } + + inst->m2m_ctx = v4l2_m2m_ctx_init(inst->m2m_dev, inst, m2m_queue_init); + if (IS_ERR(inst->m2m_ctx)) { + ret = PTR_ERR(inst->m2m_ctx); + goto err_m2m_release; + } + + v4l2_fh_init(&inst->fh, core->vdev_dec); + + inst->fh.ctrl_handler = &inst->ctrl_handler; + v4l2_fh_add(&inst->fh); + inst->fh.m2m_ctx = inst->m2m_ctx; + file->private_data = &inst->fh; + + return 0; + +err_m2m_release: + v4l2_m2m_release(inst->m2m_dev); +err_session_destroy: + hfi_session_destroy(inst); +err_ctrl_deinit: + vdec_ctrl_deinit(inst); +err_put_sync: + pm_runtime_put_sync(core->dev_dec); +err_free_inst: + kfree(inst); + return ret; +} + +static int vdec_close(struct file *file) +{ + struct venus_inst *inst = to_inst(file); + + v4l2_m2m_ctx_release(inst->m2m_ctx); + v4l2_m2m_release(inst->m2m_dev); + vdec_ctrl_deinit(inst); + hfi_session_destroy(inst); + mutex_destroy(&inst->lock); + v4l2_fh_del(&inst->fh); + v4l2_fh_exit(&inst->fh); + + pm_runtime_put_sync(inst->core->dev_dec); + + kfree(inst); + return 0; +} + +static const struct v4l2_file_operations vdec_fops = { + .owner = THIS_MODULE, + .open = vdec_open, + .release = vdec_close, + .unlocked_ioctl = video_ioctl2, + .poll = v4l2_m2m_fop_poll, + .mmap = v4l2_m2m_fop_mmap, +#ifdef CONFIG_COMPAT + .compat_ioctl32 = v4l2_compat_ioctl32, +#endif +}; + +static int vdec_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct video_device *vdev; + struct venus_core *core; + int ret; + + if (!dev->parent) + return -EPROBE_DEFER; + + core = dev_get_drvdata(dev->parent); + if (!core) + return 
-EPROBE_DEFER; + + if (core->res->hfi_version == HFI_VERSION_3XX) { + core->core0_clk = devm_clk_get(dev, "core"); + if (IS_ERR(core->core0_clk)) + return PTR_ERR(core->core0_clk); + } + + platform_set_drvdata(pdev, core); + + vdev = video_device_alloc(); + if (!vdev) + return -ENOMEM; + + strlcpy(vdev->name, "qcom-venus-decoder", sizeof(vdev->name)); + vdev->release = video_device_release; + vdev->fops = &vdec_fops; + vdev->ioctl_ops = &vdec_ioctl_ops; + vdev->vfl_dir = VFL_DIR_M2M; + vdev->v4l2_dev = &core->v4l2_dev; + vdev->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING; + + ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1); + if (ret) + goto err_vdev_release; + + core->vdev_dec = vdev; + core->dev_dec = dev; + + video_set_drvdata(vdev, core); + pm_runtime_enable(dev); + + return 0; + +err_vdev_release: + video_device_release(vdev); + return ret; +} + +static int vdec_remove(struct platform_device *pdev) +{ + struct venus_core *core = dev_get_drvdata(pdev->dev.parent); + + video_unregister_device(core->vdev_dec); + pm_runtime_disable(core->dev_dec); + + return 0; +} + +static __maybe_unused int vdec_runtime_suspend(struct device *dev) +{ + struct venus_core *core = dev_get_drvdata(dev); + + if (core->res->hfi_version == HFI_VERSION_1XX) + return 0; + + writel(0, core->base + WRAPPER_VDEC_VCODEC_POWER_CONTROL); + clk_disable_unprepare(core->core0_clk); + writel(1, core->base + WRAPPER_VDEC_VCODEC_POWER_CONTROL); + + return 0; +} + +static __maybe_unused int vdec_runtime_resume(struct device *dev) +{ + struct venus_core *core = dev_get_drvdata(dev); + int ret; + + if (core->res->hfi_version == HFI_VERSION_1XX) + return 0; + + writel(0, core->base + WRAPPER_VDEC_VCODEC_POWER_CONTROL); + ret = clk_prepare_enable(core->core0_clk); + writel(1, core->base + WRAPPER_VDEC_VCODEC_POWER_CONTROL); + + return ret; +} + +static const struct dev_pm_ops vdec_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) + 
SET_RUNTIME_PM_OPS(vdec_runtime_suspend, vdec_runtime_resume, NULL) +}; + +static const struct of_device_id vdec_dt_match[] = { + { .compatible = "venus-decoder" }, + { } +}; +MODULE_DEVICE_TABLE(of, vdec_dt_match); + +static struct platform_driver qcom_venus_dec_driver = { + .probe = vdec_probe, + .remove = vdec_remove, + .driver = { + .name = "qcom-venus-decoder", + .of_match_table = vdec_dt_match, + .pm = &vdec_pm_ops, + }, +}; +module_platform_driver(qcom_venus_dec_driver); + +MODULE_ALIAS("platform:qcom-venus-decoder"); +MODULE_DESCRIPTION("Qualcomm Venus video decoder driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/qcom/venus/vdec.h b/drivers/media/platform/qcom/venus/vdec.h new file mode 100644 index 000000000000..84b672c54d02 --- /dev/null +++ b/drivers/media/platform/qcom/venus/vdec.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * Copyright (C) 2017 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef __VENUS_VDEC_H__ +#define __VENUS_VDEC_H__ + +struct venus_inst; + +int vdec_ctrl_init(struct venus_inst *inst); +void vdec_ctrl_deinit(struct venus_inst *inst); + +#endif diff --git a/drivers/media/platform/qcom/venus/vdec_ctrls.c b/drivers/media/platform/qcom/venus/vdec_ctrls.c new file mode 100644 index 000000000000..032839bbc967 --- /dev/null +++ b/drivers/media/platform/qcom/venus/vdec_ctrls.c @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * Copyright (C) 2017 Linaro Ltd. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include <linux/types.h> +#include <media/v4l2-ctrls.h> + +#include "core.h" +#include "vdec.h" + +static int vdec_op_s_ctrl(struct v4l2_ctrl *ctrl) +{ + struct venus_inst *inst = ctrl_to_inst(ctrl); + struct vdec_controls *ctr = &inst->controls.dec; + + switch (ctrl->id) { + case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER: + ctr->post_loop_deb_mode = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_H264_PROFILE: + case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE: + case V4L2_CID_MPEG_VIDEO_VPX_PROFILE: + ctr->profile = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_H264_LEVEL: + case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL: + ctr->level = ctrl->val; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int vdec_op_g_volatile_ctrl(struct v4l2_ctrl *ctrl) +{ + struct venus_inst *inst = ctrl_to_inst(ctrl); + struct vdec_controls *ctr = &inst->controls.dec; + union hfi_get_property hprop; + u32 ptype = HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT; + int ret; + + switch (ctrl->id) { + case V4L2_CID_MPEG_VIDEO_H264_PROFILE: + case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE: + case V4L2_CID_MPEG_VIDEO_VPX_PROFILE: + ret = hfi_session_get_property(inst, ptype, &hprop); + if (!ret) + ctr->profile = hprop.profile_level.profile; + ctrl->val = ctr->profile; + break; + case V4L2_CID_MPEG_VIDEO_H264_LEVEL: + case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL: + ret = hfi_session_get_property(inst, ptype, &hprop); + if (!ret) + ctr->level = hprop.profile_level.level; + ctrl->val = ctr->level; + break; + case 
V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER: + ctrl->val = ctr->post_loop_deb_mode; + break; + case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: + ctrl->val = inst->num_output_bufs; + break; + default: + return -EINVAL; + }; + + return 0; +} + +static const struct v4l2_ctrl_ops vdec_ctrl_ops = { + .s_ctrl = vdec_op_s_ctrl, + .g_volatile_ctrl = vdec_op_g_volatile_ctrl, +}; + +int vdec_ctrl_init(struct venus_inst *inst) +{ + struct v4l2_ctrl *ctrl; + int ret; + + ret = v4l2_ctrl_handler_init(&inst->ctrl_handler, 7); + if (ret) + return ret; + + ctrl = v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &vdec_ctrl_ops, + V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE, + V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_CODING_EFFICIENCY, + ~((1 << V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE) | + (1 << V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE)), + V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE); + if (ctrl) + ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE; + + ctrl = v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &vdec_ctrl_ops, + V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL, + V4L2_MPEG_VIDEO_MPEG4_LEVEL_5, + 0, V4L2_MPEG_VIDEO_MPEG4_LEVEL_0); + if (ctrl) + ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE; + + ctrl = v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &vdec_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_PROFILE, + V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH, + ~((1 << V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) | + (1 << V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE) | + (1 << V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) | + (1 << V4L2_MPEG_VIDEO_H264_PROFILE_HIGH) | + (1 << V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH) | + (1 << V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH)), + V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE); + if (ctrl) + ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE; + + ctrl = v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &vdec_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_LEVEL, + V4L2_MPEG_VIDEO_H264_LEVEL_5_1, + 0, V4L2_MPEG_VIDEO_H264_LEVEL_1_0); + if (ctrl) + ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE; + + ctrl = v4l2_ctrl_new_std(&inst->ctrl_handler, &vdec_ctrl_ops, + 
V4L2_CID_MPEG_VIDEO_VPX_PROFILE, 0, 3, 1, 0); + if (ctrl) + ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE; + + v4l2_ctrl_new_std(&inst->ctrl_handler, &vdec_ctrl_ops, + V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER, 0, 1, 1, 0); + + ctrl = v4l2_ctrl_new_std(&inst->ctrl_handler, &vdec_ctrl_ops, + V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, 1, 32, 1, 1); + if (ctrl) + ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE; + + ret = inst->ctrl_handler.error; + if (ret) { + v4l2_ctrl_handler_free(&inst->ctrl_handler); + return ret; + } + + return 0; +} + +void vdec_ctrl_deinit(struct venus_inst *inst) +{ + v4l2_ctrl_handler_free(&inst->ctrl_handler); +} diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c new file mode 100644 index 000000000000..6f123a387cf9 --- /dev/null +++ b/drivers/media/platform/qcom/venus/venc.c @@ -0,0 +1,1290 @@ +/* + * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * Copyright (C) 2017 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#include <linux/clk.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/slab.h> +#include <media/v4l2-mem2mem.h> +#include <media/videobuf2-dma-sg.h> +#include <media/v4l2-ioctl.h> +#include <media/v4l2-event.h> +#include <media/v4l2-ctrls.h> + +#include "hfi_venus_io.h" +#include "core.h" +#include "helpers.h" +#include "venc.h" + +#define NUM_B_FRAMES_MAX 4 + +static u32 get_framesize_uncompressed(unsigned int plane, u32 width, u32 height) +{ + u32 y_stride, uv_stride, y_plane; + u32 y_sclines, uv_sclines, uv_plane; + u32 size; + + y_stride = ALIGN(width, 128); + uv_stride = ALIGN(width, 128); + y_sclines = ALIGN(height, 32); + uv_sclines = ALIGN(((height + 1) >> 1), 16); + + y_plane = y_stride * y_sclines; + uv_plane = uv_stride * uv_sclines + SZ_4K; + size = y_plane + uv_plane + SZ_8K; + size = ALIGN(size, SZ_4K); + + return size; +} + +static u32 get_framesize_compressed(u32 width, u32 height) +{ + u32 sz = ALIGN(height, 32) * ALIGN(width, 32) * 3 / 2 / 2; + + return ALIGN(sz, SZ_4K); +} + +/* + * Three resons to keep MPLANE formats (despite that the number of planes + * currently is one): + * - the MPLANE formats allow only one plane to be used + * - the downstream driver use MPLANE formats too + * - future firmware versions could add support for >1 planes + */ +static const struct venus_format venc_formats[] = { + { + .pixfmt = V4L2_PIX_FMT_NV12, + .num_planes = 1, + .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE, + }, { + .pixfmt = V4L2_PIX_FMT_MPEG4, + .num_planes = 1, + .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE, + }, { + .pixfmt = V4L2_PIX_FMT_H263, + .num_planes = 1, + .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE, + }, { + .pixfmt = V4L2_PIX_FMT_H264, + .num_planes = 1, + .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE, + }, { + .pixfmt = V4L2_PIX_FMT_VP8, + .num_planes = 1, + .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE, + }, +}; + +static const struct venus_format * +find_format(struct 
venus_inst *inst, u32 pixfmt, u32 type) +{ + const struct venus_format *fmt = venc_formats; + unsigned int size = ARRAY_SIZE(venc_formats); + unsigned int i; + + for (i = 0; i < size; i++) { + if (fmt[i].pixfmt == pixfmt) + break; + } + + if (i == size || fmt[i].type != type) + return NULL; + + if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE && + !venus_helper_check_codec(inst, fmt[i].pixfmt)) + return NULL; + + return &fmt[i]; +} + +static const struct venus_format * +find_format_by_index(struct venus_inst *inst, unsigned int index, u32 type) +{ + const struct venus_format *fmt = venc_formats; + unsigned int size = ARRAY_SIZE(venc_formats); + unsigned int i, k = 0; + + if (index > size) + return NULL; + + for (i = 0; i < size; i++) { + if (fmt[i].type != type) + continue; + if (k == index) + break; + k++; + } + + if (i == size) + return NULL; + + if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE && + !venus_helper_check_codec(inst, fmt[i].pixfmt)) + return NULL; + + return &fmt[i]; +} + +static int venc_v4l2_to_hfi(int id, int value) +{ + switch (id) { + case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL: + switch (value) { + case V4L2_MPEG_VIDEO_MPEG4_LEVEL_0: + default: + return HFI_MPEG4_LEVEL_0; + case V4L2_MPEG_VIDEO_MPEG4_LEVEL_0B: + return HFI_MPEG4_LEVEL_0b; + case V4L2_MPEG_VIDEO_MPEG4_LEVEL_1: + return HFI_MPEG4_LEVEL_1; + case V4L2_MPEG_VIDEO_MPEG4_LEVEL_2: + return HFI_MPEG4_LEVEL_2; + case V4L2_MPEG_VIDEO_MPEG4_LEVEL_3: + return HFI_MPEG4_LEVEL_3; + case V4L2_MPEG_VIDEO_MPEG4_LEVEL_4: + return HFI_MPEG4_LEVEL_4; + case V4L2_MPEG_VIDEO_MPEG4_LEVEL_5: + return HFI_MPEG4_LEVEL_5; + } + case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE: + switch (value) { + case V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE: + default: + return HFI_MPEG4_PROFILE_SIMPLE; + case V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE: + return HFI_MPEG4_PROFILE_ADVANCEDSIMPLE; + } + case V4L2_CID_MPEG_VIDEO_H264_PROFILE: + switch (value) { + case V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE: + return HFI_H264_PROFILE_BASELINE; 
+ case V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE: + return HFI_H264_PROFILE_CONSTRAINED_BASE; + case V4L2_MPEG_VIDEO_H264_PROFILE_MAIN: + return HFI_H264_PROFILE_MAIN; + case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH: + default: + return HFI_H264_PROFILE_HIGH; + } + case V4L2_CID_MPEG_VIDEO_H264_LEVEL: + switch (value) { + case V4L2_MPEG_VIDEO_H264_LEVEL_1_0: + return HFI_H264_LEVEL_1; + case V4L2_MPEG_VIDEO_H264_LEVEL_1B: + return HFI_H264_LEVEL_1b; + case V4L2_MPEG_VIDEO_H264_LEVEL_1_1: + return HFI_H264_LEVEL_11; + case V4L2_MPEG_VIDEO_H264_LEVEL_1_2: + return HFI_H264_LEVEL_12; + case V4L2_MPEG_VIDEO_H264_LEVEL_1_3: + return HFI_H264_LEVEL_13; + case V4L2_MPEG_VIDEO_H264_LEVEL_2_0: + return HFI_H264_LEVEL_2; + case V4L2_MPEG_VIDEO_H264_LEVEL_2_1: + return HFI_H264_LEVEL_21; + case V4L2_MPEG_VIDEO_H264_LEVEL_2_2: + return HFI_H264_LEVEL_22; + case V4L2_MPEG_VIDEO_H264_LEVEL_3_0: + return HFI_H264_LEVEL_3; + case V4L2_MPEG_VIDEO_H264_LEVEL_3_1: + return HFI_H264_LEVEL_31; + case V4L2_MPEG_VIDEO_H264_LEVEL_3_2: + return HFI_H264_LEVEL_32; + case V4L2_MPEG_VIDEO_H264_LEVEL_4_0: + return HFI_H264_LEVEL_4; + case V4L2_MPEG_VIDEO_H264_LEVEL_4_1: + return HFI_H264_LEVEL_41; + case V4L2_MPEG_VIDEO_H264_LEVEL_4_2: + return HFI_H264_LEVEL_42; + case V4L2_MPEG_VIDEO_H264_LEVEL_5_0: + default: + return HFI_H264_LEVEL_5; + case V4L2_MPEG_VIDEO_H264_LEVEL_5_1: + return HFI_H264_LEVEL_51; + } + case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE: + switch (value) { + case V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC: + default: + return HFI_H264_ENTROPY_CAVLC; + case V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC: + return HFI_H264_ENTROPY_CABAC; + } + case V4L2_CID_MPEG_VIDEO_VPX_PROFILE: + switch (value) { + case 0: + default: + return HFI_VPX_PROFILE_VERSION_0; + case 1: + return HFI_VPX_PROFILE_VERSION_1; + case 2: + return HFI_VPX_PROFILE_VERSION_2; + case 3: + return HFI_VPX_PROFILE_VERSION_3; + } + } + + return 0; +} + +static int +venc_querycap(struct file *file, void *fh, struct 
v4l2_capability *cap) +{ + strlcpy(cap->driver, "qcom-venus", sizeof(cap->driver)); + strlcpy(cap->card, "Qualcomm Venus video encoder", sizeof(cap->card)); + strlcpy(cap->bus_info, "platform:qcom-venus", sizeof(cap->bus_info)); + + return 0; +} + +static int venc_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f) +{ + struct venus_inst *inst = to_inst(file); + const struct venus_format *fmt; + + fmt = find_format_by_index(inst, f->index, f->type); + + memset(f->reserved, 0, sizeof(f->reserved)); + + if (!fmt) + return -EINVAL; + + f->pixelformat = fmt->pixfmt; + + return 0; +} + +static const struct venus_format * +venc_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f) +{ + struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp; + struct v4l2_plane_pix_format *pfmt = pixmp->plane_fmt; + const struct venus_format *fmt; + unsigned int p; + + memset(pfmt[0].reserved, 0, sizeof(pfmt[0].reserved)); + memset(pixmp->reserved, 0, sizeof(pixmp->reserved)); + + fmt = find_format(inst, pixmp->pixelformat, f->type); + if (!fmt) { + if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) + pixmp->pixelformat = V4L2_PIX_FMT_H264; + else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + pixmp->pixelformat = V4L2_PIX_FMT_NV12; + else + return NULL; + fmt = find_format(inst, pixmp->pixelformat, f->type); + pixmp->width = 1280; + pixmp->height = 720; + } + + pixmp->width = clamp(pixmp->width, inst->cap_width.min, + inst->cap_width.max); + pixmp->height = clamp(pixmp->height, inst->cap_height.min, + inst->cap_height.max); + + if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + pixmp->height = ALIGN(pixmp->height, 32); + + pixmp->width = ALIGN(pixmp->width, 2); + pixmp->height = ALIGN(pixmp->height, 2); + + if (pixmp->field == V4L2_FIELD_ANY) + pixmp->field = V4L2_FIELD_NONE; + pixmp->num_planes = fmt->num_planes; + pixmp->flags = 0; + + if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + for (p = 0; p < pixmp->num_planes; p++) { + pfmt[p].sizeimage = + 
get_framesize_uncompressed(p, pixmp->width, + pixmp->height); + + pfmt[p].bytesperline = ALIGN(pixmp->width, 128); + } + } else { + pfmt[0].sizeimage = get_framesize_compressed(pixmp->width, + pixmp->height); + pfmt[0].bytesperline = 0; + } + + return fmt; +} + +static int venc_try_fmt(struct file *file, void *fh, struct v4l2_format *f) +{ + struct venus_inst *inst = to_inst(file); + + venc_try_fmt_common(inst, f); + + return 0; +} + +static int venc_s_fmt(struct file *file, void *fh, struct v4l2_format *f) +{ + struct venus_inst *inst = to_inst(file); + struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp; + struct v4l2_pix_format_mplane orig_pixmp; + const struct venus_format *fmt; + struct v4l2_format format; + u32 pixfmt_out = 0, pixfmt_cap = 0; + + orig_pixmp = *pixmp; + + fmt = venc_try_fmt_common(inst, f); + if (!fmt) + return -EINVAL; + + if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + pixfmt_out = pixmp->pixelformat; + pixfmt_cap = inst->fmt_cap->pixfmt; + } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + pixfmt_cap = pixmp->pixelformat; + pixfmt_out = inst->fmt_out->pixfmt; + } + + memset(&format, 0, sizeof(format)); + + format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; + format.fmt.pix_mp.pixelformat = pixfmt_out; + format.fmt.pix_mp.width = orig_pixmp.width; + format.fmt.pix_mp.height = orig_pixmp.height; + venc_try_fmt_common(inst, &format); + + if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + inst->out_width = format.fmt.pix_mp.width; + inst->out_height = format.fmt.pix_mp.height; + inst->colorspace = pixmp->colorspace; + inst->ycbcr_enc = pixmp->ycbcr_enc; + inst->quantization = pixmp->quantization; + inst->xfer_func = pixmp->xfer_func; + } + + memset(&format, 0, sizeof(format)); + + format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; + format.fmt.pix_mp.pixelformat = pixfmt_cap; + format.fmt.pix_mp.width = orig_pixmp.width; + format.fmt.pix_mp.height = orig_pixmp.height; + venc_try_fmt_common(inst, &format); + + inst->width = 
format.fmt.pix_mp.width; + inst->height = format.fmt.pix_mp.height; + + if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + inst->fmt_out = fmt; + else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) + inst->fmt_cap = fmt; + + return 0; +} + +static int venc_g_fmt(struct file *file, void *fh, struct v4l2_format *f) +{ + struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp; + struct venus_inst *inst = to_inst(file); + const struct venus_format *fmt; + + if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) + fmt = inst->fmt_cap; + else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + fmt = inst->fmt_out; + else + return -EINVAL; + + pixmp->pixelformat = fmt->pixfmt; + + if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + pixmp->width = inst->width; + pixmp->height = inst->height; + pixmp->colorspace = inst->colorspace; + pixmp->ycbcr_enc = inst->ycbcr_enc; + pixmp->quantization = inst->quantization; + pixmp->xfer_func = inst->xfer_func; + } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + pixmp->width = inst->out_width; + pixmp->height = inst->out_height; + } + + venc_try_fmt_common(inst, f); + + return 0; +} + +static int +venc_g_selection(struct file *file, void *fh, struct v4l2_selection *s) +{ + struct venus_inst *inst = to_inst(file); + + if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) + return -EINVAL; + + switch (s->target) { + case V4L2_SEL_TGT_CROP_DEFAULT: + case V4L2_SEL_TGT_CROP_BOUNDS: + s->r.width = inst->width; + s->r.height = inst->height; + break; + case V4L2_SEL_TGT_CROP: + s->r.width = inst->out_width; + s->r.height = inst->out_height; + break; + default: + return -EINVAL; + } + + s->r.top = 0; + s->r.left = 0; + + return 0; +} + +static int +venc_s_selection(struct file *file, void *fh, struct v4l2_selection *s) +{ + struct venus_inst *inst = to_inst(file); + + if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) + return -EINVAL; + + switch (s->target) { + case V4L2_SEL_TGT_CROP: + if (s->r.width != inst->out_width || + s->r.height != 
inst->out_height || + s->r.top != 0 || s->r.left != 0) + return -EINVAL; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int venc_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a) +{ + struct venus_inst *inst = to_inst(file); + struct v4l2_outputparm *out = &a->parm.output; + struct v4l2_fract *timeperframe = &out->timeperframe; + u64 us_per_frame, fps; + + if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE && + a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + return -EINVAL; + + memset(out->reserved, 0, sizeof(out->reserved)); + + if (!timeperframe->denominator) + timeperframe->denominator = inst->timeperframe.denominator; + if (!timeperframe->numerator) + timeperframe->numerator = inst->timeperframe.numerator; + + out->capability = V4L2_CAP_TIMEPERFRAME; + + us_per_frame = timeperframe->numerator * (u64)USEC_PER_SEC; + do_div(us_per_frame, timeperframe->denominator); + + if (!us_per_frame) + return -EINVAL; + + fps = (u64)USEC_PER_SEC; + do_div(fps, us_per_frame); + + inst->timeperframe = *timeperframe; + inst->fps = fps; + + return 0; +} + +static int venc_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a) +{ + struct venus_inst *inst = to_inst(file); + + if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE && + a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + return -EINVAL; + + a->parm.output.capability |= V4L2_CAP_TIMEPERFRAME; + a->parm.output.timeperframe = inst->timeperframe; + + return 0; +} + +static int venc_enum_framesizes(struct file *file, void *fh, + struct v4l2_frmsizeenum *fsize) +{ + struct venus_inst *inst = to_inst(file); + const struct venus_format *fmt; + + fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE; + + fmt = find_format(inst, fsize->pixel_format, + V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE); + if (!fmt) { + fmt = find_format(inst, fsize->pixel_format, + V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE); + if (!fmt) + return -EINVAL; + } + + if (fsize->index) + return -EINVAL; + + fsize->stepwise.min_width = 
inst->cap_width.min; + fsize->stepwise.max_width = inst->cap_width.max; + fsize->stepwise.step_width = inst->cap_width.step_size; + fsize->stepwise.min_height = inst->cap_height.min; + fsize->stepwise.max_height = inst->cap_height.max; + fsize->stepwise.step_height = inst->cap_height.step_size; + + return 0; +} + +static int venc_enum_frameintervals(struct file *file, void *fh, + struct v4l2_frmivalenum *fival) +{ + struct venus_inst *inst = to_inst(file); + const struct venus_format *fmt; + + fival->type = V4L2_FRMIVAL_TYPE_STEPWISE; + + fmt = find_format(inst, fival->pixel_format, + V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE); + if (!fmt) { + fmt = find_format(inst, fival->pixel_format, + V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE); + if (!fmt) + return -EINVAL; + } + + if (fival->index) + return -EINVAL; + + if (!fival->width || !fival->height) + return -EINVAL; + + if (fival->width > inst->cap_width.max || + fival->width < inst->cap_width.min || + fival->height > inst->cap_height.max || + fival->height < inst->cap_height.min) + return -EINVAL; + + fival->stepwise.min.numerator = 1; + fival->stepwise.min.denominator = inst->cap_framerate.max; + fival->stepwise.max.numerator = 1; + fival->stepwise.max.denominator = inst->cap_framerate.min; + fival->stepwise.step.numerator = 1; + fival->stepwise.step.denominator = inst->cap_framerate.max; + + return 0; +} + +static const struct v4l2_ioctl_ops venc_ioctl_ops = { + .vidioc_querycap = venc_querycap, + .vidioc_enum_fmt_vid_cap_mplane = venc_enum_fmt, + .vidioc_enum_fmt_vid_out_mplane = venc_enum_fmt, + .vidioc_s_fmt_vid_cap_mplane = venc_s_fmt, + .vidioc_s_fmt_vid_out_mplane = venc_s_fmt, + .vidioc_g_fmt_vid_cap_mplane = venc_g_fmt, + .vidioc_g_fmt_vid_out_mplane = venc_g_fmt, + .vidioc_try_fmt_vid_cap_mplane = venc_try_fmt, + .vidioc_try_fmt_vid_out_mplane = venc_try_fmt, + .vidioc_g_selection = venc_g_selection, + .vidioc_s_selection = venc_s_selection, + .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs, + .vidioc_querybuf = 
v4l2_m2m_ioctl_querybuf,
+	.vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+	.vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+	.vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+	.vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+	.vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+	.vidioc_streamon = v4l2_m2m_ioctl_streamon,
+	.vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+	.vidioc_s_parm = venc_s_parm,
+	.vidioc_g_parm = venc_g_parm,
+	.vidioc_enum_framesizes = venc_enum_framesizes,
+	.vidioc_enum_frameintervals = venc_enum_frameintervals,
+	.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+	.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/*
+ * Push the current encoder configuration (frame rate, VUI timing for H264,
+ * IDR/intra period, rate control mode, target/peak bitrate, profile/level)
+ * to the firmware via HFI property writes. Returns 0 on success or the
+ * first hfi_session_set_property() error.
+ */
+static int venc_set_properties(struct venus_inst *inst)
+{
+	struct venc_controls *ctr = &inst->controls.enc;
+	struct hfi_intra_period intra_period;
+	struct hfi_profile_level pl;
+	struct hfi_framerate frate;
+	struct hfi_bitrate brate;
+	struct hfi_idr_period idrp;
+	u32 ptype, rate_control, bitrate, profile = 0, level = 0;
+	int ret;
+
+	ptype = HFI_PROPERTY_CONFIG_FRAME_RATE;
+	frate.buffer_type = HFI_BUFFER_OUTPUT;
+	frate.framerate = inst->fps * (1 << 16);
+
+	ret = hfi_session_set_property(inst, ptype, &frate);
+	if (ret)
+		return ret;
+
+	if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_H264) {
+		struct hfi_h264_vui_timing_info info;
+
+		ptype = HFI_PROPERTY_PARAM_VENC_H264_VUI_TIMING_INFO;
+		info.enable = 1;
+		info.fixed_framerate = 1;
+		info.time_scale = NSEC_PER_SEC;
+
+		ret = hfi_session_set_property(inst, ptype, &info);
+		if (ret)
+			return ret;
+	}
+
+	ptype = HFI_PROPERTY_CONFIG_VENC_IDR_PERIOD;
+	idrp.idr_period = ctr->gop_size;
+	ret = hfi_session_set_property(inst, ptype, &idrp);
+	if (ret)
+		return ret;
+
+	if (ctr->num_b_frames) {
+		u32 max_num_b_frames = NUM_B_FRAMES_MAX;
+
+		ptype = HFI_PROPERTY_PARAM_VENC_MAX_NUM_B_FRAMES;
+		ret = hfi_session_set_property(inst, ptype, &max_num_b_frames);
+		if (ret)
+			return ret;
+	}
+
+	/* intra_period = pframes + bframes + 1 */
+	if (!ctr->num_p_frames)
+		ctr->num_p_frames = 2 * 15 - 1;
+
+	/*
+	 * NOTE(fix): the statement above previously ended with a comma, not a
+	 * semicolon. The comma operator made the ptype assignment below part
+	 * of the if-body, so whenever num_p_frames was non-zero the intra
+	 * period was sent under the previous (wrong) property ID.
+	 */
+	ptype = HFI_PROPERTY_CONFIG_VENC_INTRA_PERIOD;
+	intra_period.pframes = ctr->num_p_frames;
+	intra_period.bframes = ctr->num_b_frames;
+
+	ret = hfi_session_set_property(inst, ptype, &intra_period);
+	if (ret)
+		return ret;
+
+	if (ctr->bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR)
+		rate_control = HFI_RATE_CONTROL_VBR_CFR;
+	else
+		rate_control = HFI_RATE_CONTROL_CBR_CFR;
+
+	ptype = HFI_PROPERTY_PARAM_VENC_RATE_CONTROL;
+	ret = hfi_session_set_property(inst, ptype, &rate_control);
+	if (ret)
+		return ret;
+
+	if (!ctr->bitrate)
+		bitrate = 64000;
+	else
+		bitrate = ctr->bitrate;
+
+	ptype = HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE;
+	brate.bitrate = bitrate;
+	brate.layer_id = 0;
+
+	ret = hfi_session_set_property(inst, ptype, &brate);
+	if (ret)
+		return ret;
+
+	if (!ctr->bitrate_peak)
+		bitrate *= 2;
+	else
+		bitrate = ctr->bitrate_peak;
+
+	ptype = HFI_PROPERTY_CONFIG_VENC_MAX_BITRATE;
+	brate.bitrate = bitrate;
+	brate.layer_id = 0;
+
+	ret = hfi_session_set_property(inst, ptype, &brate);
+	if (ret)
+		return ret;
+
+	if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_H264) {
+		profile = venc_v4l2_to_hfi(V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+					   ctr->profile.h264);
+		level = venc_v4l2_to_hfi(V4L2_CID_MPEG_VIDEO_H264_LEVEL,
+					 ctr->level.h264);
+	} else if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_VP8) {
+		profile = venc_v4l2_to_hfi(V4L2_CID_MPEG_VIDEO_VPX_PROFILE,
+					   ctr->profile.vpx);
+		level = 0;
+	} else if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_MPEG4) {
+		profile = venc_v4l2_to_hfi(V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE,
+					   ctr->profile.mpeg4);
+		level = venc_v4l2_to_hfi(V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL,
+					 ctr->level.mpeg4);
+	} else if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_H263) {
+		profile = 0;
+		level = 0;
+	}
+
+	ptype = HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT;
+	pl.profile = profile;
+	pl.level = level;
+
+	ret = hfi_session_set_property(inst, ptype, &pl);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int venc_init_session(struct venus_inst *inst)
+{
+	int ret;
+
+	ret = 
hfi_session_init(inst, inst->fmt_cap->pixfmt); + if (ret) + return ret; + + ret = venus_helper_set_input_resolution(inst, inst->width, + inst->height); + if (ret) + goto deinit; + + ret = venus_helper_set_output_resolution(inst, inst->width, + inst->height); + if (ret) + goto deinit; + + ret = venus_helper_set_color_format(inst, inst->fmt_out->pixfmt); + if (ret) + goto deinit; + + return 0; +deinit: + hfi_session_deinit(inst); + return ret; +} + +static int venc_out_num_buffers(struct venus_inst *inst, unsigned int *num) +{ + struct hfi_buffer_requirements bufreq; + int ret; + + ret = venc_init_session(inst); + if (ret) + return ret; + + ret = venus_helper_get_bufreq(inst, HFI_BUFFER_INPUT, &bufreq); + + *num = bufreq.count_actual; + + hfi_session_deinit(inst); + + return ret; +} + +static int venc_queue_setup(struct vb2_queue *q, + unsigned int *num_buffers, unsigned int *num_planes, + unsigned int sizes[], struct device *alloc_devs[]) +{ + struct venus_inst *inst = vb2_get_drv_priv(q); + unsigned int p, num, min = 4; + int ret = 0; + + if (*num_planes) { + if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE && + *num_planes != inst->fmt_out->num_planes) + return -EINVAL; + + if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE && + *num_planes != inst->fmt_cap->num_planes) + return -EINVAL; + + if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE && + sizes[0] < inst->input_buf_size) + return -EINVAL; + + if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE && + sizes[0] < inst->output_buf_size) + return -EINVAL; + + return 0; + } + + switch (q->type) { + case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: + *num_planes = inst->fmt_out->num_planes; + + ret = venc_out_num_buffers(inst, &num); + if (ret) + break; + + num = max(num, min); + *num_buffers = max(*num_buffers, num); + inst->num_input_bufs = *num_buffers; + + for (p = 0; p < *num_planes; ++p) + sizes[p] = get_framesize_uncompressed(p, inst->width, + inst->height); + inst->input_buf_size = sizes[0]; + break; + case 
V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: + *num_planes = inst->fmt_cap->num_planes; + *num_buffers = max(*num_buffers, min); + inst->num_output_bufs = *num_buffers; + sizes[0] = get_framesize_compressed(inst->width, inst->height); + inst->output_buf_size = sizes[0]; + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int venc_verify_conf(struct venus_inst *inst) +{ + struct hfi_buffer_requirements bufreq; + int ret; + + if (!inst->num_input_bufs || !inst->num_output_bufs) + return -EINVAL; + + ret = venus_helper_get_bufreq(inst, HFI_BUFFER_OUTPUT, &bufreq); + if (ret) + return ret; + + if (inst->num_output_bufs < bufreq.count_actual || + inst->num_output_bufs < bufreq.count_min) + return -EINVAL; + + ret = venus_helper_get_bufreq(inst, HFI_BUFFER_INPUT, &bufreq); + if (ret) + return ret; + + if (inst->num_input_bufs < bufreq.count_actual || + inst->num_input_bufs < bufreq.count_min) + return -EINVAL; + + return 0; +} + +static int venc_start_streaming(struct vb2_queue *q, unsigned int count) +{ + struct venus_inst *inst = vb2_get_drv_priv(q); + int ret; + + mutex_lock(&inst->lock); + + if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + inst->streamon_out = 1; + else + inst->streamon_cap = 1; + + if (!(inst->streamon_out & inst->streamon_cap)) { + mutex_unlock(&inst->lock); + return 0; + } + + venus_helper_init_instance(inst); + + inst->sequence_cap = 0; + inst->sequence_out = 0; + + ret = venc_init_session(inst); + if (ret) + goto bufs_done; + + ret = venc_set_properties(inst); + if (ret) + goto deinit_sess; + + ret = venc_verify_conf(inst); + if (ret) + goto deinit_sess; + + ret = venus_helper_set_num_bufs(inst, inst->num_input_bufs, + inst->num_output_bufs); + if (ret) + goto deinit_sess; + + ret = venus_helper_vb2_start_streaming(inst); + if (ret) + goto deinit_sess; + + mutex_unlock(&inst->lock); + + return 0; + +deinit_sess: + hfi_session_deinit(inst); +bufs_done: + venus_helper_buffers_done(inst, VB2_BUF_STATE_QUEUED); + if (q->type == 
V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + inst->streamon_out = 0; + else + inst->streamon_cap = 0; + mutex_unlock(&inst->lock); + return ret; +} + +static const struct vb2_ops venc_vb2_ops = { + .queue_setup = venc_queue_setup, + .buf_init = venus_helper_vb2_buf_init, + .buf_prepare = venus_helper_vb2_buf_prepare, + .start_streaming = venc_start_streaming, + .stop_streaming = venus_helper_vb2_stop_streaming, + .buf_queue = venus_helper_vb2_buf_queue, +}; + +static void venc_buf_done(struct venus_inst *inst, unsigned int buf_type, + u32 tag, u32 bytesused, u32 data_offset, u32 flags, + u32 hfi_flags, u64 timestamp_us) +{ + struct vb2_v4l2_buffer *vbuf; + struct vb2_buffer *vb; + unsigned int type; + + if (buf_type == HFI_BUFFER_INPUT) + type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; + else + type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; + + vbuf = venus_helper_find_buf(inst, type, tag); + if (!vbuf) + return; + + vb = &vbuf->vb2_buf; + vb->planes[0].bytesused = bytesused; + vb->planes[0].data_offset = data_offset; + + vbuf->flags = flags; + + if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + vb->timestamp = timestamp_us * NSEC_PER_USEC; + vbuf->sequence = inst->sequence_cap++; + } else { + vbuf->sequence = inst->sequence_out++; + } + + v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE); +} + +static void venc_event_notify(struct venus_inst *inst, u32 event, + struct hfi_event_data *data) +{ + struct device *dev = inst->core->dev_enc; + + if (event == EVT_SESSION_ERROR) { + inst->session_error = true; + dev_err(dev, "enc: event session error %x\n", inst->error); + } +} + +static const struct hfi_inst_ops venc_hfi_ops = { + .buf_done = venc_buf_done, + .event_notify = venc_event_notify, +}; + +static const struct v4l2_m2m_ops venc_m2m_ops = { + .device_run = venus_helper_m2m_device_run, + .job_abort = venus_helper_m2m_job_abort, +}; + +static int m2m_queue_init(void *priv, struct vb2_queue *src_vq, + struct vb2_queue *dst_vq) +{ + struct venus_inst *inst = priv; + int ret; + + 
src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; + src_vq->io_modes = VB2_MMAP | VB2_DMABUF; + src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; + src_vq->ops = &venc_vb2_ops; + src_vq->mem_ops = &vb2_dma_sg_memops; + src_vq->drv_priv = inst; + src_vq->buf_struct_size = sizeof(struct venus_buffer); + src_vq->allow_zero_bytesused = 1; + src_vq->min_buffers_needed = 1; + src_vq->dev = inst->core->dev; + if (inst->core->res->hfi_version == HFI_VERSION_1XX) + src_vq->bidirectional = 1; + ret = vb2_queue_init(src_vq); + if (ret) + return ret; + + dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; + dst_vq->io_modes = VB2_MMAP | VB2_DMABUF; + dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; + dst_vq->ops = &venc_vb2_ops; + dst_vq->mem_ops = &vb2_dma_sg_memops; + dst_vq->drv_priv = inst; + dst_vq->buf_struct_size = sizeof(struct venus_buffer); + dst_vq->allow_zero_bytesused = 1; + dst_vq->min_buffers_needed = 1; + dst_vq->dev = inst->core->dev; + ret = vb2_queue_init(dst_vq); + if (ret) { + vb2_queue_release(src_vq); + return ret; + } + + return 0; +} + +static void venc_inst_init(struct venus_inst *inst) +{ + inst->fmt_cap = &venc_formats[2]; + inst->fmt_out = &venc_formats[0]; + inst->width = 1280; + inst->height = ALIGN(720, 32); + inst->out_width = 1280; + inst->out_height = 720; + inst->fps = 15; + inst->timeperframe.numerator = 1; + inst->timeperframe.denominator = 15; + + inst->cap_width.min = 96; + inst->cap_width.max = 1920; + if (inst->core->res->hfi_version == HFI_VERSION_3XX) + inst->cap_width.max = 3840; + inst->cap_width.step_size = 2; + inst->cap_height.min = 64; + inst->cap_height.max = ALIGN(1080, 32); + if (inst->core->res->hfi_version == HFI_VERSION_3XX) + inst->cap_height.max = ALIGN(2160, 32); + inst->cap_height.step_size = 2; + inst->cap_framerate.min = 1; + inst->cap_framerate.max = 30; + inst->cap_framerate.step_size = 1; + inst->cap_mbs_per_frame.min = 24; + inst->cap_mbs_per_frame.max = 8160; +} + +static int venc_open(struct file 
*file) +{ + struct venus_core *core = video_drvdata(file); + struct venus_inst *inst; + int ret; + + inst = kzalloc(sizeof(*inst), GFP_KERNEL); + if (!inst) + return -ENOMEM; + + INIT_LIST_HEAD(&inst->registeredbufs); + INIT_LIST_HEAD(&inst->internalbufs); + INIT_LIST_HEAD(&inst->list); + mutex_init(&inst->lock); + + inst->core = core; + inst->session_type = VIDC_SESSION_TYPE_ENC; + + venus_helper_init_instance(inst); + + ret = pm_runtime_get_sync(core->dev_enc); + if (ret < 0) + goto err_free_inst; + + ret = venc_ctrl_init(inst); + if (ret) + goto err_put_sync; + + ret = hfi_session_create(inst, &venc_hfi_ops); + if (ret) + goto err_ctrl_deinit; + + venc_inst_init(inst); + + /* + * create m2m device for every instance, the m2m context scheduling + * is made by firmware side so we do not need to care about. + */ + inst->m2m_dev = v4l2_m2m_init(&venc_m2m_ops); + if (IS_ERR(inst->m2m_dev)) { + ret = PTR_ERR(inst->m2m_dev); + goto err_session_destroy; + } + + inst->m2m_ctx = v4l2_m2m_ctx_init(inst->m2m_dev, inst, m2m_queue_init); + if (IS_ERR(inst->m2m_ctx)) { + ret = PTR_ERR(inst->m2m_ctx); + goto err_m2m_release; + } + + v4l2_fh_init(&inst->fh, core->vdev_enc); + + inst->fh.ctrl_handler = &inst->ctrl_handler; + v4l2_fh_add(&inst->fh); + inst->fh.m2m_ctx = inst->m2m_ctx; + file->private_data = &inst->fh; + + return 0; + +err_m2m_release: + v4l2_m2m_release(inst->m2m_dev); +err_session_destroy: + hfi_session_destroy(inst); +err_ctrl_deinit: + venc_ctrl_deinit(inst); +err_put_sync: + pm_runtime_put_sync(core->dev_enc); +err_free_inst: + kfree(inst); + return ret; +} + +static int venc_close(struct file *file) +{ + struct venus_inst *inst = to_inst(file); + + v4l2_m2m_ctx_release(inst->m2m_ctx); + v4l2_m2m_release(inst->m2m_dev); + venc_ctrl_deinit(inst); + hfi_session_destroy(inst); + mutex_destroy(&inst->lock); + v4l2_fh_del(&inst->fh); + v4l2_fh_exit(&inst->fh); + + pm_runtime_put_sync(inst->core->dev_enc); + + kfree(inst); + return 0; +} + +static const struct 
v4l2_file_operations venc_fops = { + .owner = THIS_MODULE, + .open = venc_open, + .release = venc_close, + .unlocked_ioctl = video_ioctl2, + .poll = v4l2_m2m_fop_poll, + .mmap = v4l2_m2m_fop_mmap, +#ifdef CONFIG_COMPAT + .compat_ioctl32 = v4l2_compat_ioctl32, +#endif +}; + +static int venc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct video_device *vdev; + struct venus_core *core; + int ret; + + if (!dev->parent) + return -EPROBE_DEFER; + + core = dev_get_drvdata(dev->parent); + if (!core) + return -EPROBE_DEFER; + + if (core->res->hfi_version == HFI_VERSION_3XX) { + core->core1_clk = devm_clk_get(dev, "core"); + if (IS_ERR(core->core1_clk)) + return PTR_ERR(core->core1_clk); + } + + platform_set_drvdata(pdev, core); + + vdev = video_device_alloc(); + if (!vdev) + return -ENOMEM; + + strlcpy(vdev->name, "qcom-venus-encoder", sizeof(vdev->name)); + vdev->release = video_device_release; + vdev->fops = &venc_fops; + vdev->ioctl_ops = &venc_ioctl_ops; + vdev->vfl_dir = VFL_DIR_M2M; + vdev->v4l2_dev = &core->v4l2_dev; + vdev->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING; + + ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1); + if (ret) + goto err_vdev_release; + + core->vdev_enc = vdev; + core->dev_enc = dev; + + video_set_drvdata(vdev, core); + pm_runtime_enable(dev); + + return 0; + +err_vdev_release: + video_device_release(vdev); + return ret; +} + +static int venc_remove(struct platform_device *pdev) +{ + struct venus_core *core = dev_get_drvdata(pdev->dev.parent); + + video_unregister_device(core->vdev_enc); + pm_runtime_disable(core->dev_enc); + + return 0; +} + +static __maybe_unused int venc_runtime_suspend(struct device *dev) +{ + struct venus_core *core = dev_get_drvdata(dev); + + if (core->res->hfi_version == HFI_VERSION_1XX) + return 0; + + writel(0, core->base + WRAPPER_VENC_VCODEC_POWER_CONTROL); + clk_disable_unprepare(core->core1_clk); + writel(1, core->base + WRAPPER_VENC_VCODEC_POWER_CONTROL); 
+ + return 0; +} + +static __maybe_unused int venc_runtime_resume(struct device *dev) +{ + struct venus_core *core = dev_get_drvdata(dev); + int ret; + + if (core->res->hfi_version == HFI_VERSION_1XX) + return 0; + + writel(0, core->base + WRAPPER_VENC_VCODEC_POWER_CONTROL); + ret = clk_prepare_enable(core->core1_clk); + writel(1, core->base + WRAPPER_VENC_VCODEC_POWER_CONTROL); + + return ret; +} + +static const struct dev_pm_ops venc_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) + SET_RUNTIME_PM_OPS(venc_runtime_suspend, venc_runtime_resume, NULL) +}; + +static const struct of_device_id venc_dt_match[] = { + { .compatible = "venus-encoder" }, + { } +}; +MODULE_DEVICE_TABLE(of, venc_dt_match); + +static struct platform_driver qcom_venus_enc_driver = { + .probe = venc_probe, + .remove = venc_remove, + .driver = { + .name = "qcom-venus-encoder", + .of_match_table = venc_dt_match, + .pm = &venc_pm_ops, + }, +}; +module_platform_driver(qcom_venus_enc_driver); + +MODULE_ALIAS("platform:qcom-venus-encoder"); +MODULE_DESCRIPTION("Qualcomm Venus video encoder driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/qcom/venus/venc.h b/drivers/media/platform/qcom/venus/venc.h new file mode 100644 index 000000000000..9daca669f307 --- /dev/null +++ b/drivers/media/platform/qcom/venus/venc.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * Copyright (C) 2017 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#ifndef __VENUS_VENC_H__ +#define __VENUS_VENC_H__ + +struct venus_inst; + +int venc_ctrl_init(struct venus_inst *inst); +void venc_ctrl_deinit(struct venus_inst *inst); + +#endif diff --git a/drivers/media/platform/qcom/venus/venc_ctrls.c b/drivers/media/platform/qcom/venus/venc_ctrls.c new file mode 100644 index 000000000000..ab0fe51ff0f7 --- /dev/null +++ b/drivers/media/platform/qcom/venus/venc_ctrls.c @@ -0,0 +1,270 @@ +/* + * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * Copyright (C) 2017 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#include <linux/types.h> +#include <media/v4l2-ctrls.h> + +#include "core.h" +#include "venc.h" + +#define BITRATE_MIN 32000 +#define BITRATE_MAX 160000000 +#define BITRATE_DEFAULT 1000000 +#define BITRATE_DEFAULT_PEAK (BITRATE_DEFAULT * 2) +#define BITRATE_STEP 100 +#define SLICE_BYTE_SIZE_MAX 1024 +#define SLICE_BYTE_SIZE_MIN 1024 +#define SLICE_MB_SIZE_MAX 300 +#define INTRA_REFRESH_MBS_MAX 300 +#define AT_SLICE_BOUNDARY \ + V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY + +static int venc_op_s_ctrl(struct v4l2_ctrl *ctrl) +{ + struct venus_inst *inst = ctrl_to_inst(ctrl); + struct venc_controls *ctr = &inst->controls.enc; + + switch (ctrl->id) { + case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: + ctr->bitrate_mode = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_BITRATE: + ctr->bitrate = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK: + ctr->bitrate_peak = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE: + ctr->h264_entropy_mode = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE: + ctr->profile.mpeg4 = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_H264_PROFILE: + ctr->profile.h264 = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_VPX_PROFILE: + ctr->profile.vpx = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL: + ctr->level.mpeg4 = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_H264_LEVEL: + ctr->level.h264 = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP: + ctr->h264_i_qp = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP: + ctr->h264_p_qp = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP: + ctr->h264_b_qp = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_H264_MIN_QP: + ctr->h264_min_qp = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_H264_MAX_QP: + ctr->h264_max_qp = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: + ctr->multi_slice_mode = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES: + 
ctr->multi_slice_max_bytes = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB: + ctr->multi_slice_max_mb = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA: + ctr->h264_loop_filter_alpha = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA: + ctr->h264_loop_filter_beta = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE: + ctr->h264_loop_filter_mode = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_HEADER_MODE: + ctr->header_mode = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB: + break; + case V4L2_CID_MPEG_VIDEO_GOP_SIZE: + ctr->gop_size = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD: + ctr->h264_i_period = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_VPX_MIN_QP: + ctr->vp8_min_qp = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_VPX_MAX_QP: + ctr->vp8_max_qp = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_B_FRAMES: + ctr->num_b_frames = ctrl->val; + break; + default: + return -EINVAL; + } + + return 0; +} + +static const struct v4l2_ctrl_ops venc_ctrl_ops = { + .s_ctrl = venc_op_s_ctrl, +}; + +int venc_ctrl_init(struct venus_inst *inst) +{ + int ret; + + ret = v4l2_ctrl_handler_init(&inst->ctrl_handler, 27); + if (ret) + return ret; + + v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_BITRATE_MODE, + V4L2_MPEG_VIDEO_BITRATE_MODE_CBR, + ~((1 << V4L2_MPEG_VIDEO_BITRATE_MODE_VBR) | + (1 << V4L2_MPEG_VIDEO_BITRATE_MODE_CBR)), + V4L2_MPEG_VIDEO_BITRATE_MODE_VBR); + + v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE, + V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC, + 0, V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC); + + v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE, + V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_CODING_EFFICIENCY, + ~((1 << V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE) | + (1 << V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE)), + 
V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE); + + v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL, + V4L2_MPEG_VIDEO_MPEG4_LEVEL_5, + 0, V4L2_MPEG_VIDEO_MPEG4_LEVEL_0); + + v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_PROFILE, + V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH, + ~((1 << V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) | + (1 << V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE) | + (1 << V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) | + (1 << V4L2_MPEG_VIDEO_H264_PROFILE_HIGH) | + (1 << V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH) | + (1 << V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH)), + V4L2_MPEG_VIDEO_H264_PROFILE_HIGH); + + v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_LEVEL, + V4L2_MPEG_VIDEO_H264_LEVEL_5_1, + 0, V4L2_MPEG_VIDEO_H264_LEVEL_1_0); + + v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE, + AT_SLICE_BOUNDARY, + 0, V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED); + + v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_HEADER_MODE, + V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME, + 1 << V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME, + V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE); + + v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE, + V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES, + 0, V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_BITRATE, BITRATE_MIN, BITRATE_MAX, + BITRATE_STEP, BITRATE_DEFAULT); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_BITRATE_PEAK, BITRATE_MIN, BITRATE_MAX, + BITRATE_STEP, BITRATE_DEFAULT_PEAK); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_VPX_PROFILE, 0, 3, 1, 0); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + 
V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP, 1, 51, 1, 26); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP, 1, 51, 1, 28); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP, 1, 51, 1, 30); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_MIN_QP, 1, 51, 1, 1); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_MAX_QP, 1, 51, 1, 51); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES, SLICE_BYTE_SIZE_MIN, + SLICE_BYTE_SIZE_MAX, 1, SLICE_BYTE_SIZE_MIN); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB, 1, + SLICE_MB_SIZE_MAX, 1, 1); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA, -6, 6, 1, 0); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA, -6, 6, 1, 0); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB, + 0, INTRA_REFRESH_MBS_MAX, 1, 0); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_GOP_SIZE, 0, (1 << 16) - 1, 1, 12); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_VPX_MIN_QP, 1, 128, 1, 1); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_VPX_MAX_QP, 1, 128, 1, 128); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_B_FRAMES, 0, 4, 1, 0); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_I_PERIOD, 0, (1 << 16) - 1, 1, 0); + + ret = inst->ctrl_handler.error; + if (ret) + goto err; + + ret = v4l2_ctrl_handler_setup(&inst->ctrl_handler); + if (ret) + goto err; + + return 0; +err: + v4l2_ctrl_handler_free(&inst->ctrl_handler); + return ret; +} + +void venc_ctrl_deinit(struct 
venus_inst *inst) +{ + v4l2_ctrl_handler_free(&inst->ctrl_handler); +} diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index c52d94c018bb..5c5912bf500f 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -1214,10 +1214,6 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt) case V4L2_PIX_FMT_SGBRG10: descr = "10-bit Bayer GBGB/RGRG"; break; case V4L2_PIX_FMT_SGRBG10: descr = "10-bit Bayer GRGR/BGBG"; break; case V4L2_PIX_FMT_SRGGB10: descr = "10-bit Bayer RGRG/GBGB"; break; - case V4L2_PIX_FMT_SBGGR12: descr = "12-bit Bayer BGBG/GRGR"; break; - case V4L2_PIX_FMT_SGBRG12: descr = "12-bit Bayer GBGB/RGRG"; break; - case V4L2_PIX_FMT_SGRBG12: descr = "12-bit Bayer GRGR/BGBG"; break; - case V4L2_PIX_FMT_SRGGB12: descr = "12-bit Bayer RGRG/GBGB"; break; case V4L2_PIX_FMT_SBGGR10P: descr = "10-bit Bayer BGBG/GRGR Packed"; break; case V4L2_PIX_FMT_SGBRG10P: descr = "10-bit Bayer GBGB/RGRG Packed"; break; case V4L2_PIX_FMT_SGRBG10P: descr = "10-bit Bayer GRGR/BGBG Packed"; break; @@ -1230,6 +1226,14 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt) case V4L2_PIX_FMT_SGBRG10DPCM8: descr = "8-bit Bayer GBGB/RGRG (DPCM)"; break; case V4L2_PIX_FMT_SGRBG10DPCM8: descr = "8-bit Bayer GRGR/BGBG (DPCM)"; break; case V4L2_PIX_FMT_SRGGB10DPCM8: descr = "8-bit Bayer RGRG/GBGB (DPCM)"; break; + case V4L2_PIX_FMT_SBGGR12: descr = "12-bit Bayer BGBG/GRGR"; break; + case V4L2_PIX_FMT_SGBRG12: descr = "12-bit Bayer GBGB/RGRG"; break; + case V4L2_PIX_FMT_SGRBG12: descr = "12-bit Bayer GRGR/BGBG"; break; + case V4L2_PIX_FMT_SRGGB12: descr = "12-bit Bayer RGRG/GBGB"; break; + case V4L2_PIX_FMT_SBGGR12P: descr = "12-bit Bayer BGBG/GRGR Packed"; break; + case V4L2_PIX_FMT_SGBRG12P: descr = "12-bit Bayer GBGB/RGRG Packed"; break; + case V4L2_PIX_FMT_SGRBG12P: descr = "12-bit Bayer GRGR/BGBG Packed"; break; + case V4L2_PIX_FMT_SRGGB12P: descr = "12-bit Bayer RGRG/GBGB Packed"; break; case 
V4L2_PIX_FMT_SBGGR16: descr = "16-bit Bayer BGBG/GRGR (Exp.)"; break; case V4L2_PIX_FMT_SN9C20X_I420: descr = "GSPCA SN9C20X I420"; break; case V4L2_PIX_FMT_SPCA501: descr = "GSPCA SPCA501"; break; @@ -1269,6 +1273,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt) case V4L2_PIX_FMT_VC1_ANNEX_G: descr = "VC-1 (SMPTE 412M Annex G)"; break; case V4L2_PIX_FMT_VC1_ANNEX_L: descr = "VC-1 (SMPTE 412M Annex L)"; break; case V4L2_PIX_FMT_VP8: descr = "VP8"; break; + case V4L2_PIX_FMT_VP9: descr = "VP9"; break; case V4L2_PIX_FMT_CPIA1: descr = "GSPCA CPiA YUV"; break; case V4L2_PIX_FMT_WNVA: descr = "WNVA"; break; case V4L2_PIX_FMT_SN9C10X: descr = "GSPCA SN9C10X"; break; @@ -2133,6 +2138,47 @@ static int v4l_try_ext_ctrls(const struct v4l2_ioctl_ops *ops, -EINVAL; } +/* + * The selection API specified originally that the _MPLANE buffer types + * shouldn't be used. The reasons for this are lost in the mists of time + * (or just really crappy memories). Regardless, this is really annoying + * for userspace. So to keep things simple we map _MPLANE buffer types + * to their 'regular' counterparts before calling the driver. And we + * restore it afterwards. This way applications can use either buffer + * type and drivers don't need to check for both. 
+ */ +static int v4l_g_selection(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct v4l2_selection *p = arg; + u32 old_type = p->type; + int ret; + + if (p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) + p->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + else if (p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + p->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; + ret = ops->vidioc_g_selection(file, fh, p); + p->type = old_type; + return ret; +} + +static int v4l_s_selection(const struct v4l2_ioctl_ops *ops, + struct file *file, void *fh, void *arg) +{ + struct v4l2_selection *p = arg; + u32 old_type = p->type; + int ret; + + if (p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) + p->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + else if (p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + p->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; + ret = ops->vidioc_s_selection(file, fh, p); + p->type = old_type; + return ret; +} + static int v4l_g_crop(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { @@ -2152,7 +2198,7 @@ static int v4l_g_crop(const struct v4l2_ioctl_ops *ops, else s.target = V4L2_SEL_TGT_CROP_ACTIVE; - ret = ops->vidioc_g_selection(file, fh, &s); + ret = v4l_g_selection(ops, file, fh, &s); /* copying results to old structure on success */ if (!ret) @@ -2179,7 +2225,7 @@ static int v4l_s_crop(const struct v4l2_ioctl_ops *ops, else s.target = V4L2_SEL_TGT_CROP_ACTIVE; - return ops->vidioc_s_selection(file, fh, &s); + return v4l_s_selection(ops, file, fh, &s); } static int v4l_cropcap(const struct v4l2_ioctl_ops *ops, @@ -2221,7 +2267,7 @@ static int v4l_cropcap(const struct v4l2_ioctl_ops *ops, else s.target = V4L2_SEL_TGT_CROP_BOUNDS; - ret = ops->vidioc_g_selection(file, fh, &s); + ret = v4l_g_selection(ops, file, fh, &s); if (ret) return ret; p->bounds = s.r; @@ -2232,7 +2278,7 @@ static int v4l_cropcap(const struct v4l2_ioctl_ops *ops, else s.target = V4L2_SEL_TGT_CROP_DEFAULT; - ret = ops->vidioc_g_selection(file, fh, &s); + ret = 
v4l_g_selection(ops, file, fh, &s); if (ret) return ret; p->defrect = s.r; @@ -2542,8 +2588,8 @@ static struct v4l2_ioctl_info v4l2_ioctls[] = { IOCTL_INFO_FNC(VIDIOC_CROPCAP, v4l_cropcap, v4l_print_cropcap, INFO_FL_CLEAR(v4l2_cropcap, type)), IOCTL_INFO_FNC(VIDIOC_G_CROP, v4l_g_crop, v4l_print_crop, INFO_FL_CLEAR(v4l2_crop, type)), IOCTL_INFO_FNC(VIDIOC_S_CROP, v4l_s_crop, v4l_print_crop, INFO_FL_PRIO), - IOCTL_INFO_STD(VIDIOC_G_SELECTION, vidioc_g_selection, v4l_print_selection, INFO_FL_CLEAR(v4l2_selection, r)), - IOCTL_INFO_STD(VIDIOC_S_SELECTION, vidioc_s_selection, v4l_print_selection, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_selection, r)), + IOCTL_INFO_FNC(VIDIOC_G_SELECTION, v4l_g_selection, v4l_print_selection, INFO_FL_CLEAR(v4l2_selection, r)), + IOCTL_INFO_FNC(VIDIOC_S_SELECTION, v4l_s_selection, v4l_print_selection, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_selection, r)), IOCTL_INFO_STD(VIDIOC_G_JPEGCOMP, vidioc_g_jpegcomp, v4l_print_jpegcompression, 0), IOCTL_INFO_STD(VIDIOC_S_JPEGCOMP, vidioc_s_jpegcomp, v4l_print_jpegcompression, INFO_FL_PRIO), IOCTL_INFO_FNC(VIDIOC_QUERYSTD, v4l_querystd, v4l_print_std, 0), diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c index 6bc27e7b2a33..f62e68aa04c4 100644 --- a/drivers/media/v4l2-core/v4l2-mem2mem.c +++ b/drivers/media/v4l2-core/v4l2-mem2mem.c @@ -126,6 +126,43 @@ void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx) } EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove); +void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx, + struct vb2_v4l2_buffer *vbuf) +{ + struct v4l2_m2m_buffer *b; + unsigned long flags; + + spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); + b = container_of(vbuf, struct v4l2_m2m_buffer, vb); + list_del(&b->list); + q_ctx->num_rdy--; + spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); +} +EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_buf); + +struct vb2_v4l2_buffer * +v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx) + +{ + 
struct v4l2_m2m_buffer *b, *tmp; + struct vb2_v4l2_buffer *ret = NULL; + unsigned long flags; + + spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); + list_for_each_entry_safe(b, tmp, &q_ctx->rdy_queue, list) { + if (b->vb.vb2_buf.index == idx) { + list_del(&b->list); + q_ctx->num_rdy--; + ret = &b->vb; + break; + } + } + spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); + + return ret; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_idx); + /* * Scheduling handlers */ diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c index 9ccf7f5e0e2e..622ddbb2216f 100644 --- a/drivers/media/v4l2-core/videobuf2-core.c +++ b/drivers/media/v4l2-core/videobuf2-core.c @@ -200,6 +200,9 @@ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb) int plane; int ret = -ENOMEM; + if (q->bidirectional) + dma_dir = DMA_BIDIRECTIONAL; + /* * Allocate memory for all planes in this buffer * NOTE: mmapped areas should be page aligned diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index df990bb8c873..98d84965b7a1 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -77,6 +77,7 @@ static unsigned int fmax = 515633; * @qcom_fifo: enables qcom specific fifo pio read logic. * @qcom_dml: enables qcom specific dma glue for dma transfers. * @reversed_irq_handling: handle data irq before cmd irq. 
+ * @any_blksize: true if block any sizes are supported */ struct variant_data { unsigned int clkreg; @@ -103,6 +104,7 @@ struct variant_data { bool qcom_fifo; bool qcom_dml; bool reversed_irq_handling; + bool any_blksize; }; static struct variant_data variant_arm = { @@ -200,6 +202,7 @@ static struct variant_data variant_ux500v2 = { .pwrreg_clkgate = true, .busy_detect = true, .pwrreg_nopower = true, + .any_blksize = true, }; static struct variant_data variant_qcom = { @@ -218,6 +221,7 @@ static struct variant_data variant_qcom = { .explicit_mclk_control = true, .qcom_fifo = true, .qcom_dml = true, + .any_blksize = true, }; static int mmci_card_busy(struct mmc_host *mmc) @@ -240,10 +244,11 @@ static int mmci_card_busy(struct mmc_host *mmc) static int mmci_validate_data(struct mmci_host *host, struct mmc_data *data) { + struct variant_data *variant = host->variant; + if (!data) return 0; - - if (!is_power_of_2(data->blksz)) { + if (!is_power_of_2(data->blksz) && !variant->any_blksize) { dev_err(mmc_dev(host->mmc), "unsupported block size (%d bytes)\n", data->blksz); return -EINVAL; @@ -796,7 +801,6 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) writel(host->size, base + MMCIDATALENGTH); blksz_bits = ffs(data->blksz) - 1; - BUG_ON(1 << blksz_bits != data->blksz); if (variant->blksz_datactrl16) datactrl = MCI_DPSM_ENABLE | (data->blksz << 16); diff --git a/drivers/net/wireless/ath/wcn36xx/Kconfig b/drivers/net/wireless/ath/wcn36xx/Kconfig index 591ebaea8265..20bf967a70b9 100644 --- a/drivers/net/wireless/ath/wcn36xx/Kconfig +++ b/drivers/net/wireless/ath/wcn36xx/Kconfig @@ -1,6 +1,8 @@ config WCN36XX tristate "Qualcomm Atheros WCN3660/3680 support" depends on MAC80211 && HAS_DMA + depends on QCOM_WCNSS_CTRL || QCOM_WCNSS_CTRL=n + depends on RPMSG || RPMSG=n ---help--- This module adds support for wireless adapters based on Qualcomm Atheros WCN3660 and WCN3680 mobile chipsets. 
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c index 231fd022f0f5..87dfdaf9044c 100644 --- a/drivers/net/wireless/ath/wcn36xx/dxe.c +++ b/drivers/net/wireless/ath/wcn36xx/dxe.c @@ -23,6 +23,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/interrupt.h> +#include <linux/soc/qcom/smem_state.h> #include "wcn36xx.h" #include "txrx.h" @@ -151,9 +152,12 @@ int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn) goto out_err; /* Initialize SMSM state Clear TX Enable RING EMPTY STATE */ - ret = wcn->ctrl_ops->smsm_change_state( - WCN36XX_SMSM_WLAN_TX_ENABLE, - WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY); + ret = qcom_smem_state_update_bits(wcn->tx_enable_state, + WCN36XX_SMSM_WLAN_TX_ENABLE | + WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY, + WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY); + if (ret) + goto out_err; return 0; @@ -678,9 +682,9 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn, * notify chip about new frame through SMSM bus. */ if (is_low && vif_priv->pw_state == WCN36XX_BMPS) { - wcn->ctrl_ops->smsm_change_state( - 0, - WCN36XX_SMSM_WLAN_TX_ENABLE); + qcom_smem_state_update_bits(wcn->tx_rings_empty_state, + WCN36XX_SMSM_WLAN_TX_ENABLE, + WCN36XX_SMSM_WLAN_TX_ENABLE); } else { /* indicate End Of Packet and generate interrupt on descriptor * done. 
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h index 4f87ef1e1eb8..b765c647319d 100644 --- a/drivers/net/wireless/ath/wcn36xx/hal.h +++ b/drivers/net/wireless/ath/wcn36xx/hal.h @@ -350,6 +350,8 @@ enum wcn36xx_hal_host_msg_type { WCN36XX_HAL_AVOID_FREQ_RANGE_IND = 233, + WCN36XX_HAL_PRINT_REG_INFO_IND = 259, + WCN36XX_HAL_MSG_MAX = WCN36XX_HAL_MSG_TYPE_MAX_ENUM_SIZE }; @@ -4703,4 +4705,18 @@ struct stats_class_b_ind { u32 rx_time_total; }; +/* WCN36XX_HAL_PRINT_REG_INFO_IND */ +struct wcn36xx_hal_print_reg_info_ind { + struct wcn36xx_hal_msg_header header; + + u32 count; + u32 scenario; + u32 reason; + + struct { + u32 addr; + u32 value; + } regs[]; +} __packed; + #endif /* _HAL_H_ */ diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index e1d59da2ad20..f0b4d4325ff7 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -21,6 +21,10 @@ #include <linux/platform_device.h> #include <linux/of_address.h> #include <linux/of_device.h> +#include <linux/of_irq.h> +#include <linux/rpmsg.h> +#include <linux/soc/qcom/smem_state.h> +#include <linux/soc/qcom/wcnss_ctrl.h> #include "wcn36xx.h" unsigned int wcn36xx_dbg_mask; @@ -368,6 +372,8 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed) wcn36xx_dbg(WCN36XX_DBG_MAC, "mac config changed 0x%08x\n", changed); + mutex_lock(&wcn->conf_mutex); + if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { int ch = WCN36XX_HW_CHANNEL(wcn); wcn36xx_dbg(WCN36XX_DBG_MAC, "wcn36xx_config channel switch=%d\n", @@ -378,6 +384,8 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed) } } + mutex_unlock(&wcn->conf_mutex); + return 0; } @@ -392,6 +400,8 @@ static void wcn36xx_configure_filter(struct ieee80211_hw *hw, wcn36xx_dbg(WCN36XX_DBG_MAC, "mac configure filter\n"); + mutex_lock(&wcn->conf_mutex); + *total &= FIF_ALLMULTI; fp = (void *)(unsigned long)multicast; @@ -404,6 +414,8 @@ static 
void wcn36xx_configure_filter(struct ieee80211_hw *hw, else if (NL80211_IFTYPE_STATION == vif->type && tmp->sta_assoc) wcn36xx_smd_set_mc_list(wcn, vif, fp); } + + mutex_unlock(&wcn->conf_mutex); kfree(fp); } @@ -467,6 +479,8 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, key_conf->key, key_conf->keylen); + mutex_lock(&wcn->conf_mutex); + switch (key_conf->cipher) { case WLAN_CIPHER_SUITE_WEP40: vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP40; @@ -561,26 +575,86 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, } out: + mutex_unlock(&wcn->conf_mutex); + return ret; } -static void wcn36xx_sw_scan_start(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - const u8 *mac_addr) +static void wcn36xx_hw_scan_worker(struct work_struct *work) { - struct wcn36xx *wcn = hw->priv; + struct wcn36xx *wcn = container_of(work, struct wcn36xx, scan_work); + struct cfg80211_scan_request *req = wcn->scan_req; + u8 channels[WCN36XX_HAL_PNO_MAX_NETW_CHANNELS_EX]; + struct cfg80211_scan_info scan_info = {}; + bool aborted = false; + int i; + + wcn36xx_dbg(WCN36XX_DBG_MAC, "mac80211 scan %d channels worker\n", req->n_channels); + + for (i = 0; i < req->n_channels; i++) + channels[i] = req->channels[i]->hw_value; + + wcn36xx_smd_update_scan_params(wcn, channels, req->n_channels); wcn36xx_smd_init_scan(wcn, HAL_SYS_MODE_SCAN); - wcn36xx_smd_start_scan(wcn); + for (i = 0; i < req->n_channels; i++) { + mutex_lock(&wcn->scan_lock); + aborted = wcn->scan_aborted; + mutex_unlock(&wcn->scan_lock); + + if (aborted) + break; + + wcn->scan_freq = req->channels[i]->center_freq; + wcn->scan_band = req->channels[i]->band; + + wcn36xx_smd_start_scan(wcn, req->channels[i]->hw_value); + msleep(30); + wcn36xx_smd_end_scan(wcn, req->channels[i]->hw_value); + + wcn->scan_freq = 0; + } + wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN); + + scan_info.aborted = aborted; + ieee80211_scan_completed(wcn->hw, &scan_info); + + mutex_lock(&wcn->scan_lock); + 
wcn->scan_req = NULL; + mutex_unlock(&wcn->scan_lock); } -static void wcn36xx_sw_scan_complete(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) +static int wcn36xx_hw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_scan_request *hw_req) { struct wcn36xx *wcn = hw->priv; - wcn36xx_smd_end_scan(wcn); - wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN); + mutex_lock(&wcn->scan_lock); + if (wcn->scan_req) { + mutex_unlock(&wcn->scan_lock); + return -EBUSY; + } + + wcn->scan_aborted = false; + wcn->scan_req = &hw_req->req; + mutex_unlock(&wcn->scan_lock); + + schedule_work(&wcn->scan_work); + + return 0; +} + +static void wcn36xx_cancel_hw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct wcn36xx *wcn = hw->priv; + + mutex_lock(&wcn->scan_lock); + wcn->scan_aborted = true; + mutex_unlock(&wcn->scan_lock); + + cancel_work_sync(&wcn->scan_work); } static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta, @@ -663,6 +737,8 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw, wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss info changed vif %p changed 0x%08x\n", vif, changed); + mutex_lock(&wcn->conf_mutex); + if (changed & BSS_CHANGED_BEACON_INFO) { wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss changed dtim period %d\n", @@ -725,12 +801,17 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw, bss_conf->aid); vif_priv->sta_assoc = true; - rcu_read_lock(); + + /* + * Holding conf_mutex ensures mutal exclusion with + * wcn36xx_sta_remove() and as such ensures that sta + * won't be freed while we're operating on it. As such + * we do not need to hold the rcu_read_lock(). + */ sta = ieee80211_find_sta(vif, bss_conf->bssid); if (!sta) { wcn36xx_err("sta %pM is not found\n", bss_conf->bssid); - rcu_read_unlock(); goto out; } sta_priv = wcn36xx_sta_to_priv(sta); @@ -749,7 +830,6 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw, * place where AID is available. 
*/ wcn36xx_smd_config_sta(wcn, vif, sta); - rcu_read_unlock(); } else { wcn36xx_dbg(WCN36XX_DBG_MAC, "disassociated bss %pM vif %pM AID=%d\n", @@ -811,6 +891,9 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw, } } out: + + mutex_unlock(&wcn->conf_mutex); + return; } @@ -820,7 +903,10 @@ static int wcn36xx_set_rts_threshold(struct ieee80211_hw *hw, u32 value) struct wcn36xx *wcn = hw->priv; wcn36xx_dbg(WCN36XX_DBG_MAC, "mac set RTS threshold %d\n", value); + mutex_lock(&wcn->conf_mutex); wcn36xx_smd_update_cfg(wcn, WCN36XX_HAL_CFG_RTS_THRESHOLD, value); + mutex_unlock(&wcn->conf_mutex); + return 0; } @@ -831,8 +917,12 @@ static void wcn36xx_remove_interface(struct ieee80211_hw *hw, struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); wcn36xx_dbg(WCN36XX_DBG_MAC, "mac remove interface vif %p\n", vif); + mutex_lock(&wcn->conf_mutex); + list_del(&vif_priv->list); wcn36xx_smd_delete_sta_self(wcn, vif->addr); + + mutex_unlock(&wcn->conf_mutex); } static int wcn36xx_add_interface(struct ieee80211_hw *hw, @@ -853,9 +943,13 @@ static int wcn36xx_add_interface(struct ieee80211_hw *hw, return -EOPNOTSUPP; } + mutex_lock(&wcn->conf_mutex); + list_add(&vif_priv->list, &wcn->vif_list); wcn36xx_smd_add_sta_self(wcn, vif); + mutex_unlock(&wcn->conf_mutex); + return 0; } @@ -868,6 +962,8 @@ static int wcn36xx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta add vif %p sta %pM\n", vif, sta->addr); + mutex_lock(&wcn->conf_mutex); + spin_lock_init(&sta_priv->ampdu_lock); sta_priv->vif = vif_priv; /* @@ -879,6 +975,9 @@ static int wcn36xx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, sta_priv->aid = sta->aid; wcn36xx_smd_config_sta(wcn, vif, sta); } + + mutex_unlock(&wcn->conf_mutex); + return 0; } @@ -892,8 +991,13 @@ static int wcn36xx_sta_remove(struct ieee80211_hw *hw, wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta remove vif %p sta %pM index %d\n", vif, sta->addr, sta_priv->sta_index); + 
mutex_lock(&wcn->conf_mutex); + wcn36xx_smd_delete_sta(wcn, sta_priv->sta_index); sta_priv->vif = NULL; + + mutex_unlock(&wcn->conf_mutex); + return 0; } @@ -937,6 +1041,8 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw, wcn36xx_dbg(WCN36XX_DBG_MAC, "mac ampdu action action %d tid %d\n", action, tid); + mutex_lock(&wcn->conf_mutex); + switch (action) { case IEEE80211_AMPDU_RX_START: sta_priv->tid = tid; @@ -976,6 +1082,8 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw, wcn36xx_err("Unknown AMPDU action\n"); } + mutex_unlock(&wcn->conf_mutex); + return 0; } @@ -993,8 +1101,8 @@ static const struct ieee80211_ops wcn36xx_ops = { .configure_filter = wcn36xx_configure_filter, .tx = wcn36xx_tx, .set_key = wcn36xx_set_key, - .sw_scan_start = wcn36xx_sw_scan_start, - .sw_scan_complete = wcn36xx_sw_scan_complete, + .hw_scan = wcn36xx_hw_scan, + .cancel_hw_scan = wcn36xx_cancel_hw_scan, .bss_info_changed = wcn36xx_bss_info_changed, .set_rts_threshold = wcn36xx_set_rts_threshold, .sta_add = wcn36xx_sta_add, @@ -1019,6 +1127,7 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn) ieee80211_hw_set(wcn->hw, SUPPORTS_PS); ieee80211_hw_set(wcn->hw, SIGNAL_DBM); ieee80211_hw_set(wcn->hw, HAS_RATE_CONTROL); + ieee80211_hw_set(wcn->hw, SINGLE_SCAN_ON_ALL_BANDS); wcn->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP) | @@ -1026,7 +1135,11 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn) BIT(NL80211_IFTYPE_MESH_POINT); wcn->hw->wiphy->bands[NL80211_BAND_2GHZ] = &wcn_band_2ghz; - wcn->hw->wiphy->bands[NL80211_BAND_5GHZ] = &wcn_band_5ghz; + if (wcn->rf_id != RF_IRIS_WCN3620) + wcn->hw->wiphy->bands[NL80211_BAND_5GHZ] = &wcn_band_5ghz; + + wcn->hw->wiphy->max_scan_ssids = WCN36XX_MAX_SCAN_SSIDS; + wcn->hw->wiphy->max_scan_ie_len = WCN36XX_MAX_SCAN_IE_LEN; wcn->hw->wiphy->cipher_suites = cipher_suites; wcn->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); @@ -1053,13 +1166,13 @@ static int 
wcn36xx_platform_get_resources(struct wcn36xx *wcn, struct platform_device *pdev) { struct device_node *mmio_node; + struct device_node *iris_node; struct resource *res; int index; int ret; /* Set TX IRQ */ - res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, - "wcnss_wlantx_irq"); + res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "tx"); if (!res) { wcn36xx_err("failed to get tx_irq\n"); return -ENOENT; @@ -1067,14 +1180,29 @@ static int wcn36xx_platform_get_resources(struct wcn36xx *wcn, wcn->tx_irq = res->start; /* Set RX IRQ */ - res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, - "wcnss_wlanrx_irq"); + res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "rx"); if (!res) { wcn36xx_err("failed to get rx_irq\n"); return -ENOENT; } wcn->rx_irq = res->start; + /* Acquire SMSM tx enable handle */ + wcn->tx_enable_state = qcom_smem_state_get(&pdev->dev, + "tx-enable", &wcn->tx_enable_state_bit); + if (IS_ERR(wcn->tx_enable_state)) { + wcn36xx_err("failed to get tx-enable state\n"); + return PTR_ERR(wcn->tx_enable_state); + } + + /* Acquire SMSM tx rings empty handle */ + wcn->tx_rings_empty_state = qcom_smem_state_get(&pdev->dev, + "tx-rings-empty", &wcn->tx_rings_empty_state_bit); + if (IS_ERR(wcn->tx_rings_empty_state)) { + wcn36xx_err("failed to get tx-rings-empty state\n"); + return PTR_ERR(wcn->tx_rings_empty_state); + } + mmio_node = of_parse_phandle(pdev->dev.parent->of_node, "qcom,mmio", 0); if (!mmio_node) { wcn36xx_err("failed to acquire qcom,mmio reference\n"); @@ -1101,6 +1229,14 @@ static int wcn36xx_platform_get_resources(struct wcn36xx *wcn, goto unmap_ccu; } + /* External RF module */ + iris_node = of_find_node_by_name(mmio_node, "iris"); + if (iris_node) { + if (of_device_is_compatible(iris_node, "qcom,wcn3620")) + wcn->rf_id = RF_IRIS_WCN3620; + of_node_put(iris_node); + } + of_node_put(mmio_node); return 0; @@ -1115,11 +1251,14 @@ static int wcn36xx_probe(struct platform_device *pdev) { struct ieee80211_hw *hw; struct 
wcn36xx *wcn; + void *wcnss; int ret; - u8 addr[ETH_ALEN]; + const u8 *addr; wcn36xx_dbg(WCN36XX_DBG_MAC, "platform probe\n"); + wcnss = dev_get_drvdata(pdev->dev.parent); + hw = ieee80211_alloc_hw(sizeof(struct wcn36xx), &wcn36xx_ops); if (!hw) { wcn36xx_err("failed to alloc hw\n"); @@ -1130,11 +1269,25 @@ static int wcn36xx_probe(struct platform_device *pdev) wcn = hw->priv; wcn->hw = hw; wcn->dev = &pdev->dev; - wcn->ctrl_ops = pdev->dev.platform_data; - + mutex_init(&wcn->conf_mutex); mutex_init(&wcn->hal_mutex); + mutex_init(&wcn->scan_lock); - if (!wcn->ctrl_ops->get_hw_mac(addr)) { + INIT_WORK(&wcn->scan_work, wcn36xx_hw_scan_worker); + + wcn->smd_channel = qcom_wcnss_open_channel(wcnss, "WLAN_CTRL", wcn36xx_smd_rsp_process, hw); + if (IS_ERR(wcn->smd_channel)) { + wcn36xx_err("failed to open WLAN_CTRL channel\n"); + ret = PTR_ERR(wcn->smd_channel); + goto out_wq; + } + + addr = of_get_property(pdev->dev.of_node, "local-mac-address", &ret); + if (addr && ret != ETH_ALEN) { + wcn36xx_err("invalid local-mac-address\n"); + ret = -EINVAL; + goto out_wq; + } else if (addr) { wcn36xx_info("mac address: %pM\n", addr); SET_IEEE80211_PERM_ADDR(wcn->hw, addr); } @@ -1158,6 +1311,7 @@ out_wq: out_err: return ret; } + static int wcn36xx_remove(struct platform_device *pdev) { struct ieee80211_hw *hw = platform_get_drvdata(pdev); @@ -1165,45 +1319,39 @@ static int wcn36xx_remove(struct platform_device *pdev) wcn36xx_dbg(WCN36XX_DBG_MAC, "platform remove\n"); release_firmware(wcn->nv); - mutex_destroy(&wcn->hal_mutex); ieee80211_unregister_hw(hw); + + qcom_smem_state_put(wcn->tx_enable_state); + qcom_smem_state_put(wcn->tx_rings_empty_state); + + rpmsg_destroy_ept(wcn->smd_channel); + iounmap(wcn->dxe_base); iounmap(wcn->ccu_base); + + mutex_destroy(&wcn->hal_mutex); ieee80211_free_hw(hw); return 0; } -static const struct platform_device_id wcn36xx_platform_id_table[] = { - { - .name = "wcn36xx", - .driver_data = 0 - }, + +static const struct of_device_id 
wcn36xx_of_match[] = { + { .compatible = "qcom,wcnss-wlan" }, {} }; -MODULE_DEVICE_TABLE(platform, wcn36xx_platform_id_table); +MODULE_DEVICE_TABLE(of, wcn36xx_of_match); static struct platform_driver wcn36xx_driver = { .probe = wcn36xx_probe, .remove = wcn36xx_remove, .driver = { .name = "wcn36xx", + .of_match_table = wcn36xx_of_match, }, - .id_table = wcn36xx_platform_id_table, }; -static int __init wcn36xx_init(void) -{ - platform_driver_register(&wcn36xx_driver); - return 0; -} -module_init(wcn36xx_init); - -static void __exit wcn36xx_exit(void) -{ - platform_driver_unregister(&wcn36xx_driver); -} -module_exit(wcn36xx_exit); +module_platform_driver(wcn36xx_driver); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Eugene Krasnikov k.eugene.e@gmail.com"); diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index a443992320f2..9c6590d5348a 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -19,6 +19,7 @@ #include <linux/etherdevice.h> #include <linux/firmware.h> #include <linux/bitops.h> +#include <linux/rpmsg.h> #include "smd.h" struct wcn36xx_cfg_val { @@ -253,7 +254,7 @@ static int wcn36xx_smd_send_and_wait(struct wcn36xx *wcn, size_t len) init_completion(&wcn->hal_rsp_compl); start = jiffies; - ret = wcn->ctrl_ops->tx(wcn->hal_buf, len); + ret = rpmsg_send(wcn->smd_channel, wcn->hal_buf, len); if (ret) { wcn36xx_err("HAL TX failed\n"); goto out; @@ -521,7 +522,7 @@ out: return ret; } -int wcn36xx_smd_start_scan(struct wcn36xx *wcn) +int wcn36xx_smd_start_scan(struct wcn36xx *wcn, u8 scan_channel) { struct wcn36xx_hal_start_scan_req_msg msg_body; int ret = 0; @@ -529,7 +530,7 @@ int wcn36xx_smd_start_scan(struct wcn36xx *wcn) mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_SCAN_REQ); - msg_body.scan_channel = WCN36XX_HW_CHANNEL(wcn); + msg_body.scan_channel = scan_channel; PREPARE_HAL_BUF(wcn->hal_buf, msg_body); @@ -551,7 +552,7 @@ out: return ret; } 
-int wcn36xx_smd_end_scan(struct wcn36xx *wcn) +int wcn36xx_smd_end_scan(struct wcn36xx *wcn, u8 scan_channel) { struct wcn36xx_hal_end_scan_req_msg msg_body; int ret = 0; @@ -559,7 +560,7 @@ int wcn36xx_smd_end_scan(struct wcn36xx *wcn) mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_END_SCAN_REQ); - msg_body.scan_channel = WCN36XX_HW_CHANNEL(wcn); + msg_body.scan_channel = scan_channel; PREPARE_HAL_BUF(wcn->hal_buf, msg_body); @@ -2108,6 +2109,30 @@ static int wcn36xx_smd_delete_sta_context_ind(struct wcn36xx *wcn, return -ENOENT; } +static int wcn36xx_smd_print_reg_info_ind(struct wcn36xx *wcn, + void *buf, + size_t len) +{ + struct wcn36xx_hal_print_reg_info_ind *rsp = buf; + int i; + + if (len < sizeof(*rsp)) { + wcn36xx_warn("Corrupted print reg info indication\n"); + return -EIO; + } + + wcn36xx_dbg(WCN36XX_DBG_HAL, + "reginfo indication, scenario: 0x%x reason: 0x%x\n", + rsp->scenario, rsp->reason); + + for (i = 0; i < rsp->count; i++) { + wcn36xx_dbg(WCN36XX_DBG_HAL, "\t0x%x: 0x%x\n", + rsp->regs[i].addr, rsp->regs[i].value); + } + + return 0; +} + int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value) { struct wcn36xx_hal_update_cfg_req_msg msg_body, *body; @@ -2180,9 +2205,12 @@ out: return ret; } -static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len) +int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev, + void *buf, int len, void *priv, u32 addr) { - struct wcn36xx_hal_msg_header *msg_header = buf; + const struct wcn36xx_hal_msg_header *msg_header = buf; + struct ieee80211_hw *hw = priv; + struct wcn36xx *wcn = hw->priv; struct wcn36xx_hal_ind_msg *msg_ind; wcn36xx_dbg_dump(WCN36XX_DBG_SMD_DUMP, "SMD <<< ", buf, len); @@ -2233,15 +2261,12 @@ static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len) case WCN36XX_HAL_OTA_TX_COMPL_IND: case WCN36XX_HAL_MISSED_BEACON_IND: case WCN36XX_HAL_DELETE_STA_CONTEXT_IND: - msg_ind = kmalloc(sizeof(*msg_ind) + len, GFP_KERNEL); + case 
WCN36XX_HAL_PRINT_REG_INFO_IND: + msg_ind = kmalloc(sizeof(*msg_ind) + len, GFP_ATOMIC); if (!msg_ind) { - /* - * FIXME: Do something smarter then just - * printing an error. - */ wcn36xx_err("Run out of memory while handling SMD_EVENT (%d)\n", msg_header->msg_type); - break; + return -ENOMEM; } msg_ind->msg_len = len; @@ -2257,6 +2282,8 @@ static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len) wcn36xx_err("SMD_EVENT (%d) not supported\n", msg_header->msg_type); } + + return 0; } static void wcn36xx_ind_smd_work(struct work_struct *work) { @@ -2294,6 +2321,11 @@ static void wcn36xx_ind_smd_work(struct work_struct *work) hal_ind_msg->msg, hal_ind_msg->msg_len); break; + case WCN36XX_HAL_PRINT_REG_INFO_IND: + wcn36xx_smd_print_reg_info_ind(wcn, + hal_ind_msg->msg, + hal_ind_msg->msg_len); + break; default: wcn36xx_err("SMD_EVENT (%d) not supported\n", msg_header->msg_type); @@ -2315,22 +2347,13 @@ int wcn36xx_smd_open(struct wcn36xx *wcn) INIT_LIST_HEAD(&wcn->hal_ind_queue); spin_lock_init(&wcn->hal_ind_lock); - ret = wcn->ctrl_ops->open(wcn, wcn36xx_smd_rsp_process); - if (ret) { - wcn36xx_err("failed to open control channel\n"); - goto free_wq; - } - - return ret; + return 0; -free_wq: - destroy_workqueue(wcn->hal_ind_wq); out: return ret; } void wcn36xx_smd_close(struct wcn36xx *wcn) { - wcn->ctrl_ops->close(); destroy_workqueue(wcn->hal_ind_wq); } diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h index df80cbbd9d1b..013fc9546f56 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.h +++ b/drivers/net/wireless/ath/wcn36xx/smd.h @@ -51,6 +51,7 @@ struct wcn36xx_hal_ind_msg { }; struct wcn36xx; +struct rpmsg_device; int wcn36xx_smd_open(struct wcn36xx *wcn); void wcn36xx_smd_close(struct wcn36xx *wcn); @@ -59,8 +60,8 @@ int wcn36xx_smd_load_nv(struct wcn36xx *wcn); int wcn36xx_smd_start(struct wcn36xx *wcn); int wcn36xx_smd_stop(struct wcn36xx *wcn); int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum 
wcn36xx_hal_sys_mode mode); -int wcn36xx_smd_start_scan(struct wcn36xx *wcn); -int wcn36xx_smd_end_scan(struct wcn36xx *wcn); +int wcn36xx_smd_start_scan(struct wcn36xx *wcn, u8 scan_channel); +int wcn36xx_smd_end_scan(struct wcn36xx *wcn, u8 scan_channel); int wcn36xx_smd_finish_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode); int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn, u8 *channels, size_t channel_count); @@ -127,6 +128,10 @@ int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 sta_index); int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index); int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value); + +int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev, + void *buf, int len, void *priv, u32 addr); + int wcn36xx_smd_set_mc_list(struct wcn36xx *wcn, struct ieee80211_vif *vif, struct wcn36xx_hal_rcv_flt_mc_addr_list_type *fp); diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c index 1f34c2e912d7..8c387a0a3c09 100644 --- a/drivers/net/wireless/ath/wcn36xx/txrx.c +++ b/drivers/net/wireless/ath/wcn36xx/txrx.c @@ -45,9 +45,20 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb) skb_put(skb, bd->pdu.mpdu_header_off + bd->pdu.mpdu_len); skb_pull(skb, bd->pdu.mpdu_header_off); + hdr = (struct ieee80211_hdr *) skb->data; + fc = __le16_to_cpu(hdr->frame_control); + sn = IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl)); + + /* When scanning associate beacons to this */ + if (ieee80211_is_beacon(hdr->frame_control) && wcn->scan_freq) { + status.freq = wcn->scan_freq; + status.band = wcn->scan_band; + } else { + status.freq = WCN36XX_CENTER_FREQ(wcn); + status.band = WCN36XX_BAND(wcn); + } + status.mactime = 10; - status.freq = WCN36XX_CENTER_FREQ(wcn); - status.band = WCN36XX_BAND(wcn); status.signal = -get_rssi0(bd); status.antenna = 1; status.rate_idx = 1; @@ -61,10 +72,6 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb) memcpy(IEEE80211_SKB_RXCB(skb), 
&status, sizeof(status)); - hdr = (struct ieee80211_hdr *) skb->data; - fc = __le16_to_cpu(hdr->frame_control); - sn = IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl)); - if (ieee80211_is_beacon(hdr->frame_control)) { wcn36xx_dbg(WCN36XX_DBG_BEACON, "beacon skb %p len %d fc %04x sn %d\n", skb, skb->len, fc, sn); diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h index 22242d18e1fe..81017e6703b4 100644 --- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h +++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h @@ -35,6 +35,9 @@ /* How many frames until we start a-mpdu TX session */ #define WCN36XX_AMPDU_START_THRESH 20 +#define WCN36XX_MAX_SCAN_SSIDS 9 +#define WCN36XX_MAX_SCAN_IE_LEN 500 + extern unsigned int wcn36xx_dbg_mask; enum wcn36xx_debug_mask { @@ -91,6 +94,9 @@ enum wcn36xx_ampdu_state { #define WCN36XX_FLAGS(__wcn) (__wcn->hw->flags) #define WCN36XX_MAX_POWER(__wcn) (__wcn->hw->conf.chandef.chan->max_power) +#define RF_UNKNOWN 0x0000 +#define RF_IRIS_WCN3620 0x3620 + static inline void buff_to_be(u32 *buf, size_t len) { int i; @@ -103,19 +109,6 @@ struct nv_data { u8 table; }; -/* Interface for platform control path - * - * @open: hook must be called when wcn36xx wants to open control channel. - * @tx: sends a buffer. 
- */ -struct wcn36xx_platform_ctrl_ops { - int (*open)(void *drv_priv, void *rsp_cb); - void (*close)(void); - int (*tx)(char *buf, size_t len); - int (*get_hw_mac)(u8 *addr); - int (*smsm_change_state)(u32 clear_mask, u32 set_mask); -}; - /** * struct wcn36xx_vif - holds VIF related fields * @@ -205,7 +198,16 @@ struct wcn36xx { void __iomem *ccu_base; void __iomem *dxe_base; - struct wcn36xx_platform_ctrl_ops *ctrl_ops; + struct rpmsg_endpoint *smd_channel; + + struct qcom_smem_state *tx_enable_state; + unsigned tx_enable_state_bit; + struct qcom_smem_state *tx_rings_empty_state; + unsigned tx_rings_empty_state_bit; + + /* prevents concurrent FW reconfiguration */ + struct mutex conf_mutex; + /* * smd_buf must be protected with smd_mutex to garantee * that all messages are sent one after another @@ -219,6 +221,13 @@ struct wcn36xx { spinlock_t hal_ind_lock; struct list_head hal_ind_queue; + struct work_struct scan_work; + struct cfg80211_scan_request *scan_req; + int scan_freq; + int scan_band; + struct mutex scan_lock; + bool scan_aborted; + /* DXE channels */ struct wcn36xx_dxe_ch dxe_tx_l_ch; /* TX low */ struct wcn36xx_dxe_ch dxe_tx_h_ch; /* TX high */ @@ -235,6 +244,9 @@ struct wcn36xx { struct sk_buff *tx_ack_skb; + /* RF module */ + unsigned rf_id; + #ifdef CONFIG_WCN36XX_DEBUGFS /* Debug file system entry */ struct wcn36xx_dfs_entry dfs; diff --git a/drivers/of/address.c b/drivers/of/address.c index 72914cdfce2a..af1313109479 100644 --- a/drivers/of/address.c +++ b/drivers/of/address.c @@ -819,8 +819,8 @@ EXPORT_SYMBOL(of_io_request_and_map); * CPU addr (phys_addr_t) : pna cells * size : nsize cells * - * It returns -ENODEV if "dma-ranges" property was not found - * for this device in DT. + * Return 0 on success, -ENODEV if the "dma-ranges" property was not found for + * this device in DT, or -EINVAL if the CPU address or size is invalid. 
*/ int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *size) { @@ -880,6 +880,22 @@ int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *siz *dma_addr = dmaaddr; *size = of_read_number(ranges + naddr + pna, nsize); + /* + * DT nodes sometimes incorrectly set the size as a mask. Work around + * those incorrect DT by computing the size as mask + 1. + */ + if (*size & 1) { + pr_warn("%s: size 0x%llx for dma-range in node(%s) set as mask\n", + __func__, *size, np->full_name); + *size = *size + 1; + } + + if (!*size) { + pr_err("%s: invalid size zero for dma-range in node(%s)\n", + __func__, np->full_name); + ret = -EINVAL; + goto out; + } pr_debug("dma_addr(%llx) cpu_addr(%llx) size(%llx)\n", *dma_addr, *paddr, *size); diff --git a/drivers/of/device.c b/drivers/of/device.c index f7a970120055..e2f682689cd6 100644 --- a/drivers/of/device.c +++ b/drivers/of/device.c @@ -82,7 +82,7 @@ int of_device_add(struct platform_device *ofdev) * can use a platform bus notifier and handle BUS_NOTIFY_ADD_DEVICE events * to fix up DMA configuration. */ -void of_dma_configure(struct device *dev, struct device_node *np) +int of_dma_configure(struct device *dev, struct device_node *np) { u64 dma_addr, paddr, size; int ret; @@ -107,24 +107,9 @@ void of_dma_configure(struct device *dev, struct device_node *np) ret = of_dma_get_range(np, &dma_addr, &paddr, &size); if (ret < 0) { dma_addr = offset = 0; - size = dev->coherent_dma_mask + 1; + size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1); } else { offset = PFN_DOWN(paddr - dma_addr); - - /* - * Add a work around to treat the size as mask + 1 in case - * it is defined in DT as a mask. 
- */ - if (size & 1) { - dev_warn(dev, "Invalid size 0x%llx for dma-range\n", - size); - size = size + 1; - } - - if (!size) { - dev_err(dev, "Adjusted size 0x%llx invalid\n", size); - return; - } dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", offset); } @@ -144,13 +129,30 @@ void of_dma_configure(struct device *dev, struct device_node *np) coherent ? " " : " not "); iommu = of_iommu_configure(dev, np); + if (IS_ERR(iommu)) + return PTR_ERR(iommu); + dev_dbg(dev, "device is%sbehind an iommu\n", iommu ? " " : " not "); arch_setup_dma_ops(dev, dma_addr, size, iommu, coherent); + + return 0; } EXPORT_SYMBOL_GPL(of_dma_configure); +/** + * of_dma_deconfigure - Clean up DMA configuration + * @dev: Device for which to clean up DMA configuration + * + * Clean up all configuration performed by of_dma_configure_ops() and free all + * resources that have been allocated. + */ +void of_dma_deconfigure(struct device *dev) +{ + arch_teardown_dma_ops(dev); +} + int of_device_register(struct platform_device *pdev) { device_initialize(&pdev->dev); diff --git a/drivers/of/platform.c b/drivers/of/platform.c index e4bf07d20f9b..b0a2f9e9b862 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c @@ -22,6 +22,7 @@ #include <linux/slab.h> #include <linux/of_address.h> #include <linux/of_device.h> +#include <linux/of_iommu.h> #include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/platform_device.h> @@ -155,11 +156,6 @@ struct platform_device *of_device_alloc(struct device_node *np, } EXPORT_SYMBOL(of_device_alloc); -static void of_dma_deconfigure(struct device *dev) -{ - arch_teardown_dma_ops(dev); -} - /** * of_platform_device_create_pdata - Alloc, initialize and register an of_device * @np: pointer to node to create device for @@ -188,11 +184,9 @@ static struct platform_device *of_platform_device_create_pdata( dev->dev.bus = &platform_bus_type; dev->dev.platform_data = platform_data; - of_dma_configure(&dev->dev, dev->dev.of_node); of_msi_configure(&dev->dev, 
dev->dev.of_node); if (of_device_add(dev) != 0) { - of_dma_deconfigure(&dev->dev); platform_device_put(dev); goto err_clear_flag; } @@ -250,7 +244,6 @@ static struct amba_device *of_amba_device_create(struct device_node *node, dev_set_name(&dev->dev, "%s", bus_id); else of_device_make_bus_id(&dev->dev); - of_dma_configure(&dev->dev, dev->dev.of_node); /* Allow the HW Peripheral ID to be overridden */ prop = of_get_property(node, "arm,primecell-periphid", NULL); @@ -544,7 +537,6 @@ static int of_platform_device_destroy(struct device *dev, void *data) amba_device_unregister(to_amba_device(dev)); #endif - of_dma_deconfigure(dev); of_node_clear_flag(dev->of_node, OF_POPULATED); of_node_clear_flag(dev->of_node, OF_POPULATED_BUS); return 0; diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index d266d800f246..b2d8fcba5af3 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -1746,34 +1746,6 @@ static void pci_set_msi_domain(struct pci_dev *dev) dev_set_msi_domain(&dev->dev, d); } -/** - * pci_dma_configure - Setup DMA configuration - * @dev: ptr to pci_dev struct of the PCI device - * - * Function to update PCI devices's DMA configuration using the same - * info from the OF node or ACPI node of host bridge's parent (if any). 
- */ -static void pci_dma_configure(struct pci_dev *dev) -{ - struct device *bridge = pci_get_host_bridge_device(dev); - - if (IS_ENABLED(CONFIG_OF) && - bridge->parent && bridge->parent->of_node) { - of_dma_configure(&dev->dev, bridge->parent->of_node); - } else if (has_acpi_companion(bridge)) { - struct acpi_device *adev = to_acpi_device_node(bridge->fwnode); - enum dev_dma_attr attr = acpi_get_dma_attr(adev); - - if (attr == DEV_DMA_NOT_SUPPORTED) - dev_warn(&dev->dev, "DMA not supported.\n"); - else - arch_setup_dma_ops(&dev->dev, 0, 0, NULL, - attr == DEV_DMA_COHERENT); - } - - pci_put_host_bridge_device(bridge); -} - void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) { int ret; @@ -1787,7 +1759,6 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) dev->dev.dma_mask = &dev->dma_mask; dev->dev.dma_parms = &dev->dma_parms; dev->dev.coherent_dma_mask = 0xffffffffull; - pci_dma_configure(dev); pci_set_dma_max_seg_size(dev, 65536); pci_set_dma_seg_boundary(dev, 0xffffffff); diff --git a/drivers/power/avs/Kconfig b/drivers/power/avs/Kconfig index a67eeace6a89..44d9f5bdc898 100644 --- a/drivers/power/avs/Kconfig +++ b/drivers/power/avs/Kconfig @@ -11,6 +11,21 @@ menuconfig POWER_AVS Say Y here to enable Adaptive Voltage Scaling class support. +config QCOM_CPR + tristate "QCOM Core Power Reduction (CPR) support" + depends on POWER_AVS + select PM_OPP + help + Say Y here to enable support for the CPR hardware found on Qualcomm + SoCs like MSM8916. + + This driver populates CPU OPPs tables and makes adjustments to the + tables based on feedback from the CPR hardware. If you want to do + CPUfrequency scaling say Y here. 
+ + To compile this driver as a module, choose M here: the module will + be called qcom-cpr + config ROCKCHIP_IODOMAIN tristate "Rockchip IO domain support" depends on POWER_AVS && ARCH_ROCKCHIP && OF diff --git a/drivers/power/avs/Makefile b/drivers/power/avs/Makefile index ba4c7bc69225..88f4d5d49cba 100644 --- a/drivers/power/avs/Makefile +++ b/drivers/power/avs/Makefile @@ -1,2 +1,3 @@ obj-$(CONFIG_POWER_AVS_OMAP) += smartreflex.o obj-$(CONFIG_ROCKCHIP_IODOMAIN) += rockchip-io-domain.o +obj-$(CONFIG_QCOM_CPR) += qcom-cpr.o diff --git a/drivers/power/avs/qcom-cpr.c b/drivers/power/avs/qcom-cpr.c new file mode 100644 index 000000000000..a1f25b21ca31 --- /dev/null +++ b/drivers/power/avs/qcom-cpr.c @@ -0,0 +1,2003 @@ +/* + * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/module.h> +#include <linux/err.h> +#include <linux/string.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/bitops.h> +#include <linux/slab.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/pm_opp.h> +#include <linux/interrupt.h> +#include <linux/regmap.h> +#include <linux/mfd/syscon.h> +#include <linux/regulator/consumer.h> +#include <linux/cpufreq.h> +#include <linux/bitops.h> +#include <linux/regulator/qcom_smd-regulator.h> + +/* Register Offsets for RB-CPR and Bit Definitions */ + +/* RBCPR Version Register */ +#define REG_RBCPR_VERSION 0 +#define RBCPR_VER_2 0x02 + +/* RBCPR Gate Count and Target Registers */ +#define REG_RBCPR_GCNT_TARGET(n) (0x60 + 4 * n) + +#define RBCPR_GCNT_TARGET_TARGET_SHIFT 0 +#define RBCPR_GCNT_TARGET_TARGET_MASK GENMASK(11, 0) +#define RBCPR_GCNT_TARGET_GCNT_SHIFT 12 +#define RBCPR_GCNT_TARGET_GCNT_MASK GENMASK(9, 0) + +/* RBCPR Timer Control */ +#define REG_RBCPR_TIMER_INTERVAL 0x44 +#define REG_RBIF_TIMER_ADJUST 0x4c + +#define RBIF_TIMER_ADJ_CONS_UP_MASK GENMASK(3, 0) +#define RBIF_TIMER_ADJ_CONS_UP_SHIFT 0 +#define RBIF_TIMER_ADJ_CONS_DOWN_MASK GENMASK(3, 0) +#define RBIF_TIMER_ADJ_CONS_DOWN_SHIFT 4 +#define RBIF_TIMER_ADJ_CLAMP_INT_MASK GENMASK(7, 0) +#define RBIF_TIMER_ADJ_CLAMP_INT_SHIFT 8 + +/* RBCPR Config Register */ +#define REG_RBIF_LIMIT 0x48 +#define RBIF_LIMIT_CEILING_MASK GENMASK(5, 0) +#define RBIF_LIMIT_CEILING_SHIFT 6 +#define RBIF_LIMIT_FLOOR_BITS 6 +#define RBIF_LIMIT_FLOOR_MASK GENMASK(5, 0) + +#define RBIF_LIMIT_CEILING_DEFAULT RBIF_LIMIT_CEILING_MASK +#define RBIF_LIMIT_FLOOR_DEFAULT 0 + +#define REG_RBIF_SW_VLEVEL 0x94 +#define RBIF_SW_VLEVEL_DEFAULT 0x20 + +#define REG_RBCPR_STEP_QUOT 0x80 +#define RBCPR_STEP_QUOT_STEPQUOT_MASK GENMASK(7, 0) +#define RBCPR_STEP_QUOT_IDLE_CLK_MASK GENMASK(3, 0) +#define RBCPR_STEP_QUOT_IDLE_CLK_SHIFT 8 + +/* RBCPR Control 
Register */ +#define REG_RBCPR_CTL 0x90 + +#define RBCPR_CTL_LOOP_EN BIT(0) +#define RBCPR_CTL_TIMER_EN BIT(3) +#define RBCPR_CTL_SW_AUTO_CONT_ACK_EN BIT(5) +#define RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN BIT(6) +#define RBCPR_CTL_COUNT_MODE BIT(10) +#define RBCPR_CTL_UP_THRESHOLD_MASK GENMASK(3, 0) +#define RBCPR_CTL_UP_THRESHOLD_SHIFT 24 +#define RBCPR_CTL_DN_THRESHOLD_MASK GENMASK(3, 0) +#define RBCPR_CTL_DN_THRESHOLD_SHIFT 28 + +/* RBCPR Ack/Nack Response */ +#define REG_RBIF_CONT_ACK_CMD 0x98 +#define REG_RBIF_CONT_NACK_CMD 0x9c + +/* RBCPR Result status Register */ +#define REG_RBCPR_RESULT_0 0xa0 + +#define RBCPR_RESULT0_BUSY_SHIFT 19 +#define RBCPR_RESULT0_BUSY_MASK BIT(RBCPR_RESULT0_BUSY_SHIFT) +#define RBCPR_RESULT0_ERROR_LT0_SHIFT 18 +#define RBCPR_RESULT0_ERROR_SHIFT 6 +#define RBCPR_RESULT0_ERROR_MASK GENMASK(11, 0) +#define RBCPR_RESULT0_ERROR_STEPS_SHIFT 2 +#define RBCPR_RESULT0_ERROR_STEPS_MASK GENMASK(3, 0) +#define RBCPR_RESULT0_STEP_UP_SHIFT 1 + +/* RBCPR Interrupt Control Register */ +#define REG_RBIF_IRQ_EN(n) (0x100 + 4 * n) +#define REG_RBIF_IRQ_CLEAR 0x110 +#define REG_RBIF_IRQ_STATUS 0x114 + +#define CPR_INT_DONE BIT(0) +#define CPR_INT_MIN BIT(1) +#define CPR_INT_DOWN BIT(2) +#define CPR_INT_MID BIT(3) +#define CPR_INT_UP BIT(4) +#define CPR_INT_MAX BIT(5) +#define CPR_INT_CLAMP BIT(6) +#define CPR_INT_ALL (CPR_INT_DONE | CPR_INT_MIN | CPR_INT_DOWN | \ + CPR_INT_MID | CPR_INT_UP | CPR_INT_MAX | CPR_INT_CLAMP) +#define CPR_INT_DEFAULT (CPR_INT_UP | CPR_INT_DOWN) + +#define CPR_NUM_RING_OSC 8 + +/* RBCPR Clock Control Register */ +#define RBCPR_CLK_SEL_MASK BIT(-1) +#define RBCPR_CLK_SEL_19P2_MHZ 0 +#define RBCPR_CLK_SEL_AHB_CLK BIT(0) + +/* CPR eFuse parameters */ +#define CPR_FUSE_TARGET_QUOT_BITS_MASK GENMASK(11, 0) + +#define CPR_FUSE_MIN_QUOT_DIFF 50 + +#define SPEED_BIN_NONE UINT_MAX + +#define FUSE_REVISION_UNKNOWN (-1) +#define FUSE_MAP_NO_MATCH (-1) +#define FUSE_PARAM_MATCH_ANY 0xffffffff + +enum vdd_mx_vmin_method { + 
VDD_MX_VMIN_APC_CORNER_CEILING, + VDD_MX_VMIN_FUSE_CORNER_MAP, +}; + +enum voltage_change_dir { + NO_CHANGE, + DOWN, + UP, +}; + +struct qfprom_offset { + u16 offset; + u8 width; + u8 shift; +}; + +struct cpr_fuse { + struct qfprom_offset ring_osc; + struct qfprom_offset init_voltage; + struct qfprom_offset quotient; + struct qfprom_offset quotient_offset; +}; + +struct fuse_corner_data { + int ref_uV; + int max_uV; + int min_uV; + int max_quot_scale; + int quot_offset; + int quot_scale; + int max_volt_scale; + int vdd_mx_req; +}; + +struct cpr_fuses { + struct qfprom_offset redundant; + u8 redundant_value; + int init_voltage_step; + struct fuse_corner_data *fuse_corner_data; + struct cpr_fuse *cpr_fuse; + struct qfprom_offset *disable; +}; + +struct pvs_bin { + int *uV; +}; + +struct pvs_fuses { + struct qfprom_offset redundant; + u8 redundant_value; + struct qfprom_offset *pvs_fuse; + struct pvs_bin *pvs_bins; +}; + +struct corner_data { + unsigned int fuse_corner; + unsigned long freq; +}; + +struct freq_plan { + u32 speed_bin; + u32 pvs_version; + const struct corner_data **plan; +}; + +struct fuse_conditional_min_volt { + struct qfprom_offset redundant; + u8 expected; + int min_uV; +}; + +struct fuse_uplift_wa { + struct qfprom_offset redundant; + u8 expected; + int uV; + int *quot; + int max_uV; + int speed_bin; +}; + +struct corner_override { + u32 speed_bin; + u32 pvs_version; + int *max_uV; + int *min_uV; +}; + +struct corner_adjustment { + u32 speed_bin; + u32 pvs_version; + u32 cpr_rev; + u8 *ring_osc_idx; + int *fuse_quot; + int *fuse_quot_diff; + int *fuse_quot_min; + int *fuse_quot_offset; + int *fuse_init_uV; + int *quot; + int *init_uV; + bool disable_closed_loop; +}; + +struct cpr_desc { + unsigned int num_fuse_corners; + unsigned int num_corners; + enum vdd_mx_vmin_method vdd_mx_vmin_method; + int min_diff_quot; + int *step_quot; + struct cpr_fuses cpr_fuses; + struct qfprom_offset fuse_revision; + struct qfprom_offset speed_bin; + struct 
qfprom_offset pvs_version; + struct corner_data *corner_data; + struct freq_plan *freq_plans; + size_t num_freq_plans; + struct pvs_fuses *pvs_fuses; + struct fuse_conditional_min_volt *min_volt_fuse; + struct fuse_uplift_wa *uplift_wa; + struct corner_override *corner_overrides; + size_t num_corner_overrides; + struct corner_adjustment *adjustments; + size_t num_adjustments; + bool reduce_to_fuse_uV; + bool reduce_to_corner_uV; +}; + +struct acc_desc { + unsigned int enable_reg; + u32 enable_mask; + + struct reg_sequence *settings; + struct reg_sequence *override_settings; + int num_regs_per_fuse; + + struct qfprom_offset override; + u8 override_value; +}; + +struct fuse_corner { + int min_uV; + int max_uV; + int uV; + int quot; + int step_quot; + const struct reg_sequence *accs; + int num_accs; + int vdd_mx_req; + unsigned long max_freq; + u8 ring_osc_idx; +}; + +struct corner { + int min_uV; + int max_uV; + int uV; + int last_uV; + int quot_adjust; + u32 save_ctl; + u32 save_irq; + unsigned long freq; + struct fuse_corner *fuse_corner; +}; + +struct cpr_drv { + unsigned int num_fuse_corners; + unsigned int num_corners; + + unsigned int nb_count; + struct notifier_block cpufreq_nb; + bool switching_opp; + struct notifier_block reg_nb; + + unsigned int ref_clk_khz; + unsigned int timer_delay_us; + unsigned int timer_cons_up; + unsigned int timer_cons_down; + unsigned int up_threshold; + unsigned int down_threshold; + unsigned int idle_clocks; + unsigned int gcnt_us; + unsigned int vdd_apc_step_up_limit; + unsigned int vdd_apc_step_down_limit; + unsigned int clamp_timer_interval; + enum vdd_mx_vmin_method vdd_mx_vmin_method; + + struct device *dev; + struct mutex lock; + void __iomem *base; + struct corner *corner; + struct regulator *vdd_apc; + struct regulator *vdd_mx; + struct clk *cpu_clk; + struct device *cpu_dev; + struct regmap *tcsr; + bool loop_disabled; + bool suspended; + u32 gcnt; + unsigned long flags; +#define FLAGS_IGNORE_1ST_IRQ_STATUS BIT(0) + + 
struct fuse_corner *fuse_corners; + struct corner *corners; +}; + +static bool cpr_is_allowed(struct cpr_drv *drv) +{ + if (drv->loop_disabled) /* || disabled in software */ + return false; + else + return true; +} + +static void cpr_write(struct cpr_drv *drv, u32 offset, u32 value) +{ + writel_relaxed(value, drv->base + offset); +} + +static u32 cpr_read(struct cpr_drv *drv, u32 offset) +{ + return readl_relaxed(drv->base + offset); +} + +static void +cpr_masked_write(struct cpr_drv *drv, u32 offset, u32 mask, u32 value) +{ + u32 val; + + val = readl_relaxed(drv->base + offset); + val &= ~mask; + val |= value & mask; + writel_relaxed(val, drv->base + offset); +} + +static void cpr_irq_clr(struct cpr_drv *drv) +{ + cpr_write(drv, REG_RBIF_IRQ_CLEAR, CPR_INT_ALL); +} + +static void cpr_irq_clr_nack(struct cpr_drv *drv) +{ + cpr_irq_clr(drv); + cpr_write(drv, REG_RBIF_CONT_NACK_CMD, 1); +} + +static void cpr_irq_clr_ack(struct cpr_drv *drv) +{ + cpr_irq_clr(drv); + cpr_write(drv, REG_RBIF_CONT_ACK_CMD, 1); +} + +static void cpr_irq_set(struct cpr_drv *drv, u32 int_bits) +{ + cpr_write(drv, REG_RBIF_IRQ_EN(0), int_bits); +} + +static void cpr_ctl_modify(struct cpr_drv *drv, u32 mask, u32 value) +{ + cpr_masked_write(drv, REG_RBCPR_CTL, mask, value); +} + +static void cpr_ctl_enable(struct cpr_drv *drv, struct corner *corner) +{ + u32 val, mask; + + if (drv->suspended) + return; + + /* Program Consecutive Up & Down */ + val = drv->timer_cons_down << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT; + val |= drv->timer_cons_up << RBIF_TIMER_ADJ_CONS_UP_SHIFT; + mask = RBIF_TIMER_ADJ_CONS_UP_MASK | RBIF_TIMER_ADJ_CONS_DOWN_MASK; + cpr_masked_write(drv, REG_RBIF_TIMER_ADJUST, mask, val); + cpr_masked_write(drv, REG_RBCPR_CTL, + RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN | + RBCPR_CTL_SW_AUTO_CONT_ACK_EN, + corner->save_ctl); + cpr_irq_set(drv, corner->save_irq); + + if (cpr_is_allowed(drv) /*&& drv->vreg_enabled */ && + corner->max_uV > corner->min_uV) + val = RBCPR_CTL_LOOP_EN; + else + val = 0; + 
cpr_ctl_modify(drv, RBCPR_CTL_LOOP_EN, val); +} + +static void cpr_ctl_disable(struct cpr_drv *drv) +{ + if (drv->suspended) + return; + + cpr_irq_set(drv, 0); + cpr_ctl_modify(drv, RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN | + RBCPR_CTL_SW_AUTO_CONT_ACK_EN, 0); + cpr_masked_write(drv, REG_RBIF_TIMER_ADJUST, + RBIF_TIMER_ADJ_CONS_UP_MASK | + RBIF_TIMER_ADJ_CONS_DOWN_MASK, 0); + cpr_irq_clr(drv); + cpr_write(drv, REG_RBIF_CONT_ACK_CMD, 1); + cpr_write(drv, REG_RBIF_CONT_NACK_CMD, 1); + cpr_ctl_modify(drv, RBCPR_CTL_LOOP_EN, 0); +} + +static bool cpr_ctl_is_enabled(struct cpr_drv *drv) +{ + u32 reg_val; + + reg_val = cpr_read(drv, REG_RBCPR_CTL); + return reg_val & RBCPR_CTL_LOOP_EN; +} + +static bool cpr_ctl_is_busy(struct cpr_drv *drv) +{ + u32 reg_val; + + reg_val = cpr_read(drv, REG_RBCPR_RESULT_0); + return reg_val & RBCPR_RESULT0_BUSY_MASK; +} + +static void cpr_corner_save(struct cpr_drv *drv, struct corner *corner) +{ + corner->save_ctl = cpr_read(drv, REG_RBCPR_CTL); + corner->save_irq = cpr_read(drv, REG_RBIF_IRQ_EN(0)); +} + +static void cpr_corner_restore(struct cpr_drv *drv, struct corner *corner) +{ + u32 gcnt, ctl, irq, ro_sel, step_quot; + struct fuse_corner *fuse = corner->fuse_corner; + int i; + + ro_sel = fuse->ring_osc_idx; + gcnt = drv->gcnt; + gcnt |= fuse->quot - corner->quot_adjust; + + /* Program the step quotient and idle clocks */ + step_quot = drv->idle_clocks << RBCPR_STEP_QUOT_IDLE_CLK_SHIFT; + step_quot |= fuse->step_quot; + cpr_write(drv, REG_RBCPR_STEP_QUOT, step_quot); + + /* Clear the target quotient value and gate count of all ROs */ + for (i = 0; i < CPR_NUM_RING_OSC; i++) + cpr_write(drv, REG_RBCPR_GCNT_TARGET(i), 0); + + cpr_write(drv, REG_RBCPR_GCNT_TARGET(ro_sel), gcnt); + ctl = corner->save_ctl; + cpr_write(drv, REG_RBCPR_CTL, ctl); + irq = corner->save_irq; + cpr_irq_set(drv, irq); + dev_dbg(drv->dev, "gcnt = 0x%08x, ctl = 0x%08x, irq = 0x%08x\n", gcnt, + ctl, irq); +} + +static int +cpr_mx_get(struct cpr_drv *drv, struct 
fuse_corner *fuse, int apc_volt) +{ + switch (drv->vdd_mx_vmin_method) { + case VDD_MX_VMIN_APC_CORNER_CEILING: + return fuse->max_uV; + case VDD_MX_VMIN_FUSE_CORNER_MAP: + return fuse->vdd_mx_req; + } + + dev_warn(drv->dev, "Failed to get mx\n"); + return 0; +} + +static void cpr_set_acc(struct regmap *tcsr, struct fuse_corner *f, + struct fuse_corner *end) +{ + if (f < end) { + for (f += 1; f <= end; f++) + regmap_multi_reg_write(tcsr, f->accs, f->num_accs); + } else { + for (f -= 1; f >= end; f--) + regmap_multi_reg_write(tcsr, f->accs, f->num_accs); + } +} + +static int cpr_pre_voltage(struct cpr_drv *drv, + struct fuse_corner *fuse_corner, + enum voltage_change_dir dir, int vdd_mx_vmin) +{ + int ret = 0; + struct fuse_corner *prev_fuse_corner = drv->corner->fuse_corner; + + if (drv->tcsr && dir == DOWN) + cpr_set_acc(drv->tcsr, prev_fuse_corner, fuse_corner); + + if (vdd_mx_vmin && dir == UP) + ret = qcom_rpm_set_corner(drv->vdd_mx, vdd_mx_vmin); + + return ret; +} + +static int cpr_post_voltage(struct cpr_drv *drv, + struct fuse_corner *fuse_corner, + enum voltage_change_dir dir, int vdd_mx_vmin) +{ + int ret = 0; + struct fuse_corner *prev_fuse_corner = drv->corner->fuse_corner; + + if (drv->tcsr && dir == UP) + cpr_set_acc(drv->tcsr, prev_fuse_corner, fuse_corner); + + if (vdd_mx_vmin && dir == DOWN) + ret = qcom_rpm_set_corner(drv->vdd_mx, vdd_mx_vmin); + + return ret; +} + +static int cpr_regulator_notifier(struct notifier_block *nb, + unsigned long event, void *d) +{ + struct cpr_drv *drv = container_of(nb, struct cpr_drv, reg_nb); + u32 val, mask; + int last_uV, new_uV; + + switch (event) { + case REGULATOR_EVENT_VOLTAGE_CHANGE: + new_uV = (int)(uintptr_t)d; + break; + default: + return NOTIFY_OK; + } + + mutex_lock(&drv->lock); + + last_uV = drv->corner->last_uV; + + if (drv->switching_opp) { + goto unlock; + } else if (last_uV < new_uV) { + /* Disable auto nack down */ + mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN; + val = 0; + } else if (last_uV > 
new_uV) { + /* Restore default threshold for UP */ + mask = RBCPR_CTL_UP_THRESHOLD_MASK; + mask <<= RBCPR_CTL_UP_THRESHOLD_SHIFT; + val = drv->up_threshold; + val <<= RBCPR_CTL_UP_THRESHOLD_SHIFT; + } else { /* Somehow it's the same? */ + goto unlock; + } + + cpr_ctl_modify(drv, mask, val); + + /* Re-enable default interrupts */ + cpr_irq_set(drv, CPR_INT_DEFAULT); + + /* Ack */ + cpr_irq_clr_ack(drv); + + /* Save register values for the corner */ + cpr_corner_save(drv, drv->corner); + drv->corner->last_uV = new_uV; +unlock: + mutex_unlock(&drv->lock); + + return NOTIFY_OK; +} + +static int cpr_scale(struct cpr_drv *drv, enum voltage_change_dir dir) +{ + u32 val, error_steps, reg_mask; + int last_uV, new_uV, step_uV; + struct corner *corner; + + //step_uV = regulator_get_linear_step(drv->vdd_apc); + step_uV = 12500; /*TODO: Get step volt here */ + corner = drv->corner; + + val = cpr_read(drv, REG_RBCPR_RESULT_0); + + error_steps = val >> RBCPR_RESULT0_ERROR_STEPS_SHIFT; + error_steps &= RBCPR_RESULT0_ERROR_STEPS_MASK; + last_uV = corner->last_uV; + + if (dir == UP) { + if (drv->clamp_timer_interval && + error_steps < drv->up_threshold) { + /* + * Handle the case where another measurement started + * after the interrupt was triggered due to a core + * exiting from power collapse. 
+ */ + error_steps = max(drv->up_threshold, + drv->vdd_apc_step_up_limit); + } + + if (last_uV >= corner->max_uV) { + cpr_irq_clr_nack(drv); + + /* Maximize the UP threshold */ + reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK; + reg_mask <<= RBCPR_CTL_UP_THRESHOLD_SHIFT; + val = reg_mask; + cpr_ctl_modify(drv, reg_mask, val); + + /* Disable UP interrupt */ + cpr_irq_set(drv, CPR_INT_DEFAULT & ~CPR_INT_UP); + + return 0; + } + + if (error_steps > drv->vdd_apc_step_up_limit) + error_steps = drv->vdd_apc_step_up_limit; + + /* Calculate new voltage */ + new_uV = last_uV + error_steps * step_uV; + new_uV = min(new_uV, corner->max_uV); + } else if (dir == DOWN) { + if (drv->clamp_timer_interval + && error_steps < drv->down_threshold) { + /* + * Handle the case where another measurement started + * after the interrupt was triggered due to a core + * exiting from power collapse. + */ + error_steps = max(drv->down_threshold, + drv->vdd_apc_step_down_limit); + } + + if (last_uV <= corner->min_uV) { + cpr_irq_clr_nack(drv); + + /* Enable auto nack down */ + reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN; + val = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN; + + cpr_ctl_modify(drv, reg_mask, val); + + /* Disable DOWN interrupt */ + cpr_irq_set(drv, CPR_INT_DEFAULT & ~CPR_INT_DOWN); + + return 0; + } + + if (error_steps > drv->vdd_apc_step_down_limit) + error_steps = drv->vdd_apc_step_down_limit; + + /* Calculate new voltage */ + new_uV = last_uV - error_steps * step_uV; + new_uV = max(new_uV, corner->min_uV); + } + + return new_uV; +} + +static irqreturn_t cpr_irq_handler(int irq, void *dev) +{ + struct cpr_drv *drv = dev; + u32 val; + int new_uV = 0; + struct corner *corner; + + mutex_lock(&drv->lock); + + val = cpr_read(drv, REG_RBIF_IRQ_STATUS); + if (drv->flags & FLAGS_IGNORE_1ST_IRQ_STATUS) + val = cpr_read(drv, REG_RBIF_IRQ_STATUS); + + dev_dbg(drv->dev, "IRQ_STATUS = %#02x\n", val); + + if (!cpr_ctl_is_enabled(drv)) { + dev_dbg(drv->dev, "CPR is disabled\n"); + goto unlock; + } else if 
(cpr_ctl_is_busy(drv) && !drv->clamp_timer_interval) { + dev_dbg(drv->dev, "CPR measurement is not ready\n"); + goto unlock; + } else if (!cpr_is_allowed(drv)) { + val = cpr_read(drv, REG_RBCPR_CTL); + dev_err_ratelimited(drv->dev, + "Interrupt broken? RBCPR_CTL = %#02x\n", + val); + goto unlock; + } + + /* Following sequence of handling is as per each IRQ's priority */ + if (val & CPR_INT_UP) { + new_uV = cpr_scale(drv, UP); + } else if (val & CPR_INT_DOWN) { + new_uV = cpr_scale(drv, DOWN); + } else if (val & CPR_INT_MIN) { + cpr_irq_clr_nack(drv); + } else if (val & CPR_INT_MAX) { + cpr_irq_clr_nack(drv); + } else if (val & CPR_INT_MID) { + /* RBCPR_CTL_SW_AUTO_CONT_ACK_EN is enabled */ + dev_dbg(drv->dev, "IRQ occurred for Mid Flag\n"); + } else { + dev_dbg(drv->dev, "IRQ occurred for unknown flag (%#08x)\n", + val); + } + + /* Save register values for the corner */ + corner = drv->corner; + cpr_corner_save(drv, corner); +unlock: + mutex_unlock(&drv->lock); + + if (new_uV) + dev_pm_opp_adjust_voltage(drv->cpu_dev, corner->freq, new_uV); + + return IRQ_HANDLED; +} + +/* + * TODO: Register for hotplug notifier and turn on/off CPR when CPUs are offline + */ +static int cpr_enable(struct cpr_drv *drv) +{ + int ret; + + /* Enable dependency power before vdd_apc */ + if (drv->vdd_mx) { + ret = regulator_enable(drv->vdd_mx); + if (ret) + return ret; + } + + ret = regulator_enable(drv->vdd_apc); + if (ret) + return ret; + + mutex_lock(&drv->lock); + //drv->vreg_enabled = true; + if (cpr_is_allowed(drv) && drv->corner) { + cpr_irq_clr(drv); + cpr_corner_restore(drv, drv->corner); + cpr_ctl_enable(drv, drv->corner); + } + mutex_unlock(&drv->lock); + + return 0; +} + +static int cpr_disable(struct cpr_drv *drv) +{ + int ret; + + ret = regulator_disable(drv->vdd_apc); + if (ret) + return ret; + + if (drv->vdd_mx) + ret = regulator_disable(drv->vdd_mx); + if (ret) + return ret; + + mutex_lock(&drv->lock); + //drv->vreg_enabled = false; + if (cpr_is_allowed(drv)) + 
cpr_ctl_disable(drv); + mutex_unlock(&drv->lock); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int cpr_suspend(struct device *dev) +{ + struct cpr_drv *drv = platform_get_drvdata(to_platform_device(dev)); + + if (cpr_is_allowed(drv)) { + mutex_lock(&drv->lock); + cpr_ctl_disable(drv); + cpr_irq_clr(drv); + drv->suspended = true; + mutex_unlock(&drv->lock); + } + + return 0; +} + +static int cpr_resume(struct device *dev) +{ + struct cpr_drv *drv = platform_get_drvdata(to_platform_device(dev)); + + if (cpr_is_allowed(drv)) { + mutex_lock(&drv->lock); + drv->suspended = false; + cpr_irq_clr(drv); + cpr_ctl_enable(drv, drv->corner); + mutex_unlock(&drv->lock); + } + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(cpr_pm_ops, cpr_suspend, cpr_resume); + +static int cpr_config(struct cpr_drv *drv) +{ + int i; + u32 val, gcnt; + struct corner *corner; + + /* Disable interrupt and CPR */ + cpr_write(drv, REG_RBIF_IRQ_EN(0), 0); + cpr_write(drv, REG_RBCPR_CTL, 0); + + /* Program the default HW Ceiling, Floor and vlevel */ + val = RBIF_LIMIT_CEILING_DEFAULT << RBIF_LIMIT_CEILING_SHIFT; + val |= RBIF_LIMIT_FLOOR_DEFAULT; + cpr_write(drv, REG_RBIF_LIMIT, val); + cpr_write(drv, REG_RBIF_SW_VLEVEL, RBIF_SW_VLEVEL_DEFAULT); + + /* Clear the target quotient value and gate count of all ROs */ + for (i = 0; i < CPR_NUM_RING_OSC; i++) + cpr_write(drv, REG_RBCPR_GCNT_TARGET(i), 0); + + /* Init and save gcnt */ + gcnt = (drv->ref_clk_khz * drv->gcnt_us) / 1000; + gcnt = gcnt & RBCPR_GCNT_TARGET_GCNT_MASK; + gcnt <<= RBCPR_GCNT_TARGET_GCNT_SHIFT; + drv->gcnt = gcnt; + + /* Program the delay count for the timer */ + val = (drv->ref_clk_khz * drv->timer_delay_us) / 1000; + cpr_write(drv, REG_RBCPR_TIMER_INTERVAL, val); + dev_dbg(drv->dev, "Timer count: 0x%0x (for %d us)\n", val, + drv->timer_delay_us); + + /* Program Consecutive Up & Down */ + val = drv->timer_cons_down << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT; + val |= drv->timer_cons_up << RBIF_TIMER_ADJ_CONS_UP_SHIFT; + val |= 
drv->clamp_timer_interval << RBIF_TIMER_ADJ_CLAMP_INT_SHIFT; + cpr_write(drv, REG_RBIF_TIMER_ADJUST, val); + + /* Program the control register */ + val = drv->up_threshold << RBCPR_CTL_UP_THRESHOLD_SHIFT; + val |= drv->down_threshold << RBCPR_CTL_DN_THRESHOLD_SHIFT; + val |= RBCPR_CTL_TIMER_EN | RBCPR_CTL_COUNT_MODE; + val |= RBCPR_CTL_SW_AUTO_CONT_ACK_EN; + cpr_write(drv, REG_RBCPR_CTL, val); + + for (i = 0; i < drv->num_corners; i++) { + corner = &drv->corners[i]; + corner->save_ctl = val; + corner->save_irq = CPR_INT_DEFAULT; + } + + cpr_irq_set(drv, CPR_INT_DEFAULT); + + val = cpr_read(drv, REG_RBCPR_VERSION); + if (val <= RBCPR_VER_2) + drv->flags |= FLAGS_IGNORE_1ST_IRQ_STATUS; + + return 0; +} + +/* Called twice for each CPU in policy, one pre and one post event */ +static int +cpr_cpufreq_notifier(struct notifier_block *nb, unsigned long event, void *f) +{ + struct cpr_drv *drv = container_of(nb, struct cpr_drv, cpufreq_nb); + struct cpufreq_freqs *freqs = f; + unsigned long old = freqs->old * 1000; + unsigned long new = freqs->new * 1000; + struct corner *corner, *end; + enum voltage_change_dir dir; + int ret = 0, new_uV; + int vdd_mx_vmin = 0; + struct fuse_corner *fuse_corner; + + /* Determine direction */ + if (old > new) + dir = DOWN; + else if (old < new) + dir = UP; + else + dir = NO_CHANGE; + + /* Determine new corner we're going to */ + corner = drv->corners; + end = &corner[drv->num_corners - 1]; + for (; corner <= end; corner++) + if (corner->freq == new) + break; + + if (corner > end) + return -EINVAL; + + fuse_corner = corner->fuse_corner; + + if (cpr_is_allowed(drv)) { + new_uV = corner->last_uV; + } else { + new_uV = corner->uV; + } + + if (dir != NO_CHANGE && drv->vdd_mx) + vdd_mx_vmin = cpr_mx_get(drv, fuse_corner, new_uV); + + mutex_lock(&drv->lock); + if (event == CPUFREQ_PRECHANGE) { + if (drv->nb_count++) + goto unlock; + + if (cpr_is_allowed(drv)) + cpr_ctl_disable(drv); + + ret = cpr_pre_voltage(drv, fuse_corner, dir, vdd_mx_vmin); + 
if (ret) + goto unlock; + + drv->switching_opp = true; + } + + if (event == CPUFREQ_POSTCHANGE) { + if (--drv->nb_count) + goto unlock; + + ret = cpr_post_voltage(drv, fuse_corner, dir, vdd_mx_vmin); + if (ret) + goto unlock; + + if (cpr_is_allowed(drv) /* && drv->vreg_enabled */) { + cpr_irq_clr(drv); + if (drv->corner != corner) + cpr_corner_restore(drv, corner); + cpr_ctl_enable(drv, corner); + } + + drv->corner = corner; + drv->switching_opp = false; + } +unlock: + mutex_unlock(&drv->lock); + + return ret; +} + +static u32 cpr_read_efuse(void __iomem *prom, const struct qfprom_offset *efuse) +{ + u64 buffer = 0; + u8 val; + int i, num_bytes; + + num_bytes = DIV_ROUND_UP(efuse->width + efuse->shift, BITS_PER_BYTE); + + for (i = 0; i < num_bytes; i++) { + val = readb_relaxed(prom + efuse->offset + i); + buffer |= val << (i * BITS_PER_BYTE); + } + + buffer >>= efuse->shift; + buffer &= BIT(efuse->width) - 1; + + return buffer; +} + +static void +cpr_populate_ring_osc_idx(const struct cpr_fuse *fuses, struct cpr_drv *drv, + void __iomem *prom) +{ + struct fuse_corner *fuse = drv->fuse_corners; + struct fuse_corner *end = fuse + drv->num_fuse_corners; + + for (; fuse < end; fuse++, fuses++) + fuse->ring_osc_idx = cpr_read_efuse(prom, &fuses->ring_osc); +} + + +static const struct corner_adjustment *cpr_find_adjustment(u32 speed_bin, + u32 pvs_version, u32 cpr_rev, const struct cpr_desc *desc, + const struct cpr_drv *drv) +{ + int i, j; + u32 val, ro; + struct corner_adjustment *a; + + for (i = 0; i < desc->num_adjustments; i++) { + a = &desc->adjustments[i]; + + if (a->speed_bin != speed_bin && + a->speed_bin != FUSE_PARAM_MATCH_ANY) + continue; + if (a->pvs_version != pvs_version && + a->pvs_version != FUSE_PARAM_MATCH_ANY) + continue; + if (a->cpr_rev != cpr_rev && + a->cpr_rev != FUSE_PARAM_MATCH_ANY) + continue; + for (j = 0; j < drv->num_fuse_corners; j++) { + val = a->ring_osc_idx[j]; + ro = drv->fuse_corners[j].ring_osc_idx; + if (val != ro && val != 
FUSE_PARAM_MATCH_ANY) + break; + } + if (j == drv->num_fuse_corners) + return a; + } + + return NULL; +} + +static const int *cpr_get_pvs_uV(const struct cpr_desc *desc, + struct nvmem_device *qfprom) +{ + const struct qfprom_offset *pvs_efuse; + const struct qfprom_offset *redun; + unsigned int idx = 0; + u8 expected; + u32 bin; + + redun = &desc->pvs_fuses->redundant; + expected = desc->pvs_fuses->redundant_value; + if (redun->width) + idx = !!(cpr_read_efuse(qfprom, redun) == expected); + + pvs_efuse = &desc->pvs_fuses->pvs_fuse[idx]; + bin = cpr_read_efuse(qfprom, pvs_efuse); + + return desc->pvs_fuses->pvs_bins[bin].uV; +} + +static int cpr_read_fuse_uV(const struct cpr_desc *desc, + const struct fuse_corner_data *fdata, + const struct qfprom_offset *init_v_efuse, + struct nvmem_device *qfprom, int step_volt) +{ + int step_size_uV, steps, uV; + u32 bits; + + bits = cpr_read_efuse(qfprom, init_v_efuse); + steps = bits & ~BIT(init_v_efuse->width - 1); + /* Not two's complement.. instead highest bit is sign bit */ + if (bits & BIT(init_v_efuse->width - 1)) + steps = -steps; + + step_size_uV = desc->cpr_fuses.init_voltage_step; + + uV = fdata->ref_uV + steps * step_size_uV; + return DIV_ROUND_UP(uV, step_volt) * step_volt; +} + +static void cpr_fuse_corner_init(struct cpr_drv *drv, + const struct cpr_desc *desc, + void __iomem *qfprom, + const struct cpr_fuse *fuses, u32 speed, + const struct corner_adjustment *adjustments, + const struct acc_desc *acc_desc) +{ + int i; + unsigned int step_volt; + const struct fuse_corner_data *fdata; + struct fuse_corner *fuse, *end, *prev; + const struct qfprom_offset *redun; + const struct fuse_conditional_min_volt *min_v; + const struct fuse_uplift_wa *up; + bool do_min_v = false, do_uplift = false; + const int *pvs_uV = NULL; + const int *adj_min; + int uV, diff; + u32 min_uV; + u8 expected; + const struct reg_sequence *accs; + + redun = &acc_desc->override; + expected = acc_desc->override_value; + if (redun->width && 
cpr_read_efuse(qfprom, redun) == expected) + accs = acc_desc->override_settings; + else + accs = acc_desc->settings; + + /* Figure out if we should apply workarounds */ + min_v = desc->min_volt_fuse; + do_min_v = min_v && + cpr_read_efuse(qfprom, &min_v->redundant) == min_v->expected; + if (do_min_v) + min_uV = min_v->min_uV; + + up = desc->uplift_wa; + if (!do_min_v && up) + if (cpr_read_efuse(qfprom, &up->redundant) == up->expected) + do_uplift = up->speed_bin == speed; + + /* + * The initial voltage for each fuse corner may be determined by one of + * two ways. Either initial voltages are encoded for each fuse corner + * in a dedicated fuse per fuse corner (fuses::init_voltage), or we + * use the PVS bin fuse to use a table of initial voltages (pvs_uV). + */ + if (fuses->init_voltage.width) + //step_volt = regulator_get_linear_step(drv->vdd_apc); + step_volt = 12500; /* TODO: Replace with ^ when apc_reg ready */ + else + pvs_uV = cpr_get_pvs_uV(desc, qfprom); + + /* Populate fuse_corner members */ + adj_min = adjustments->fuse_quot_min; + fuse = drv->fuse_corners; + end = &fuse[drv->num_fuse_corners - 1]; + fdata = desc->cpr_fuses.fuse_corner_data; + + for (i = 0, prev = NULL; fuse <= end; fuse++, fuses++, i++, fdata++) { + + /* Populate uV */ + if (pvs_uV) + uV = pvs_uV[i]; + else + uV = cpr_read_fuse_uV(desc, fdata, &fuses->init_voltage, + qfprom, step_volt); + + if (adjustments->fuse_init_uV) + uV += adjustments->fuse_init_uV[i]; + + fuse->min_uV = fdata->min_uV; + fuse->max_uV = fdata->max_uV; + + if (do_min_v) { + if (fuse->max_uV < min_uV) { + fuse->max_uV = min_uV; + fuse->min_uV = min_uV; + } else if (fuse->min_uV < min_uV) { + fuse->min_uV = min_uV; + } + } + + fuse->uV = clamp(uV, fuse->min_uV, fuse->max_uV); + + if (fuse == end) { + if (do_uplift) { + end->uV += up->uV; + end->uV = clamp(end->uV, 0, up->max_uV); + } + /* + * Allow the highest fuse corner's PVS voltage to + * define the ceiling voltage for that corner in order + * to support SoC's in 
which variable ceiling values + * are required. + */ + end->max_uV = max(end->max_uV, end->uV); + } + + /* Populate target quotient by scaling */ + fuse->quot = cpr_read_efuse(qfprom, &fuses->quotient); + fuse->quot *= fdata->quot_scale; + fuse->quot += fdata->quot_offset; + + if (adjustments->fuse_quot) { + fuse->quot += adjustments->fuse_quot[i]; + + if (prev && adjustments->fuse_quot_diff) { + diff = adjustments->fuse_quot_diff[i]; + if (fuse->quot - prev->quot <= diff) + fuse->quot = prev->quot + adj_min[i]; + } + prev = fuse; + } + + if (do_uplift) + fuse->quot += up->quot[i]; + + fuse->step_quot = desc->step_quot[fuse->ring_osc_idx]; + + /* Populate acc settings */ + fuse->accs = accs; + fuse->num_accs = acc_desc->num_regs_per_fuse; + accs += acc_desc->num_regs_per_fuse; + + /* Populate MX request */ + fuse->vdd_mx_req = fdata->vdd_mx_req; + } + + /* + * Restrict all fuse corner PVS voltages based upon per corner + * ceiling and floor voltages. + */ + for (fuse = drv->fuse_corners, i = 0; fuse <= end; fuse++, i++) { + if (fuse->uV > fuse->max_uV) + fuse->uV = fuse->max_uV; + else if (fuse->uV < fuse->min_uV) + fuse->uV = fuse->min_uV; + + dev_dbg(drv->dev, + "fuse corner %d: [%d %d %d] RO%d quot %d squot %d\n", + i, fuse->min_uV, fuse->uV, fuse->max_uV, + fuse->ring_osc_idx, fuse->quot, fuse->step_quot); + } +} + +static struct device *cpr_get_cpu_device(struct device_node *of_node, int index) +{ + struct device_node *np; + int cpu; + + np = of_parse_phandle(of_node, "qcom,cpr-cpus", index); + if (!np) + return NULL; + + for_each_possible_cpu(cpu) + if (arch_find_n_match_cpu_physical_id(np, cpu, NULL)) + break; + + of_node_put(np); + if (cpu >= nr_cpu_ids) + return NULL; + + return get_cpu_device(cpu); +} + +/* + * Get the clock and regulator for the first CPU so we can update OPPs, + * listen in on regulator voltage change events, and figure out the + * boot OPP based on clock frequency. 
+ */ +static int +cpr_get_cpu_resources(struct cpr_drv *drv, struct device_node *of_node) +{ + struct device *cpu_dev; + + cpu_dev = cpr_get_cpu_device(of_node, 0); + if (!cpu_dev) + return -EINVAL; + + drv->cpu_dev = cpu_dev; + drv->vdd_apc = devm_regulator_get(cpu_dev, "cpu"); + if (IS_ERR(drv->vdd_apc)) + return PTR_ERR(drv->vdd_apc); + drv->cpu_clk = devm_clk_get(cpu_dev, NULL); + + return PTR_ERR_OR_ZERO(drv->cpu_clk); +} + +static int cpr_populate_opps(struct device_node *of_node, struct cpr_drv *drv, + const struct corner_data **plan) +{ + int i, j, ret; + struct device *cpu_dev; + struct corner *corner; + const struct corner_data *p; + + for (i = 0; (cpu_dev = cpr_get_cpu_device(of_node, i)); i++) + for (j = 0, corner = drv->corners; plan[j]; j++, corner++) { + p = plan[j]; + ret = dev_pm_opp_add(cpu_dev, p->freq, corner->uV); + if (ret) + return ret; + } + + return 0; +} + +static const struct corner_data ** +find_freq_plan(const struct cpr_desc *desc, u32 speed_bin, u32 pvs_version) +{ + int i; + const struct freq_plan *p; + + for (i = 0; i < desc->num_freq_plans; i++) { + p = &desc->freq_plans[i]; + + if (p->speed_bin != speed_bin && + p->speed_bin != FUSE_PARAM_MATCH_ANY) + continue; + if (p->pvs_version != pvs_version && + p->pvs_version != FUSE_PARAM_MATCH_ANY) + continue; + + return p->plan; + } + + return NULL; + +} + +static struct corner_override *find_corner_override(const struct cpr_desc *desc, + u32 speed_bin, u32 pvs_version) +{ + int i; + struct corner_override *o; + + for (i = 0; i < desc->num_corner_overrides; i++) { + o = &desc->corner_overrides[i]; + + if (o->speed_bin != speed_bin && + o->speed_bin != FUSE_PARAM_MATCH_ANY) + continue; + if (o->pvs_version != pvs_version && + o->pvs_version != FUSE_PARAM_MATCH_ANY) + continue; + + return o; + } + + return NULL; + +} +static int cpr_calculate_scaling(const struct qfprom_offset *quot_offset, + struct nvmem_device *qfprom, + const struct fuse_corner_data *fdata, + int adj_quot_offset, + 
const struct corner *corner) +{ + int quot_diff; + unsigned long freq_diff; + int scaling; + const struct fuse_corner *fuse, *prev_fuse; + + fuse = corner->fuse_corner; + prev_fuse = fuse - 1; + + if (quot_offset->width) { + quot_diff = cpr_read_efuse(qfprom, quot_offset); + quot_diff *= fdata->quot_scale; + quot_diff += adj_quot_offset; + } else { + quot_diff = fuse->quot - prev_fuse->quot; + } + + freq_diff = fuse->max_freq - prev_fuse->max_freq; + freq_diff /= 1000000; /* Convert to MHz */ + scaling = 1000 * quot_diff / freq_diff; + return min(scaling, fdata->max_quot_scale); +} + +static int cpr_interpolate(const struct corner *corner, int step_volt, + const struct fuse_corner_data *fdata) +{ + unsigned long f_high, f_low, f_diff; + int uV_high, uV_low, uV; + u64 temp, temp_limit; + const struct fuse_corner *fuse, *prev_fuse; + + fuse = corner->fuse_corner; + prev_fuse = fuse - 1; + + f_high = fuse->max_freq; + f_low = prev_fuse->max_freq; + uV_high = fuse->uV; + uV_low = prev_fuse->uV; + f_diff = fuse->max_freq - corner->freq; + + /* + * Don't interpolate in the wrong direction. This could happen + * if the adjusted fuse voltage overlaps with the previous fuse's + * adjusted voltage. + */ + if (f_high <= f_low || uV_high <= uV_low || f_high <= corner->freq) + return corner->uV; + + temp = f_diff * (uV_high - uV_low); + do_div(temp, f_high - f_low); + + /* + * max_volt_scale has units of uV/MHz while freq values + * have units of Hz. Divide by 1000000 to convert to. 
+ */ + temp_limit = f_diff * fdata->max_volt_scale; + do_div(temp_limit, 1000000); + + uV = uV_high - min(temp, temp_limit); + return roundup(uV, step_volt); +} + +static void cpr_corner_init(struct cpr_drv *drv, const struct cpr_desc *desc, + const struct cpr_fuse *fuses, u32 speed_bin, + u32 pvs_version, void __iomem *qfprom, + const struct corner_adjustment *adjustments, + const struct corner_data **plan) +{ + int i, fnum, scaling; + const struct qfprom_offset *quot_offset; + struct fuse_corner *fuse, *prev_fuse; + struct corner *corner, *end; + const struct corner_data *cdata, *p; + const struct fuse_corner_data *fdata; + bool apply_scaling; + const int *adj_quot_offset; + unsigned long freq_corner, freq_diff, freq_diff_mhz; + int step_volt = 12500; /* TODO: Get from regulator APIs */ + const struct corner_override *override; + + corner = drv->corners; + end = &corner[drv->num_corners - 1]; + cdata = desc->corner_data; + adj_quot_offset = adjustments->fuse_quot_offset; + + override = find_corner_override(desc, speed_bin, pvs_version); + + /* + * Store maximum frequency for each fuse corner based on the frequency + * plan + */ + for (i = 0; plan[i]; i++) { + p = plan[i]; + freq_corner = p->freq; + fnum = p->fuse_corner; + fuse = &drv->fuse_corners[fnum]; + if (freq_corner > fuse->max_freq) + fuse->max_freq = freq_corner; + + } + + /* + * Get the quotient adjustment scaling factor, according to: + * + * scaling = min(1000 * (QUOT(corner_N) - QUOT(corner_N-1)) + * / (freq(corner_N) - freq(corner_N-1)), max_factor) + * + * QUOT(corner_N): quotient read from fuse for fuse corner N + * QUOT(corner_N-1): quotient read from fuse for fuse corner (N - 1) + * freq(corner_N): max frequency in MHz supported by fuse corner N + * freq(corner_N-1): max frequency in MHz supported by fuse corner + * (N - 1) + * + * Then walk through the corners mapped to each fuse corner + * and calculate the quotient adjustment for each one using the + * following formula: + * + * quot_adjust = 
(freq_max - freq_corner) * scaling / 1000 + * + * freq_max: max frequency in MHz supported by the fuse corner + * freq_corner: frequency in MHz corresponding to the corner + * scaling: calculated from above equation + * + * + * + + + * | v | + * q | f c o | f c + * u | c l | c + * o | f t | f + * t | c a | c + * | c f g | c f + * | e | + * +--------------- +---------------- + * 0 1 2 3 4 5 6 0 1 2 3 4 5 6 + * corner corner + * + * c = corner + * f = fuse corner + * + */ + for (apply_scaling = false, i = 0; corner <= end; corner++, i++) { + fnum = cdata[i].fuse_corner; + fdata = &desc->cpr_fuses.fuse_corner_data[fnum]; + quot_offset = &fuses[fnum].quotient_offset; + fuse = &drv->fuse_corners[fnum]; + if (fnum) + prev_fuse = &drv->fuse_corners[fnum - 1]; + else + prev_fuse = NULL; + + corner->fuse_corner = fuse; + corner->freq = cdata[i].freq; + corner->uV = fuse->uV; + + if (prev_fuse && cdata[i - 1].freq == prev_fuse->max_freq) { + scaling = cpr_calculate_scaling(quot_offset, qfprom, + fdata, adj_quot_offset ? + adj_quot_offset[fnum] : 0, + corner); + apply_scaling = true; + } else if (corner->freq == fuse->max_freq) { + /* This is a fuse corner; don't scale anything */ + apply_scaling = false; + } + + if (apply_scaling) { + freq_diff = fuse->max_freq - corner->freq; + freq_diff_mhz = freq_diff / 1000000; + corner->quot_adjust = scaling * freq_diff_mhz / 1000; + + corner->uV = cpr_interpolate(corner, step_volt, fdata); + } + + if (adjustments->fuse_quot) + corner->quot_adjust -= adjustments->fuse_quot[i]; + + if (adjustments->init_uV) + corner->uV += adjustments->init_uV[i]; + + /* Load per corner ceiling and floor voltages if they exist. 
*/ + if (override) { + corner->max_uV = override->max_uV[i]; + corner->min_uV = override->min_uV[i]; + } else { + corner->max_uV = fuse->max_uV; + corner->min_uV = fuse->min_uV; + } + + corner->uV = clamp(corner->uV, corner->min_uV, corner->max_uV); + corner->last_uV = corner->uV; + + /* Reduce the ceiling voltage if needed */ + if (desc->reduce_to_corner_uV && corner->uV < corner->max_uV) + corner->max_uV = corner->uV; + else if (desc->reduce_to_fuse_uV && fuse->uV < corner->max_uV) + corner->max_uV = max(corner->min_uV, fuse->uV); + + dev_dbg(drv->dev, "corner %d: [%d %d %d] quot %d\n", i, + corner->min_uV, corner->uV, corner->max_uV, + fuse->quot - corner->quot_adjust); + } +} + +static const struct cpr_fuse * +cpr_get_fuses(const struct cpr_desc *desc, void __iomem *qfprom) +{ + u32 expected = desc->cpr_fuses.redundant_value; + const struct qfprom_offset *fuse = &desc->cpr_fuses.redundant; + unsigned int idx; + + idx = !!(fuse->width && cpr_read_efuse(qfprom, fuse) == expected); + + return &desc->cpr_fuses.cpr_fuse[idx * desc->num_fuse_corners]; +} + +static bool cpr_is_close_loop_disabled(struct cpr_drv *drv, + const struct cpr_desc *desc, void __iomem *qfprom, + const struct cpr_fuse *fuses, + const struct corner_adjustment *adj) +{ + const struct qfprom_offset *disable; + unsigned int idx; + struct fuse_corner *highest_fuse, *second_highest_fuse; + int min_diff_quot, diff_quot; + + if (adj->disable_closed_loop) + return true; + + if (!desc->cpr_fuses.disable) + return false; + + /* + * Are the fuses the redundant ones? This avoids reading the fuse + * redundant bit again + */ + idx = !!(fuses == desc->cpr_fuses.cpr_fuse); + disable = &desc->cpr_fuses.disable[idx]; + + if (cpr_read_efuse(qfprom, disable)) + return true; + + if (!fuses->quotient_offset.width) { + /* + * Check if the target quotients for the highest two fuse + * corners are too close together. 
+ */ + highest_fuse = &drv->fuse_corners[drv->num_fuse_corners - 1]; + second_highest_fuse = highest_fuse - 1; + + min_diff_quot = desc->min_diff_quot; + diff_quot = highest_fuse->quot - second_highest_fuse->quot; + + return diff_quot < min_diff_quot; + } + + return false; +} + +static int cpr_init_parameters(struct platform_device *pdev, + struct cpr_drv *drv) +{ + struct device_node *of_node = pdev->dev.of_node; + int ret; + + ret = of_property_read_u32(of_node, "qcom,cpr-ref-clk", + &drv->ref_clk_khz); + if (ret) + return ret; + ret = of_property_read_u32(of_node, "qcom,cpr-timer-delay-us", + &drv->timer_delay_us); + if (ret) + return ret; + ret = of_property_read_u32(of_node, "qcom,cpr-timer-cons-up", + &drv->timer_cons_up); + if (ret) + return ret; + ret = of_property_read_u32(of_node, "qcom,cpr-timer-cons-down", + &drv->timer_cons_down); + if (ret) + return ret; + drv->timer_cons_down &= RBIF_TIMER_ADJ_CONS_DOWN_MASK; + + ret = of_property_read_u32(of_node, "qcom,cpr-up-threshold", + &drv->up_threshold); + drv->up_threshold &= RBCPR_CTL_UP_THRESHOLD_MASK; + if (ret) + return ret; + + ret = of_property_read_u32(of_node, "qcom,cpr-down-threshold", + &drv->down_threshold); + drv->down_threshold &= RBCPR_CTL_DN_THRESHOLD_MASK; + if (ret) + return ret; + + ret = of_property_read_u32(of_node, "qcom,cpr-idle-clocks", + &drv->idle_clocks); + drv->idle_clocks &= RBCPR_STEP_QUOT_IDLE_CLK_MASK; + if (ret) + return ret; + + ret = of_property_read_u32(of_node, "qcom,cpr-gcnt-us", &drv->gcnt_us); + if (ret) + return ret; + ret = of_property_read_u32(of_node, "qcom,vdd-apc-step-up-limit", + &drv->vdd_apc_step_up_limit); + if (ret) + return ret; + ret = of_property_read_u32(of_node, "qcom,vdd-apc-step-down-limit", + &drv->vdd_apc_step_down_limit); + if (ret) + return ret; + + ret = of_property_read_u32(of_node, "qcom,cpr-clamp-timer-interval", + &drv->clamp_timer_interval); + if (ret && ret != -EINVAL) + return ret; + + drv->clamp_timer_interval = min_t(unsigned int, + 
drv->clamp_timer_interval, + RBIF_TIMER_ADJ_CLAMP_INT_MASK); + + dev_dbg(drv->dev, "up threshold = %u, down threshold = %u\n", + drv->up_threshold, drv->down_threshold); + + return 0; +} + +static int cpr_init_and_enable_corner(struct cpr_drv *drv) +{ + unsigned long rate; + const struct corner *end; + + end = &drv->corners[drv->num_corners - 1]; + rate = clk_get_rate(drv->cpu_clk); + + for (drv->corner = drv->corners; drv->corner <= end; drv->corner++) + if (drv->corner->freq == rate) + break; + + if (drv->corner > end) + return -EINVAL; + + return cpr_enable(drv); +} + +static struct corner_data msm8916_corner_data[] = { + /* [corner] -> { fuse corner, freq } */ + { 0, 200000000 }, + { 0, 400000000 }, + { 1, 533330000 }, + { 1, 800000000 }, + { 2, 998400000 }, + { 2, 1094400000 }, + { 2, 1152000000 }, + { 2, 1209600000 }, + { 2, 1363200000 }, +}; + +static const struct cpr_desc msm8916_desc = { + .num_fuse_corners = 3, + .vdd_mx_vmin_method = VDD_MX_VMIN_FUSE_CORNER_MAP, + .min_diff_quot = CPR_FUSE_MIN_QUOT_DIFF, + .step_quot = (int []){ 26, 26, 26, 26, 26, 26, 26, 26 }, + .cpr_fuses = { + .init_voltage_step = 10000, + .fuse_corner_data = (struct fuse_corner_data[]){ + /* ref_uV max_uV min_uV max_q q_off q_scl v_scl mx */ + { 1050000, 1050000, 1050000, 0, 0, 1, 0, 3 }, + { 1150000, 1150000, 1050000, 0, 0, 1, 0, 4 }, + { 1350000, 1350000, 1162500, 650, 0, 1, 0, 6 }, + }, + .cpr_fuse = (struct cpr_fuse[]){ + { + .ring_osc = { 222, 3, 6}, + .init_voltage = { 220, 6, 2 }, + .quotient = { 221, 12, 2 }, + }, + { + .ring_osc = { 222, 3, 6}, + .init_voltage = { 218, 6, 2 }, + .quotient = { 219, 12, 0 }, + }, + { + .ring_osc = { 222, 3, 6}, + .init_voltage = { 216, 6, 0 }, + .quotient = { 216, 12, 6 }, + }, + }, + .disable = &(struct qfprom_offset){ 223, 1, 1 }, + }, + .speed_bin = { 12, 3, 2 }, + .pvs_version = { 6, 2, 7 }, + .corner_data = msm8916_corner_data, + .num_corners = ARRAY_SIZE(msm8916_corner_data), + .num_freq_plans = 3, + .freq_plans = (struct freq_plan[]){ 
+ { + .speed_bin = 0, + .pvs_version = 0, + .plan = (const struct corner_data* []){ + msm8916_corner_data + 0, + msm8916_corner_data + 1, + msm8916_corner_data + 2, + msm8916_corner_data + 3, + msm8916_corner_data + 4, + msm8916_corner_data + 5, + msm8916_corner_data + 6, + msm8916_corner_data + 7, + NULL + }, + }, + { + .speed_bin = 0, + .pvs_version = 1, + .plan = (const struct corner_data* []){ + msm8916_corner_data + 0, + msm8916_corner_data + 1, + msm8916_corner_data + 2, + msm8916_corner_data + 3, + msm8916_corner_data + 4, + msm8916_corner_data + 5, + msm8916_corner_data + 6, + msm8916_corner_data + 7, + NULL + }, + }, + { + .speed_bin = 2, + .pvs_version = 0, + .plan = (const struct corner_data* []){ + msm8916_corner_data + 0, + msm8916_corner_data + 1, + msm8916_corner_data + 2, + msm8916_corner_data + 3, + msm8916_corner_data + 4, + msm8916_corner_data + 5, + msm8916_corner_data + 6, + msm8916_corner_data + 7, + msm8916_corner_data + 8, + NULL + }, + }, + }, +}; + +static const struct acc_desc msm8916_acc_desc = { + .settings = (struct reg_sequence[]){ + { 0xf000, 0 }, + { 0xf000, 0x100 }, + { 0xf000, 0x101 } + }, + .override_settings = (struct reg_sequence[]){ + { 0xf000, 0 }, + { 0xf000, 0x100 }, + { 0xf000, 0x100 } + }, + .num_regs_per_fuse = 1, + .override = { 6, 1, 4 }, + .override_value = 1, +}; + +static const struct of_device_id cpr_descs[] = { + { .compatible = "qcom,qfprom-msm8916", .data = &msm8916_desc }, + { } +}; + +static const struct of_device_id acc_descs[] = { + { .compatible = "qcom,tcsr-msm8916", .data = &msm8916_acc_desc }, + { } +}; + +static int cpr_probe(struct platform_device *pdev) +{ + struct resource *res; + struct device *dev = &pdev->dev; + struct cpr_drv *drv; + const struct cpr_fuse *cpr_fuses; + const struct corner_adjustment *adj; + const struct corner_adjustment empty_adj = { }; + const struct corner_data **plan; + size_t len; + int irq, ret; + const struct cpr_desc *desc; + const struct acc_desc *acc_desc; + const 
struct of_device_id *match; + struct device_node *np; + void __iomem *qfprom; + u32 cpr_rev = FUSE_REVISION_UNKNOWN; + u32 speed_bin = SPEED_BIN_NONE; + u32 pvs_version = 0; + + np = of_parse_phandle(dev->of_node, "eeprom", 0); + if (!np) + return -ENODEV; + + match = of_match_node(cpr_descs, np); + of_node_put(np); + if (!match) + return -EINVAL; + desc = match->data; + + /* TODO: Get from eeprom API */ + qfprom = devm_ioremap(dev, 0x58000, 0x7000); + if (!qfprom) + return -ENOMEM; + + len = sizeof(*drv) + + sizeof(*drv->fuse_corners) * desc->num_fuse_corners + + sizeof(*drv->corners) * desc->num_corners; + + drv = devm_kzalloc(dev, len, GFP_KERNEL); + if (!drv) + return -ENOMEM; + drv->dev = dev; + + np = of_parse_phandle(dev->of_node, "acc-syscon", 0); + if (!np) + return -ENODEV; + + match = of_match_node(acc_descs, np); + if (!match) { + of_node_put(np); + return -EINVAL; + } + + acc_desc = match->data; + drv->tcsr = syscon_node_to_regmap(np); + of_node_put(np); + if (IS_ERR(drv->tcsr)) + return PTR_ERR(drv->tcsr); + + drv->num_fuse_corners = desc->num_fuse_corners; + drv->num_corners = desc->num_corners; + drv->fuse_corners = (struct fuse_corner *)(drv + 1); + drv->corners = (struct corner *)(drv->fuse_corners + + drv->num_fuse_corners); + mutex_init(&drv->lock); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + drv->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(drv->base)) + return PTR_ERR(drv->base); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return -EINVAL; + + ret = cpr_get_cpu_resources(drv, dev->of_node); + if (ret) + return ret; + + drv->vdd_mx = devm_regulator_get(dev, "vdd-mx"); + if (IS_ERR(drv->vdd_mx)) + return PTR_ERR(drv->vdd_mx); + + drv->vdd_mx_vmin_method = desc->vdd_mx_vmin_method; + + if (desc->fuse_revision.width) + cpr_rev = cpr_read_efuse(qfprom, &desc->fuse_revision); + if (desc->speed_bin.width) + speed_bin = cpr_read_efuse(qfprom, &desc->speed_bin); + if (desc->pvs_version.width) + pvs_version = 
cpr_read_efuse(qfprom, &desc->pvs_version); + + plan = find_freq_plan(desc, speed_bin, pvs_version); + if (!plan) + return -EINVAL; + + cpr_fuses = cpr_get_fuses(desc, qfprom); + cpr_populate_ring_osc_idx(cpr_fuses, drv, qfprom); + + adj = cpr_find_adjustment(speed_bin, pvs_version, cpr_rev, desc, drv); + if (!adj) + adj = &empty_adj; + + cpr_fuse_corner_init(drv, desc, qfprom, cpr_fuses, speed_bin, adj, + acc_desc); + cpr_corner_init(drv, desc, cpr_fuses, speed_bin, pvs_version, qfprom, + adj, plan); + + ret = cpr_populate_opps(dev->of_node, drv, plan); + if (ret) + return ret; + + drv->loop_disabled = cpr_is_close_loop_disabled(drv, desc, qfprom, + cpr_fuses, adj); + dev_dbg(drv->dev, "CPR closed loop is %sabled\n", + drv->loop_disabled ? "dis" : "en"); + + ret = cpr_init_parameters(pdev, drv); + if (ret) + return ret; + + /* Configure CPR HW but keep it disabled */ + ret = cpr_config(drv); + if (ret) + return ret; + + /* Enable ACC if required */ + if (acc_desc->enable_mask) + regmap_update_bits(drv->tcsr, acc_desc->enable_reg, + acc_desc->enable_mask, + acc_desc->enable_mask); + + ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, + cpr_irq_handler, IRQF_ONESHOT | IRQF_TRIGGER_RISING, + "cpr", drv); + if (ret) + return ret; + + ret = cpr_init_and_enable_corner(drv); + if (ret) + return ret; + + drv->reg_nb.notifier_call = cpr_regulator_notifier; + ret = regulator_register_notifier(drv->vdd_apc, &drv->reg_nb); + if (ret) + return ret; + + drv->cpufreq_nb.notifier_call = cpr_cpufreq_notifier; + ret = cpufreq_register_notifier(&drv->cpufreq_nb, + CPUFREQ_TRANSITION_NOTIFIER); + if (ret) { + regulator_unregister_notifier(drv->vdd_apc, &drv->reg_nb); + return ret; + } + + platform_set_drvdata(pdev, drv); + + return 0; +} + +static int cpr_remove(struct platform_device *pdev) +{ + struct cpr_drv *drv = platform_get_drvdata(pdev); + + if (cpr_is_allowed(drv)) { + cpr_ctl_disable(drv); + cpr_irq_set(drv, 0); + } + + return 0; +} + +static const struct of_device_id 
cpr_match_table[] = { + { .compatible = "qcom,cpr" }, + { } +}; +MODULE_DEVICE_TABLE(of, cpr_match_table); + +static struct platform_driver cpr_driver = { + .probe = cpr_probe, + .remove = cpr_remove, + .driver = { + .name = "qcom-cpr", + .of_match_table = cpr_match_table, + .pm = &cpr_pm_ops, + }, +}; +module_platform_driver(cpr_driver); + +MODULE_DESCRIPTION("Core Power Reduction (CPR) driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:qcom-cpr"); diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig index 936f7ccc9736..97451d0a8d72 100644 --- a/drivers/regulator/Kconfig +++ b/drivers/regulator/Kconfig @@ -609,6 +609,18 @@ config REGULATOR_QCOM_RPM Qualcomm RPM as a module. The module will be named "qcom_rpm-regulator". +config REGULATOR_QCOM_SAW + tristate "Qualcomm SAW regulator driver" + depends on (ARCH_QCOM || COMPILE_TEST) && MFD_SYSCON + help + If you say yes to this option, support will be included for the + regulators providing power to the CPU cores on devices such as + APQ8064. + + Say M here if you want to include support for the CPU core voltage + regulators as a module. The module will be named + "qcom_saw-regulator". 
+ config REGULATOR_QCOM_SMD_RPM tristate "Qualcomm SMD based RPM regulator driver" depends on QCOM_SMD_RPM diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile index 2142a5d3fc08..34a60b926932 100644 --- a/drivers/regulator/Makefile +++ b/drivers/regulator/Makefile @@ -69,6 +69,7 @@ obj-$(CONFIG_REGULATOR_MT6311) += mt6311-regulator.o obj-$(CONFIG_REGULATOR_MT6323) += mt6323-regulator.o obj-$(CONFIG_REGULATOR_MT6397) += mt6397-regulator.o obj-$(CONFIG_REGULATOR_QCOM_RPM) += qcom_rpm-regulator.o +obj-$(CONFIG_REGULATOR_QCOM_SAW)+= qcom_saw-regulator.o obj-$(CONFIG_REGULATOR_QCOM_SMD_RPM) += qcom_smd-regulator.o obj-$(CONFIG_REGULATOR_QCOM_SPMI) += qcom_spmi-regulator.o obj-$(CONFIG_REGULATOR_PALMAS) += palmas-regulator.o diff --git a/drivers/regulator/qcom_saw-regulator.c b/drivers/regulator/qcom_saw-regulator.c new file mode 100644 index 000000000000..c1411885680e --- /dev/null +++ b/drivers/regulator/qcom_saw-regulator.c @@ -0,0 +1,229 @@ +/* + * Copyright (c) 2016, Linaro Limited. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/regulator/driver.h> +#include <linux/regulator/of_regulator.h> + +#define SPM_REG_STS_1 0x10 +#define SPM_REG_VCTL 0x14 +#define SPM_REG_PMIC_DATA_0 0x28 +#define SPM_REG_PMIC_DATA_1 0x2c +#define SPM_REG_RST 0x30 + +struct saw_vreg { + struct device *dev; + struct regmap *regmap; + struct regulator_desc rdesc; + struct regulator_dev *rdev; + unsigned int sel; +}; + +struct spm_vlevel_data { + struct saw_vreg *vreg; + unsigned int sel; +}; + +static int saw_regulator_get_voltage_sel(struct regulator_dev *rdev) +{ + struct saw_vreg *vreg = rdev_get_drvdata(rdev); + + return vreg->sel; +} + +static void smp_set_vdd(void *data) +{ + struct spm_vlevel_data *vdata = (struct spm_vlevel_data *)data; + struct saw_vreg *vreg = vdata->vreg; + unsigned long new_sel = vdata->sel; + u32 val, new_val; + u32 vctl, data0, data1; + unsigned long timeout; + + if (vreg->sel == new_sel) + return; + + regmap_read(vreg->regmap, SPM_REG_VCTL, &vctl); + regmap_read(vreg->regmap, SPM_REG_PMIC_DATA_0, &data0); + regmap_read(vreg->regmap, SPM_REG_PMIC_DATA_1, &data1); + + /* select the band */ + val = 0x80 | new_sel; + + vctl &= ~0xff; + vctl |= val; + + data0 &= ~0xff; + data0 |= val; + + data1 &= ~0x3f; + data1 |= val & 0x3f; + data1 &= ~0x3f0000; + data1 |= ((val & 0x3f) << 16); + + regmap_write(vreg->regmap, SPM_REG_RST, 1); + regmap_write(vreg->regmap, SPM_REG_VCTL, vctl); + regmap_write(vreg->regmap, SPM_REG_PMIC_DATA_0, data0); + regmap_write(vreg->regmap, SPM_REG_PMIC_DATA_1, data1); + + timeout = jiffies + usecs_to_jiffies(100); + do { + regmap_read(vreg->regmap, SPM_REG_STS_1, &new_val); + new_val &= 0xff; + if (new_val == val) { + vreg->sel = new_sel; + return; + } + + cpu_relax(); + + } while (time_before(jiffies, timeout)); + + 
pr_err("%s: Voltage not changed: %#x\n", __func__, new_val); +} + +static int saw_regulator_set_voltage_sel(struct regulator_dev *rdev, + unsigned selector) +{ + struct saw_vreg *vreg = rdev_get_drvdata(rdev); + struct spm_vlevel_data data; + int cpu = rdev_get_id(rdev); + + data.vreg = vreg; + data.sel = selector; + + return smp_call_function_single(cpu, smp_set_vdd, &data, true); +} + +static struct regulator_ops saw_regulator_ops = { + .list_voltage = regulator_list_voltage_linear_range, + .set_voltage_sel = saw_regulator_set_voltage_sel, + .get_voltage_sel = saw_regulator_get_voltage_sel, + .set_voltage_time_sel = regulator_set_voltage_time_sel, +}; + +static struct regulator_desc saw_regulator = { + .owner = THIS_MODULE, + .type = REGULATOR_VOLTAGE, + .ops = &saw_regulator_ops, + .linear_ranges = (struct regulator_linear_range[]) { + REGULATOR_LINEAR_RANGE(700000, 0, 56, 12500), + }, + .n_linear_ranges = 1, + .n_voltages = 57, + .ramp_delay = 1250, +}; + +static struct saw_vreg *saw_get_drv(struct platform_device *pdev, + int *vreg_cpu) +{ + struct saw_vreg *vreg = NULL; + struct device_node *cpu_node, *saw_node; + int cpu; + bool found; + + for_each_possible_cpu(cpu) { + cpu_node = of_cpu_device_node_get(cpu); + if (!cpu_node) + continue; + saw_node = of_parse_phandle(cpu_node, "qcom,saw", 0); + found = (saw_node == pdev->dev.of_node->parent); + of_node_put(saw_node); + of_node_put(cpu_node); + if (found) + break; + } + + if (found) { + vreg = devm_kzalloc(&pdev->dev, sizeof(*vreg), GFP_KERNEL); + if (vreg) + *vreg_cpu = cpu; + } + + return vreg; +} + +static const struct of_device_id qcom_saw_regulator_match[] = { + { .compatible = "qcom,apq8064-saw2-v1.1-regulator" }, + { } +}; +MODULE_DEVICE_TABLE(of, qcom_saw_regulator_match); + +static int qcom_saw_regulator_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct device_node *saw_np; + struct saw_vreg *vreg; + struct regulator_config 
config = { }; + int ret = 0, cpu = 0; + char name[] = "kraitXX"; + + vreg = saw_get_drv(pdev, &cpu); + if (!vreg) + return -EINVAL; + + saw_np = of_get_parent(np); + if (!saw_np) + return -ENODEV; + + vreg->regmap = syscon_node_to_regmap(saw_np); + of_node_put(saw_np); + if (IS_ERR(config.regmap)) + return PTR_ERR(config.regmap); + + snprintf(name, sizeof(name), "krait%d", cpu); + + config.regmap = vreg->regmap; + config.dev = &pdev->dev; + config.of_node = np; + config.driver_data = vreg; + + vreg->rdesc = saw_regulator; + vreg->rdesc.id = cpu; + vreg->rdesc.name = kstrdup_const(name, GFP_KERNEL); + config.init_data = of_get_regulator_init_data(&pdev->dev, + pdev->dev.of_node, + &vreg->rdesc); + + vreg->rdev = devm_regulator_register(&pdev->dev, &vreg->rdesc, &config); + if (IS_ERR(vreg->rdev)) { + ret = PTR_ERR(vreg->rdev); + dev_err(dev, "failed to register SAW regulator: %d\n", ret); + return ret; + } + + return 0; +} + +static struct platform_driver qcom_saw_regulator_driver = { + .driver = { + .name = "qcom-saw-regulator", + .of_match_table = qcom_saw_regulator_match, + }, + .probe = qcom_saw_regulator_probe, +}; + +module_platform_driver(qcom_saw_regulator_driver); + +MODULE_ALIAS("platform:qcom-saw-regulator"); +MODULE_DESCRIPTION("Qualcomm SAW regulator driver"); +MODULE_AUTHOR("Georgi Djakov <georgi.djakov@linaro.org>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c index 8ed46a9a55c8..8c0ada830ca9 100644 --- a/drivers/regulator/qcom_smd-regulator.c +++ b/drivers/regulator/qcom_smd-regulator.c @@ -20,6 +20,9 @@ #include <linux/regulator/machine.h> #include <linux/regulator/of_regulator.h> #include <linux/soc/qcom/smd-rpm.h> +#include <linux/regulator/qcom_smd-regulator.h> + +#include "internal.h" struct qcom_rpm_reg { struct device *dev; @@ -44,6 +47,11 @@ struct rpm_regulator_req { #define RPM_KEY_SWEN 0x6e657773 /* "swen" */ #define RPM_KEY_UV 0x00007675 /* "uv" */ #define 
RPM_KEY_MA 0x0000616d /* "ma" */ +#define RPM_KEY_FLOOR 0x00636676 /* "vfc" */ +#define RPM_KEY_CORNER 0x6e726f63 /* "corn" */ + +#define RPM_MIN_FLOOR_CORNER 0 +#define RPM_MAX_FLOOR_CORNER 6 static int rpm_reg_write_active(struct qcom_rpm_reg *vreg, struct rpm_regulator_req *req, @@ -56,6 +64,50 @@ static int rpm_reg_write_active(struct qcom_rpm_reg *vreg, req, size); } +int qcom_rpm_set_floor(struct regulator *regulator, int floor) +{ + struct regulator_dev *rdev = regulator->rdev; + struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev); + struct rpm_regulator_req req; + int ret; + + req.key = RPM_KEY_FLOOR; + req.nbytes = sizeof(u32); + req.value = floor; + + if (floor < RPM_MIN_FLOOR_CORNER || floor > RPM_MAX_FLOOR_CORNER) + return -EINVAL; + + ret = rpm_reg_write_active(vreg, &req, sizeof(req)); + if (ret) + dev_err(rdev_get_dev(rdev), "Failed to set floor %d\n", floor); + + return ret; +} +EXPORT_SYMBOL(qcom_rpm_set_floor); + +int qcom_rpm_set_corner(struct regulator *regulator, int corner) +{ + struct regulator_dev *rdev = regulator->rdev; + struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev); + struct rpm_regulator_req req; + int ret; + + req.key = RPM_KEY_CORNER; + req.nbytes = sizeof(u32); + req.value = corner; + + if (corner < RPM_MIN_FLOOR_CORNER || corner > RPM_MAX_FLOOR_CORNER) + return -EINVAL; + + ret = rpm_reg_write_active(vreg, &req, sizeof(req)); + if (ret) + dev_err(rdev_get_dev(rdev), "Failed to set corner %d\n", corner); + + return ret; +} +EXPORT_SYMBOL(qcom_rpm_set_corner); + static int rpm_reg_enable(struct regulator_dev *rdev) { struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev); diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig index 5fcbefcb8636..2f248727ade0 100644 --- a/drivers/remoteproc/Kconfig +++ b/drivers/remoteproc/Kconfig @@ -1,20 +1,24 @@ menu "Remoteproc drivers" -# REMOTEPROC gets selected by whoever wants it config REMOTEPROC - tristate + tristate "Support for Remote Processor subsystem" depends on HAS_DMA 
select CRC32 select FW_LOADER select VIRTIO select VIRTUALIZATION + help + Support for remote processors (such as DSP coprocessors). These + are mainly used on embedded systems. + +if REMOTEPROC config OMAP_REMOTEPROC tristate "OMAP remoteproc support" depends on HAS_DMA depends on ARCH_OMAP4 || SOC_OMAP5 depends on OMAP_IOMMU - select REMOTEPROC + depends on REMOTEPROC select MAILBOX select OMAP2PLUS_MBOX select RPMSG_VIRTIO @@ -24,27 +28,17 @@ config OMAP_REMOTEPROC Currently only supported on OMAP4. - Usually you want to say y here, in order to enable multimedia + Usually you want to say Y here, in order to enable multimedia use-cases to run on your platform (multimedia codecs are offloaded to remote DSP processors using this framework). - It's safe to say n here if you're not interested in multimedia + It's safe to say N here if you're not interested in multimedia offloading or just want a bare minimum kernel. -config STE_MODEM_RPROC - tristate "STE-Modem remoteproc support" - depends on HAS_DMA - select REMOTEPROC - default n - help - Say y or m here to support STE-Modem shared memory driver. - This can be either built-in or a loadable module. - If unsure say N. - config WKUP_M3_RPROC tristate "AMx3xx Wakeup M3 remoteproc support" depends on SOC_AM33XX || SOC_AM43XX - select REMOTEPROC + depends on REMOTEPROC help Say y here to support Wakeup M3 remote processor on TI AM33xx and AM43xx family of SoCs. @@ -57,8 +51,8 @@ config WKUP_M3_RPROC config DA8XX_REMOTEPROC tristate "DA8xx/OMAP-L13x remoteproc support" depends on ARCH_DAVINCI_DA8XX + depends on REMOTEPROC select CMA if MMU - select REMOTEPROC select RPMSG_VIRTIO help Say y here to support DA8xx/OMAP-L13x remote processors via the @@ -77,16 +71,32 @@ config DA8XX_REMOTEPROC It's safe to say n here if you're not interested in multimedia offloading. 
-config QCOM_MDT_LOADER +config QCOM_ADSP_PIL + tristate "Qualcomm ADSP Peripheral Image Loader" + depends on OF && ARCH_QCOM + depends on REMOTEPROC + depends on QCOM_SMEM + depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n) + select MFD_SYSCON + select QCOM_MDT_LOADER + select QCOM_RPROC_COMMON + select QCOM_SCM + help + Say y here to support the TrustZone based Peripherial Image Loader + for the Qualcomm ADSP remote processors. + +config QCOM_RPROC_COMMON tristate config QCOM_Q6V5_PIL tristate "Qualcomm Hexagon V5 Peripherial Image Loader" depends on OF && ARCH_QCOM depends on QCOM_SMEM + depends on REMOTEPROC + depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n) select MFD_SYSCON - select QCOM_MDT_LOADER - select REMOTEPROC + select QCOM_RPROC_COMMON + select QCOM_SCM help Say y here to support the Qualcomm Peripherial Image Loader for the Hexagon V5 based remote processors. @@ -94,10 +104,12 @@ config QCOM_Q6V5_PIL config QCOM_WCNSS_PIL tristate "Qualcomm WCNSS Peripheral Image Loader" depends on OF && ARCH_QCOM + depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n) depends on QCOM_SMEM + depends on REMOTEPROC select QCOM_MDT_LOADER + select QCOM_RPROC_COMMON select QCOM_SCM - select REMOTEPROC help Say y here to support the Peripheral Image Loader for the Qualcomm Wireless Connectivity Subsystem. @@ -105,10 +117,16 @@ config QCOM_WCNSS_PIL config ST_REMOTEPROC tristate "ST remoteproc support" depends on ARCH_STI - select REMOTEPROC + depends on REMOTEPROC help Say y here to support ST's adjunct processors via the remote processor framework. This can be either built-in or a loadable module. 
+config ST_SLIM_REMOTEPROC + tristate + depends on REMOTEPROC + +endif # REMOTEPROC + endmenu diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile index 034b6f3563a7..ffc5e430df27 100644 --- a/drivers/remoteproc/Makefile +++ b/drivers/remoteproc/Makefile @@ -5,15 +5,17 @@ obj-$(CONFIG_REMOTEPROC) += remoteproc.o remoteproc-y := remoteproc_core.o remoteproc-y += remoteproc_debugfs.o +remoteproc-y += remoteproc_sysfs.o remoteproc-y += remoteproc_virtio.o remoteproc-y += remoteproc_elf_loader.o obj-$(CONFIG_OMAP_REMOTEPROC) += omap_remoteproc.o -obj-$(CONFIG_STE_MODEM_RPROC) += ste_modem_rproc.o obj-$(CONFIG_WKUP_M3_RPROC) += wkup_m3_rproc.o obj-$(CONFIG_DA8XX_REMOTEPROC) += da8xx_remoteproc.o -obj-$(CONFIG_QCOM_MDT_LOADER) += qcom_mdt_loader.o +obj-$(CONFIG_QCOM_ADSP_PIL) += qcom_adsp_pil.o +obj-$(CONFIG_QCOM_RPROC_COMMON) += qcom_common.o obj-$(CONFIG_QCOM_Q6V5_PIL) += qcom_q6v5_pil.o obj-$(CONFIG_QCOM_WCNSS_PIL) += qcom_wcnss_pil.o qcom_wcnss_pil-y += qcom_wcnss.o qcom_wcnss_pil-y += qcom_wcnss_iris.o obj-$(CONFIG_ST_REMOTEPROC) += st_remoteproc.o +obj-$(CONFIG_ST_SLIM_REMOTEPROC) += st_slim_rproc.o diff --git a/drivers/remoteproc/da8xx_remoteproc.c b/drivers/remoteproc/da8xx_remoteproc.c index 1afac8f31be0..3814de28599c 100644 --- a/drivers/remoteproc/da8xx_remoteproc.c +++ b/drivers/remoteproc/da8xx_remoteproc.c @@ -151,7 +151,7 @@ static void da8xx_rproc_kick(struct rproc *rproc, int vqid) writel(SYSCFG_CHIPSIG2, drproc->chipsig); } -static struct rproc_ops da8xx_rproc_ops = { +static const struct rproc_ops da8xx_rproc_ops = { .start = da8xx_rproc_start, .stop = da8xx_rproc_stop, .kick = da8xx_rproc_kick, diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c index fa63bf2eb885..a96ce9083f7f 100644 --- a/drivers/remoteproc/omap_remoteproc.c +++ b/drivers/remoteproc/omap_remoteproc.c @@ -177,7 +177,7 @@ static int omap_rproc_stop(struct rproc *rproc) return 0; } -static struct rproc_ops omap_rproc_ops = { 
+static const struct rproc_ops omap_rproc_ops = { .start = omap_rproc_start, .stop = omap_rproc_stop, .kick = omap_rproc_kick, diff --git a/drivers/remoteproc/qcom_adsp_pil.c b/drivers/remoteproc/qcom_adsp_pil.c new file mode 100644 index 000000000000..0523b2cfe012 --- /dev/null +++ b/drivers/remoteproc/qcom_adsp_pil.c @@ -0,0 +1,410 @@ +/* + * Qualcomm ADSP Peripheral Image Loader for MSM8974 and MSM8996 + * + * Copyright (C) 2016 Linaro Ltd + * Copyright (C) 2014 Sony Mobile Communications AB + * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/clk.h> +#include <linux/firmware.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/qcom_scm.h> +#include <linux/regulator/consumer.h> +#include <linux/remoteproc.h> +#include <linux/soc/qcom/mdt_loader.h> +#include <linux/soc/qcom/smem.h> +#include <linux/soc/qcom/smem_state.h> + +#include "qcom_common.h" +#include "remoteproc_internal.h" + +#define ADSP_CRASH_REASON_SMEM 423 +#define ADSP_FIRMWARE_NAME "adsp.mdt" +#define ADSP_PAS_ID 1 + +struct qcom_adsp { + struct device *dev; + struct rproc *rproc; + + int wdog_irq; + int fatal_irq; + int ready_irq; + int handover_irq; + int stop_ack_irq; + + struct qcom_smem_state *state; + unsigned stop_bit; + + struct clk *xo; + + struct regulator *cx_supply; + + struct completion start_done; + struct completion stop_done; + + phys_addr_t mem_phys; + phys_addr_t mem_reloc; + void *mem_region; + size_t mem_size; + + struct qcom_rproc_subdev smd_subdev; +}; + +static int adsp_load(struct rproc *rproc, const struct firmware *fw) +{ + struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv; + + return qcom_mdt_load(adsp->dev, fw, rproc->firmware, ADSP_PAS_ID, + adsp->mem_region, adsp->mem_phys, adsp->mem_size); +} + +static const struct rproc_fw_ops adsp_fw_ops = { + .find_rsc_table = qcom_mdt_find_rsc_table, + .load = adsp_load, +}; + +static int adsp_start(struct rproc *rproc) +{ + struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv; + int ret; + + ret = clk_prepare_enable(adsp->xo); + if (ret) + return ret; + + ret = regulator_enable(adsp->cx_supply); + if (ret) + goto disable_clocks; + + ret = qcom_scm_pas_auth_and_reset(ADSP_PAS_ID); + if (ret) { + dev_err(adsp->dev, + "failed to authenticate image and release reset\n"); + goto disable_regulators; + } + + ret = wait_for_completion_timeout(&adsp->start_done, + msecs_to_jiffies(5000)); + if (!ret) 
{ + dev_err(adsp->dev, "start timed out\n"); + qcom_scm_pas_shutdown(ADSP_PAS_ID); + ret = -ETIMEDOUT; + goto disable_regulators; + } + + ret = 0; + +disable_regulators: + regulator_disable(adsp->cx_supply); +disable_clocks: + clk_disable_unprepare(adsp->xo); + + return ret; +} + +static int adsp_stop(struct rproc *rproc) +{ + struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv; + int ret; + + qcom_smem_state_update_bits(adsp->state, + BIT(adsp->stop_bit), + BIT(adsp->stop_bit)); + + ret = wait_for_completion_timeout(&adsp->stop_done, + msecs_to_jiffies(5000)); + if (ret == 0) + dev_err(adsp->dev, "timed out on wait\n"); + + qcom_smem_state_update_bits(adsp->state, + BIT(adsp->stop_bit), + 0); + + ret = qcom_scm_pas_shutdown(ADSP_PAS_ID); + if (ret) + dev_err(adsp->dev, "failed to shutdown: %d\n", ret); + + return ret; +} + +static void *adsp_da_to_va(struct rproc *rproc, u64 da, int len) +{ + struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv; + int offset; + + offset = da - adsp->mem_reloc; + if (offset < 0 || offset + len > adsp->mem_size) + return NULL; + + return adsp->mem_region + offset; +} + +static const struct rproc_ops adsp_ops = { + .start = adsp_start, + .stop = adsp_stop, + .da_to_va = adsp_da_to_va, +}; + +static irqreturn_t adsp_wdog_interrupt(int irq, void *dev) +{ + struct qcom_adsp *adsp = dev; + + rproc_report_crash(adsp->rproc, RPROC_WATCHDOG); + + return IRQ_HANDLED; +} + +static irqreturn_t adsp_fatal_interrupt(int irq, void *dev) +{ + struct qcom_adsp *adsp = dev; + size_t len; + char *msg; + + msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, ADSP_CRASH_REASON_SMEM, &len); + if (!IS_ERR(msg) && len > 0 && msg[0]) + dev_err(adsp->dev, "fatal error received: %s\n", msg); + + rproc_report_crash(adsp->rproc, RPROC_FATAL_ERROR); + + if (!IS_ERR(msg)) + msg[0] = '\0'; + + return IRQ_HANDLED; +} + +static irqreturn_t adsp_ready_interrupt(int irq, void *dev) +{ + return IRQ_HANDLED; +} + +static irqreturn_t adsp_handover_interrupt(int irq, void 
*dev) +{ + struct qcom_adsp *adsp = dev; + + complete(&adsp->start_done); + + return IRQ_HANDLED; +} + +static irqreturn_t adsp_stop_ack_interrupt(int irq, void *dev) +{ + struct qcom_adsp *adsp = dev; + + complete(&adsp->stop_done); + + return IRQ_HANDLED; +} + +static int adsp_init_clock(struct qcom_adsp *adsp) +{ + int ret; + + adsp->xo = devm_clk_get(adsp->dev, "xo"); + if (IS_ERR(adsp->xo)) { + ret = PTR_ERR(adsp->xo); + if (ret != -EPROBE_DEFER) + dev_err(adsp->dev, "failed to get xo clock"); + return ret; + } + + return 0; +} + +static int adsp_init_regulator(struct qcom_adsp *adsp) +{ + adsp->cx_supply = devm_regulator_get(adsp->dev, "cx"); + if (IS_ERR(adsp->cx_supply)) + return PTR_ERR(adsp->cx_supply); + + regulator_set_load(adsp->cx_supply, 100000); + + return 0; +} + +static int adsp_request_irq(struct qcom_adsp *adsp, + struct platform_device *pdev, + const char *name, + irq_handler_t thread_fn) +{ + int ret; + + ret = platform_get_irq_byname(pdev, name); + if (ret < 0) { + dev_err(&pdev->dev, "no %s IRQ defined\n", name); + return ret; + } + + ret = devm_request_threaded_irq(&pdev->dev, ret, + NULL, thread_fn, + IRQF_ONESHOT, + "adsp", adsp); + if (ret) + dev_err(&pdev->dev, "request %s IRQ failed\n", name); + + return ret; +} + +static int adsp_alloc_memory_region(struct qcom_adsp *adsp) +{ + struct device_node *node; + struct resource r; + int ret; + + node = of_parse_phandle(adsp->dev->of_node, "memory-region", 0); + if (!node) { + dev_err(adsp->dev, "no memory-region specified\n"); + return -EINVAL; + } + + ret = of_address_to_resource(node, 0, &r); + if (ret) + return ret; + + adsp->mem_phys = adsp->mem_reloc = r.start; + adsp->mem_size = resource_size(&r); + adsp->mem_region = devm_ioremap_wc(adsp->dev, adsp->mem_phys, adsp->mem_size); + if (!adsp->mem_region) { + dev_err(adsp->dev, "unable to map memory region: %pa+%zx\n", + &r.start, adsp->mem_size); + return -EBUSY; + } + + return 0; +} + +static int adsp_probe(struct platform_device *pdev) 
+{ + struct qcom_adsp *adsp; + struct rproc *rproc; + int ret; + + if (!qcom_scm_is_available()) + return -EPROBE_DEFER; + + if (!qcom_scm_pas_supported(ADSP_PAS_ID)) { + dev_err(&pdev->dev, "PAS is not available for ADSP\n"); + return -ENXIO; + } + + rproc = rproc_alloc(&pdev->dev, pdev->name, &adsp_ops, + ADSP_FIRMWARE_NAME, sizeof(*adsp)); + if (!rproc) { + dev_err(&pdev->dev, "unable to allocate remoteproc\n"); + return -ENOMEM; + } + + rproc->fw_ops = &adsp_fw_ops; + + adsp = (struct qcom_adsp *)rproc->priv; + adsp->dev = &pdev->dev; + adsp->rproc = rproc; + platform_set_drvdata(pdev, adsp); + + init_completion(&adsp->start_done); + init_completion(&adsp->stop_done); + + ret = adsp_alloc_memory_region(adsp); + if (ret) + goto free_rproc; + + ret = adsp_init_clock(adsp); + if (ret) + goto free_rproc; + + ret = adsp_init_regulator(adsp); + if (ret) + goto free_rproc; + + ret = adsp_request_irq(adsp, pdev, "wdog", adsp_wdog_interrupt); + if (ret < 0) + goto free_rproc; + adsp->wdog_irq = ret; + + ret = adsp_request_irq(adsp, pdev, "fatal", adsp_fatal_interrupt); + if (ret < 0) + goto free_rproc; + adsp->fatal_irq = ret; + + ret = adsp_request_irq(adsp, pdev, "ready", adsp_ready_interrupt); + if (ret < 0) + goto free_rproc; + adsp->ready_irq = ret; + + ret = adsp_request_irq(adsp, pdev, "handover", adsp_handover_interrupt); + if (ret < 0) + goto free_rproc; + adsp->handover_irq = ret; + + ret = adsp_request_irq(adsp, pdev, "stop-ack", adsp_stop_ack_interrupt); + if (ret < 0) + goto free_rproc; + adsp->stop_ack_irq = ret; + + adsp->state = qcom_smem_state_get(&pdev->dev, "stop", + &adsp->stop_bit); + if (IS_ERR(adsp->state)) { + ret = PTR_ERR(adsp->state); + goto free_rproc; + } + + qcom_add_smd_subdev(rproc, &adsp->smd_subdev); + + ret = rproc_add(rproc); + if (ret) + goto free_rproc; + + return 0; + +free_rproc: + rproc_free(rproc); + + return ret; +} + +static int adsp_remove(struct platform_device *pdev) +{ + struct qcom_adsp *adsp = platform_get_drvdata(pdev); 
+ + qcom_smem_state_put(adsp->state); + rproc_del(adsp->rproc); + + qcom_remove_smd_subdev(adsp->rproc, &adsp->smd_subdev); + rproc_free(adsp->rproc); + + return 0; +} + +static const struct of_device_id adsp_of_match[] = { + { .compatible = "qcom,msm8974-adsp-pil" }, + { .compatible = "qcom,msm8996-adsp-pil" }, + { }, +}; +MODULE_DEVICE_TABLE(of, adsp_of_match); + +static struct platform_driver adsp_driver = { + .probe = adsp_probe, + .remove = adsp_remove, + .driver = { + .name = "qcom_adsp_pil", + .of_match_table = adsp_of_match, + }, +}; + +module_platform_driver(adsp_driver); +MODULE_DESCRIPTION("Qualcomm MSM8974/MSM8996 ADSP Peripherial Image Loader"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/remoteproc/qcom_common.c b/drivers/remoteproc/qcom_common.c new file mode 100644 index 000000000000..bb90481215c6 --- /dev/null +++ b/drivers/remoteproc/qcom_common.c @@ -0,0 +1,96 @@ +/* + * Qualcomm Peripheral Image Loader helpers + * + * Copyright (C) 2016 Linaro Ltd + * Copyright (C) 2015 Sony Mobile Communications Inc + * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/firmware.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/remoteproc.h> +#include <linux/rpmsg/qcom_smd.h> + +#include "remoteproc_internal.h" +#include "qcom_common.h" + +#define to_smd_subdev(d) container_of(d, struct qcom_rproc_subdev, subdev) + +/** + * qcom_mdt_find_rsc_table() - provide dummy resource table for remoteproc + * @rproc: remoteproc handle + * @fw: firmware header + * @tablesz: outgoing size of the table + * + * Returns a dummy table. + */ +struct resource_table *qcom_mdt_find_rsc_table(struct rproc *rproc, + const struct firmware *fw, + int *tablesz) +{ + static struct resource_table table = { .ver = 1, }; + + *tablesz = sizeof(table); + return &table; +} +EXPORT_SYMBOL_GPL(qcom_mdt_find_rsc_table); + +static int smd_subdev_probe(struct rproc_subdev *subdev) +{ + struct qcom_rproc_subdev *smd = to_smd_subdev(subdev); + + smd->edge = qcom_smd_register_edge(smd->dev, smd->node); + + return IS_ERR(smd->edge) ? PTR_ERR(smd->edge) : 0; +} + +static void smd_subdev_remove(struct rproc_subdev *subdev) +{ + struct qcom_rproc_subdev *smd = to_smd_subdev(subdev); + + qcom_smd_unregister_edge(smd->edge); + smd->edge = NULL; +} + +/** + * qcom_add_smd_subdev() - try to add a SMD subdevice to rproc + * @rproc: rproc handle to parent the subdevice + * @smd: reference to a Qualcomm subdev context + */ +void qcom_add_smd_subdev(struct rproc *rproc, struct qcom_rproc_subdev *smd) +{ + struct device *dev = &rproc->dev; + + smd->node = of_get_child_by_name(dev->parent->of_node, "smd-edge"); + if (!smd->node) + return; + + smd->dev = dev; + rproc_add_subdev(rproc, &smd->subdev, smd_subdev_probe, smd_subdev_remove); +} +EXPORT_SYMBOL_GPL(qcom_add_smd_subdev); + +/** + * qcom_remove_smd_subdev() - remove the smd subdevice from rproc + * @rproc: rproc handle + * @smd: the SMD subdevice to remove + */ +void qcom_remove_smd_subdev(struct rproc *rproc, struct qcom_rproc_subdev *smd) +{ + rproc_remove_subdev(rproc, 
&smd->subdev); + of_node_put(smd->node); +} +EXPORT_SYMBOL_GPL(qcom_remove_smd_subdev); + +MODULE_DESCRIPTION("Qualcomm Remoteproc helper driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/remoteproc/qcom_common.h b/drivers/remoteproc/qcom_common.h new file mode 100644 index 000000000000..db5c826d5cd4 --- /dev/null +++ b/drivers/remoteproc/qcom_common.h @@ -0,0 +1,22 @@ +#ifndef __RPROC_QCOM_COMMON_H__ +#define __RPROC_QCOM_COMMON_H__ + +#include <linux/remoteproc.h> +#include "remoteproc_internal.h" + +struct qcom_rproc_subdev { + struct rproc_subdev subdev; + + struct device *dev; + struct device_node *node; + struct qcom_smd_edge *edge; +}; + +struct resource_table *qcom_mdt_find_rsc_table(struct rproc *rproc, + const struct firmware *fw, + int *tablesz); + +void qcom_add_smd_subdev(struct rproc *rproc, struct qcom_rproc_subdev *smd); +void qcom_remove_smd_subdev(struct rproc *rproc, struct qcom_rproc_subdev *smd); + +#endif diff --git a/drivers/remoteproc/qcom_mdt_loader.c b/drivers/remoteproc/qcom_mdt_loader.c deleted file mode 100644 index 04db02d9059d..000000000000 --- a/drivers/remoteproc/qcom_mdt_loader.c +++ /dev/null @@ -1,180 +0,0 @@ -/* - * Qualcomm Peripheral Image Loader - * - * Copyright (C) 2016 Linaro Ltd - * Copyright (C) 2015 Sony Mobile Communications Inc - * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#include <linux/elf.h> -#include <linux/firmware.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/remoteproc.h> -#include <linux/slab.h> - -#include "remoteproc_internal.h" -#include "qcom_mdt_loader.h" - -/** - * qcom_mdt_find_rsc_table() - provide dummy resource table for remoteproc - * @rproc: remoteproc handle - * @fw: firmware header - * @tablesz: outgoing size of the table - * - * Returns a dummy table. - */ -struct resource_table *qcom_mdt_find_rsc_table(struct rproc *rproc, - const struct firmware *fw, - int *tablesz) -{ - static struct resource_table table = { .ver = 1, }; - - *tablesz = sizeof(table); - return &table; -} -EXPORT_SYMBOL_GPL(qcom_mdt_find_rsc_table); - -/** - * qcom_mdt_parse() - extract useful parameters from the mdt header - * @fw: firmware handle - * @fw_addr: optional reference for base of the firmware's memory region - * @fw_size: optional reference for size of the firmware's memory region - * @fw_relocate: optional reference for flagging if the firmware is relocatable - * - * Returns 0 on success, negative errno otherwise. 
- */ -int qcom_mdt_parse(const struct firmware *fw, phys_addr_t *fw_addr, - size_t *fw_size, bool *fw_relocate) -{ - const struct elf32_phdr *phdrs; - const struct elf32_phdr *phdr; - const struct elf32_hdr *ehdr; - phys_addr_t min_addr = (phys_addr_t)ULLONG_MAX; - phys_addr_t max_addr = 0; - bool relocate = false; - int i; - - ehdr = (struct elf32_hdr *)fw->data; - phdrs = (struct elf32_phdr *)(ehdr + 1); - - for (i = 0; i < ehdr->e_phnum; i++) { - phdr = &phdrs[i]; - - if (phdr->p_type != PT_LOAD) - continue; - - if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH) - continue; - - if (!phdr->p_memsz) - continue; - - if (phdr->p_flags & QCOM_MDT_RELOCATABLE) - relocate = true; - - if (phdr->p_paddr < min_addr) - min_addr = phdr->p_paddr; - - if (phdr->p_paddr + phdr->p_memsz > max_addr) - max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K); - } - - if (fw_addr) - *fw_addr = min_addr; - if (fw_size) - *fw_size = max_addr - min_addr; - if (fw_relocate) - *fw_relocate = relocate; - - return 0; -} -EXPORT_SYMBOL_GPL(qcom_mdt_parse); - -/** - * qcom_mdt_load() - load the firmware which header is defined in fw - * @rproc: rproc handle - * @fw: frimware object for the header - * @firmware: filename of the firmware, for building .bXX names - * - * Returns 0 on success, negative errno otherwise. 
- */ -int qcom_mdt_load(struct rproc *rproc, - const struct firmware *fw, - const char *firmware) -{ - const struct elf32_phdr *phdrs; - const struct elf32_phdr *phdr; - const struct elf32_hdr *ehdr; - const struct firmware *seg_fw; - size_t fw_name_len; - char *fw_name; - void *ptr; - int ret; - int i; - - ehdr = (struct elf32_hdr *)fw->data; - phdrs = (struct elf32_phdr *)(ehdr + 1); - - fw_name_len = strlen(firmware); - if (fw_name_len <= 4) - return -EINVAL; - - fw_name = kstrdup(firmware, GFP_KERNEL); - if (!fw_name) - return -ENOMEM; - - for (i = 0; i < ehdr->e_phnum; i++) { - phdr = &phdrs[i]; - - if (phdr->p_type != PT_LOAD) - continue; - - if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH) - continue; - - if (!phdr->p_memsz) - continue; - - ptr = rproc_da_to_va(rproc, phdr->p_paddr, phdr->p_memsz); - if (!ptr) { - dev_err(&rproc->dev, "segment outside memory range\n"); - ret = -EINVAL; - break; - } - - if (phdr->p_filesz) { - sprintf(fw_name + fw_name_len - 3, "b%02d", i); - ret = request_firmware(&seg_fw, fw_name, &rproc->dev); - if (ret) { - dev_err(&rproc->dev, "failed to load %s\n", - fw_name); - break; - } - - memcpy(ptr, seg_fw->data, seg_fw->size); - - release_firmware(seg_fw); - } - - if (phdr->p_memsz > phdr->p_filesz) - memset(ptr + phdr->p_filesz, 0, phdr->p_memsz - phdr->p_filesz); - } - - kfree(fw_name); - - return ret; -} -EXPORT_SYMBOL_GPL(qcom_mdt_load); - -MODULE_DESCRIPTION("Firmware parser for Qualcomm MDT format"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/remoteproc/qcom_mdt_loader.h b/drivers/remoteproc/qcom_mdt_loader.h deleted file mode 100644 index c5d7122755b6..000000000000 --- a/drivers/remoteproc/qcom_mdt_loader.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef __QCOM_MDT_LOADER_H__ -#define __QCOM_MDT_LOADER_H__ - -#define QCOM_MDT_TYPE_MASK (7 << 24) -#define QCOM_MDT_TYPE_HASH (2 << 24) -#define QCOM_MDT_RELOCATABLE BIT(27) - -struct resource_table * qcom_mdt_find_rsc_table(struct rproc *rproc, const struct 
firmware *fw, int *tablesz); -int qcom_mdt_load(struct rproc *rproc, const struct firmware *fw, const char *fw_name); - -int qcom_mdt_parse(const struct firmware *fw, phys_addr_t *fw_addr, size_t *fw_size, bool *fw_relocate); - -#endif diff --git a/drivers/remoteproc/qcom_q6v5_pil.c b/drivers/remoteproc/qcom_q6v5_pil.c index 2e0caaaa766a..56feca7c7544 100644 --- a/drivers/remoteproc/qcom_q6v5_pil.c +++ b/drivers/remoteproc/qcom_q6v5_pil.c @@ -23,22 +23,21 @@ #include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of_address.h> +#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> #include <linux/remoteproc.h> #include <linux/reset.h> +#include <linux/soc/qcom/mdt_loader.h> #include <linux/soc/qcom/smem.h> #include <linux/soc/qcom/smem_state.h> #include "remoteproc_internal.h" -#include "qcom_mdt_loader.h" +#include "qcom_common.h" #include <linux/qcom_scm.h> -#define MBA_FIRMWARE_NAME "mba.b00" -#define MPSS_FIRMWARE_NAME "modem.mdt" - #define MPSS_CRASH_REASON_SMEM 421 /* RMB Status Register Values */ @@ -93,6 +92,26 @@ #define QDSS_BHS_ON BIT(21) #define QDSS_LDO_BYP BIT(22) +struct reg_info { + struct regulator *reg; + int uV; + int uA; +}; + +struct qcom_mss_reg_res { + const char *supply; + int uV; + int uA; +}; + +struct rproc_hexagon_res { + const char *hexagon_mba_image; + struct qcom_mss_reg_res proxy_supply[4]; + struct qcom_mss_reg_res active_supply[2]; + char **proxy_clk_names; + char **active_clk_names; +}; + struct q6v5 { struct device *dev; struct rproc *rproc; @@ -110,11 +129,15 @@ struct q6v5 { struct qcom_smem_state *state; unsigned stop_bit; - struct regulator_bulk_data supply[4]; + struct clk *active_clks[8]; + struct clk *proxy_clks[4]; + int active_clk_count; + int proxy_clk_count; - struct clk *ahb_clk; - struct clk *axi_clk; - struct clk *rom_clk; + struct reg_info active_regs[1]; + struct reg_info proxy_regs[3]; + int active_reg_count; + int 
proxy_reg_count; struct completion start_done; struct completion stop_done; @@ -128,65 +151,141 @@ struct q6v5 { phys_addr_t mpss_reloc; void *mpss_region; size_t mpss_size; -}; -enum { - Q6V5_SUPPLY_CX, - Q6V5_SUPPLY_MX, - Q6V5_SUPPLY_MSS, - Q6V5_SUPPLY_PLL, + struct qcom_rproc_subdev smd_subdev; }; -static int q6v5_regulator_init(struct q6v5 *qproc) +static int q6v5_regulator_init(struct device *dev, struct reg_info *regs, + const struct qcom_mss_reg_res *reg_res) { - int ret; + int rc; + int i; - qproc->supply[Q6V5_SUPPLY_CX].supply = "cx"; - qproc->supply[Q6V5_SUPPLY_MX].supply = "mx"; - qproc->supply[Q6V5_SUPPLY_MSS].supply = "mss"; - qproc->supply[Q6V5_SUPPLY_PLL].supply = "pll"; + if (!reg_res) + return 0; + + for (i = 0; reg_res[i].supply; i++) { + regs[i].reg = devm_regulator_get(dev, reg_res[i].supply); + if (IS_ERR(regs[i].reg)) { + rc = PTR_ERR(regs[i].reg); + if (rc != -EPROBE_DEFER) + dev_err(dev, "Failed to get %s\n regulator", + reg_res[i].supply); + return rc; + } - ret = devm_regulator_bulk_get(qproc->dev, - ARRAY_SIZE(qproc->supply), qproc->supply); - if (ret < 0) { - dev_err(qproc->dev, "failed to get supplies\n"); - return ret; + regs[i].uV = reg_res[i].uV; + regs[i].uA = reg_res[i].uA; } - regulator_set_load(qproc->supply[Q6V5_SUPPLY_CX].consumer, 100000); - regulator_set_load(qproc->supply[Q6V5_SUPPLY_MSS].consumer, 100000); - regulator_set_load(qproc->supply[Q6V5_SUPPLY_PLL].consumer, 10000); + return i; +} + +static int q6v5_regulator_enable(struct q6v5 *qproc, + struct reg_info *regs, int count) +{ + int ret; + int i; + + for (i = 0; i < count; i++) { + if (regs[i].uV > 0) { + ret = regulator_set_voltage(regs[i].reg, + regs[i].uV, INT_MAX); + if (ret) { + dev_err(qproc->dev, + "Failed to request voltage for %d.\n", + i); + goto err; + } + } + + if (regs[i].uA > 0) { + ret = regulator_set_load(regs[i].reg, + regs[i].uA); + if (ret < 0) { + dev_err(qproc->dev, + "Failed to set regulator mode\n"); + goto err; + } + } + + ret = 
regulator_enable(regs[i].reg); + if (ret) { + dev_err(qproc->dev, "Regulator enable failed\n"); + goto err; + } + } return 0; +err: + for (; i >= 0; i--) { + if (regs[i].uV > 0) + regulator_set_voltage(regs[i].reg, 0, INT_MAX); + + if (regs[i].uA > 0) + regulator_set_load(regs[i].reg, 0); + + regulator_disable(regs[i].reg); + } + + return ret; } -static int q6v5_regulator_enable(struct q6v5 *qproc) +static void q6v5_regulator_disable(struct q6v5 *qproc, + struct reg_info *regs, int count) { - struct regulator *mss = qproc->supply[Q6V5_SUPPLY_MSS].consumer; - struct regulator *mx = qproc->supply[Q6V5_SUPPLY_MX].consumer; - int ret; + int i; - /* TODO: Q6V5_SUPPLY_CX is supposed to be set to super-turbo here */ + for (i = 0; i < count; i++) { + if (regs[i].uV > 0) + regulator_set_voltage(regs[i].reg, 0, INT_MAX); - ret = regulator_set_voltage(mx, 1050000, INT_MAX); - if (ret) - return ret; + if (regs[i].uA > 0) + regulator_set_load(regs[i].reg, 0); + + regulator_disable(regs[i].reg); + } +} - regulator_set_voltage(mss, 1000000, 1150000); +static int q6v5_clk_enable(struct device *dev, + struct clk **clks, int count) +{ + int rc; + int i; - return regulator_bulk_enable(ARRAY_SIZE(qproc->supply), qproc->supply); + for (i = 0; i < count; i++) { + rc = clk_prepare_enable(clks[i]); + if (rc) { + dev_err(dev, "Clock enable failed\n"); + goto err; + } + } + + return 0; +err: + for (i--; i >= 0; i--) + clk_disable_unprepare(clks[i]); + + return rc; } -static void q6v5_regulator_disable(struct q6v5 *qproc) +static void q6v5_clk_disable(struct device *dev, + struct clk **clks, int count) { - struct regulator *mss = qproc->supply[Q6V5_SUPPLY_MSS].consumer; - struct regulator *mx = qproc->supply[Q6V5_SUPPLY_MX].consumer; + int i; + + for (i = 0; i < count; i++) + clk_disable_unprepare(clks[i]); +} - /* TODO: Q6V5_SUPPLY_CX corner votes should be released */ +static struct resource_table *q6v5_find_rsc_table(struct rproc *rproc, + const struct firmware *fw, + int *tablesz) +{ + 
static struct resource_table table = { .ver = 1, }; - regulator_bulk_disable(ARRAY_SIZE(qproc->supply), qproc->supply); - regulator_set_voltage(mx, 0, INT_MAX); - regulator_set_voltage(mss, 0, 1150000); + *tablesz = sizeof(table); + return &table; } static int q6v5_load(struct rproc *rproc, const struct firmware *fw) @@ -199,7 +298,7 @@ static int q6v5_load(struct rproc *rproc, const struct firmware *fw) } static const struct rproc_fw_ops q6v5_fw_ops = { - .find_rsc_table = qcom_mdt_find_rsc_table, + .find_rsc_table = q6v5_find_rsc_table, .load = q6v5_load, }; @@ -376,45 +475,109 @@ static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw) return ret < 0 ? ret : 0; } -static int q6v5_mpss_validate(struct q6v5 *qproc, const struct firmware *fw) +static bool q6v5_phdr_valid(const struct elf32_phdr *phdr) +{ + if (phdr->p_type != PT_LOAD) + return false; + + if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH) + return false; + + if (!phdr->p_memsz) + return false; + + return true; +} + +static int q6v5_mpss_load(struct q6v5 *qproc) { const struct elf32_phdr *phdrs; const struct elf32_phdr *phdr; + const struct firmware *seg_fw; + const struct firmware *fw; struct elf32_hdr *ehdr; + phys_addr_t mpss_reloc; phys_addr_t boot_addr; - phys_addr_t fw_addr; - bool relocate; + phys_addr_t min_addr = (phys_addr_t)ULLONG_MAX; + phys_addr_t max_addr = 0; + bool relocate = false; + char seg_name[10]; + ssize_t offset; size_t size; + void *ptr; int ret; int i; - ret = qcom_mdt_parse(fw, &fw_addr, NULL, &relocate); - if (ret) { - dev_err(qproc->dev, "failed to parse mdt header\n"); + ret = request_firmware(&fw, "modem.mdt", qproc->dev); + if (ret < 0) { + dev_err(qproc->dev, "unable to load modem.mdt\n"); return ret; } - if (relocate) - boot_addr = qproc->mpss_phys; - else - boot_addr = fw_addr; + /* Initialize the RMB validator */ + writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG); + + ret = q6v5_mpss_init_image(qproc, fw); + if (ret) + goto 
release_firmware; ehdr = (struct elf32_hdr *)fw->data; phdrs = (struct elf32_phdr *)(ehdr + 1); - for (i = 0; i < ehdr->e_phnum; i++, phdr++) { + + for (i = 0; i < ehdr->e_phnum; i++) { phdr = &phdrs[i]; - if (phdr->p_type != PT_LOAD) + if (!q6v5_phdr_valid(phdr)) continue; - if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH) - continue; + if (phdr->p_flags & QCOM_MDT_RELOCATABLE) + relocate = true; + + if (phdr->p_paddr < min_addr) + min_addr = phdr->p_paddr; + + if (phdr->p_paddr + phdr->p_memsz > max_addr) + max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K); + } + + mpss_reloc = relocate ? min_addr : qproc->mpss_phys; - if (!phdr->p_memsz) + for (i = 0; i < ehdr->e_phnum; i++) { + phdr = &phdrs[i]; + + if (!q6v5_phdr_valid(phdr)) continue; + offset = phdr->p_paddr - mpss_reloc; + if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) { + dev_err(qproc->dev, "segment outside memory range\n"); + ret = -EINVAL; + goto release_firmware; + } + + ptr = qproc->mpss_region + offset; + + if (phdr->p_filesz) { + snprintf(seg_name, sizeof(seg_name), "modem.b%02d", i); + ret = request_firmware(&seg_fw, seg_name, qproc->dev); + if (ret) { + dev_err(qproc->dev, "failed to load %s\n", seg_name); + goto release_firmware; + } + + memcpy(ptr, seg_fw->data, seg_fw->size); + + release_firmware(seg_fw); + } + + if (phdr->p_memsz > phdr->p_filesz) { + memset(ptr + phdr->p_filesz, 0, + phdr->p_memsz - phdr->p_filesz); + } + size = readl(qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG); if (!size) { + boot_addr = relocate ? qproc->mpss_phys : min_addr; writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG); writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG); } @@ -429,44 +592,6 @@ static int q6v5_mpss_validate(struct q6v5 *qproc, const struct firmware *fw) else if (ret < 0) dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret); - return ret < 0 ? 
ret : 0; -} - -static int q6v5_mpss_load(struct q6v5 *qproc) -{ - const struct firmware *fw; - phys_addr_t fw_addr; - bool relocate; - int ret; - - ret = request_firmware(&fw, MPSS_FIRMWARE_NAME, qproc->dev); - if (ret < 0) { - dev_err(qproc->dev, "unable to load " MPSS_FIRMWARE_NAME "\n"); - return ret; - } - - ret = qcom_mdt_parse(fw, &fw_addr, NULL, &relocate); - if (ret) { - dev_err(qproc->dev, "failed to parse mdt header\n"); - goto release_firmware; - } - - if (relocate) - qproc->mpss_reloc = fw_addr; - - /* Initialize the RMB validator */ - writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG); - - ret = q6v5_mpss_init_image(qproc, fw); - if (ret) - goto release_firmware; - - ret = qcom_mdt_load(qproc->rproc, fw, MPSS_FIRMWARE_NAME); - if (ret) - goto release_firmware; - - ret = q6v5_mpss_validate(qproc, fw); - release_firmware: release_firmware(fw); @@ -478,29 +603,38 @@ static int q6v5_start(struct rproc *rproc) struct q6v5 *qproc = (struct q6v5 *)rproc->priv; int ret; - ret = q6v5_regulator_enable(qproc); + ret = q6v5_regulator_enable(qproc, qproc->proxy_regs, + qproc->proxy_reg_count); if (ret) { - dev_err(qproc->dev, "failed to enable supplies\n"); + dev_err(qproc->dev, "failed to enable proxy supplies\n"); return ret; } + ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks, + qproc->proxy_clk_count); + if (ret) { + dev_err(qproc->dev, "failed to enable proxy clocks\n"); + goto disable_proxy_reg; + } + + ret = q6v5_regulator_enable(qproc, qproc->active_regs, + qproc->active_reg_count); + if (ret) { + dev_err(qproc->dev, "failed to enable supplies\n"); + goto disable_proxy_clk; + } ret = reset_control_deassert(qproc->mss_restart); if (ret) { dev_err(qproc->dev, "failed to deassert mss restart\n"); goto disable_vdd; } - ret = clk_prepare_enable(qproc->ahb_clk); - if (ret) + ret = q6v5_clk_enable(qproc->dev, qproc->active_clks, + qproc->active_clk_count); + if (ret) { + dev_err(qproc->dev, "failed to enable clocks\n"); goto assert_reset; - - ret = 
clk_prepare_enable(qproc->axi_clk); - if (ret) - goto disable_ahb_clk; - - ret = clk_prepare_enable(qproc->rom_clk); - if (ret) - goto disable_axi_clk; + } writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG); @@ -535,7 +669,10 @@ static int q6v5_start(struct rproc *rproc) qproc->running = true; - /* TODO: All done, release the handover resources */ + q6v5_clk_disable(qproc->dev, qproc->proxy_clks, + qproc->proxy_clk_count); + q6v5_regulator_disable(qproc, qproc->proxy_regs, + qproc->proxy_reg_count); return 0; @@ -543,16 +680,19 @@ halt_axi_ports: q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6); q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem); q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc); - - clk_disable_unprepare(qproc->rom_clk); -disable_axi_clk: - clk_disable_unprepare(qproc->axi_clk); -disable_ahb_clk: - clk_disable_unprepare(qproc->ahb_clk); + q6v5_clk_disable(qproc->dev, qproc->active_clks, + qproc->active_clk_count); assert_reset: reset_control_assert(qproc->mss_restart); disable_vdd: - q6v5_regulator_disable(qproc); + q6v5_regulator_disable(qproc, qproc->active_regs, + qproc->active_reg_count); +disable_proxy_clk: + q6v5_clk_disable(qproc->dev, qproc->proxy_clks, + qproc->proxy_clk_count); +disable_proxy_reg: + q6v5_regulator_disable(qproc, qproc->proxy_regs, + qproc->proxy_reg_count); return ret; } @@ -579,10 +719,10 @@ static int q6v5_stop(struct rproc *rproc) q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc); reset_control_assert(qproc->mss_restart); - clk_disable_unprepare(qproc->rom_clk); - clk_disable_unprepare(qproc->axi_clk); - clk_disable_unprepare(qproc->ahb_clk); - q6v5_regulator_disable(qproc); + q6v5_clk_disable(qproc->dev, qproc->active_clks, + qproc->active_clk_count); + q6v5_regulator_disable(qproc, qproc->active_regs, + qproc->active_reg_count); return 0; } @@ -702,27 +842,27 @@ static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev) return 0; } 
-static int q6v5_init_clocks(struct q6v5 *qproc) +static int q6v5_init_clocks(struct device *dev, struct clk **clks, + char **clk_names) { - qproc->ahb_clk = devm_clk_get(qproc->dev, "iface"); - if (IS_ERR(qproc->ahb_clk)) { - dev_err(qproc->dev, "failed to get iface clock\n"); - return PTR_ERR(qproc->ahb_clk); - } + int i; - qproc->axi_clk = devm_clk_get(qproc->dev, "bus"); - if (IS_ERR(qproc->axi_clk)) { - dev_err(qproc->dev, "failed to get bus clock\n"); - return PTR_ERR(qproc->axi_clk); - } + if (!clk_names) + return 0; - qproc->rom_clk = devm_clk_get(qproc->dev, "mem"); - if (IS_ERR(qproc->rom_clk)) { - dev_err(qproc->dev, "failed to get mem clock\n"); - return PTR_ERR(qproc->rom_clk); + for (i = 0; clk_names[i]; i++) { + clks[i] = devm_clk_get(dev, clk_names[i]); + if (IS_ERR(clks[i])) { + int rc = PTR_ERR(clks[i]); + + if (rc != -EPROBE_DEFER) + dev_err(dev, "Failed to get %s clock\n", + clk_names[i]); + return rc; + } } - return 0; + return i; } static int q6v5_init_reset(struct q6v5 *qproc) @@ -805,12 +945,17 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc) static int q6v5_probe(struct platform_device *pdev) { + const struct rproc_hexagon_res *desc; struct q6v5 *qproc; struct rproc *rproc; int ret; + desc = of_device_get_match_data(&pdev->dev); + if (!desc) + return -EINVAL; + rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops, - MBA_FIRMWARE_NAME, sizeof(*qproc)); + desc->hexagon_mba_image, sizeof(*qproc)); if (!rproc) { dev_err(&pdev->dev, "failed to allocate rproc\n"); return -ENOMEM; @@ -834,13 +979,37 @@ static int q6v5_probe(struct platform_device *pdev) if (ret) goto free_rproc; - ret = q6v5_init_clocks(qproc); - if (ret) + ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks, + desc->proxy_clk_names); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to get proxy clocks.\n"); goto free_rproc; + } + qproc->proxy_clk_count = ret; - ret = q6v5_regulator_init(qproc); - if (ret) + ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks, + 
desc->active_clk_names); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to get active clocks.\n"); + goto free_rproc; + } + qproc->active_clk_count = ret; + + ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs, + desc->proxy_supply); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to get proxy regulators.\n"); + goto free_rproc; + } + qproc->proxy_reg_count = ret; + + ret = q6v5_regulator_init(&pdev->dev, qproc->active_regs, + desc->active_supply); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to get active regulators.\n"); goto free_rproc; + } + qproc->active_reg_count = ret; ret = q6v5_init_reset(qproc); if (ret) @@ -868,6 +1037,8 @@ static int q6v5_probe(struct platform_device *pdev) goto free_rproc; } + qcom_add_smd_subdev(rproc, &qproc->smd_subdev); + ret = rproc_add(rproc); if (ret) goto free_rproc; @@ -885,15 +1056,86 @@ static int q6v5_remove(struct platform_device *pdev) struct q6v5 *qproc = platform_get_drvdata(pdev); rproc_del(qproc->rproc); + + qcom_remove_smd_subdev(qproc->rproc, &qproc->smd_subdev); rproc_free(qproc->rproc); return 0; } +static const struct rproc_hexagon_res msm8916_mss = { + .hexagon_mba_image = "mba.mbn", + .proxy_supply = (struct qcom_mss_reg_res[]) { + { + .supply = "mx", + .uV = 1050000, + }, + { + .supply = "cx", + .uA = 100000, + }, + { + .supply = "pll", + .uA = 100000, + }, + {} + }, + .proxy_clk_names = (char*[]){ + "xo", + NULL + }, + .active_clk_names = (char*[]){ + "iface", + "bus", + "mem", + NULL + }, +}; + +static const struct rproc_hexagon_res msm8974_mss = { + .hexagon_mba_image = "mba.b00", + .proxy_supply = (struct qcom_mss_reg_res[]) { + { + .supply = "mx", + .uV = 1050000, + }, + { + .supply = "cx", + .uA = 100000, + }, + { + .supply = "pll", + .uA = 100000, + }, + {} + }, + .active_supply = (struct qcom_mss_reg_res[]) { + { + .supply = "mss", + .uV = 1050000, + .uA = 100000, + }, + {} + }, + .proxy_clk_names = (char*[]){ + "xo", + NULL + }, + .active_clk_names = (char*[]){ + "iface", + "bus", + "mem", + 
NULL + }, +}; + static const struct of_device_id q6v5_of_match[] = { - { .compatible = "qcom,q6v5-pil", }, + { .compatible = "qcom,q6v5-pil", .data = &msm8916_mss}, + { .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss}, + { .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss}, { }, }; +MODULE_DEVICE_TABLE(of, q6v5_of_match); static struct platform_driver q6v5_driver = { .probe = q6v5_probe, diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c index 323b629474a6..c7686393d505 100644 --- a/drivers/remoteproc/qcom_wcnss.c +++ b/drivers/remoteproc/qcom_wcnss.c @@ -28,10 +28,12 @@ #include <linux/qcom_scm.h> #include <linux/regulator/consumer.h> #include <linux/remoteproc.h> +#include <linux/soc/qcom/mdt_loader.h> #include <linux/soc/qcom/smem.h> #include <linux/soc/qcom/smem_state.h> +#include <linux/rpmsg/qcom_smd.h> -#include "qcom_mdt_loader.h" +#include "qcom_common.h" #include "remoteproc_internal.h" #include "qcom_wcnss.h" @@ -94,6 +96,8 @@ struct qcom_wcnss { phys_addr_t mem_reloc; void *mem_region; size_t mem_size; + + struct qcom_rproc_subdev smd_subdev; }; static const struct wcnss_data riva_data = { @@ -147,34 +151,9 @@ void qcom_wcnss_assign_iris(struct qcom_wcnss *wcnss, static int wcnss_load(struct rproc *rproc, const struct firmware *fw) { struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv; - phys_addr_t fw_addr; - size_t fw_size; - bool relocate; - int ret; - - ret = qcom_scm_pas_init_image(WCNSS_PAS_ID, fw->data, fw->size); - if (ret) { - dev_err(&rproc->dev, "invalid firmware metadata\n"); - return ret; - } - - ret = qcom_mdt_parse(fw, &fw_addr, &fw_size, &relocate); - if (ret) { - dev_err(&rproc->dev, "failed to parse mdt header\n"); - return ret; - } - - if (relocate) { - wcnss->mem_reloc = fw_addr; - - ret = qcom_scm_pas_mem_setup(WCNSS_PAS_ID, wcnss->mem_phys, fw_size); - if (ret) { - dev_err(&rproc->dev, "unable to setup memory for image\n"); - return ret; - } - } - return qcom_mdt_load(rproc, 
fw, rproc->firmware); + return qcom_mdt_load(wcnss->dev, fw, rproc->firmware, WCNSS_PAS_ID, + wcnss->mem_region, wcnss->mem_phys, wcnss->mem_size); } static const struct rproc_fw_ops wcnss_fw_ops = { @@ -577,6 +556,8 @@ static int wcnss_probe(struct platform_device *pdev) } } + qcom_add_smd_subdev(rproc, &wcnss->smd_subdev); + ret = rproc_add(rproc); if (ret) goto free_rproc; @@ -597,6 +578,8 @@ static int wcnss_remove(struct platform_device *pdev) qcom_smem_state_put(wcnss->state); rproc_del(wcnss->rproc); + + qcom_remove_smd_subdev(wcnss->rproc, &wcnss->smd_subdev); rproc_free(wcnss->rproc); return 0; @@ -608,6 +591,7 @@ static const struct of_device_id wcnss_of_match[] = { { .compatible = "qcom,pronto-v2-pil", &pronto_v2_data }, { }, }; +MODULE_DEVICE_TABLE(of, wcnss_of_match); static struct platform_driver wcnss_driver = { .probe = wcnss_probe, diff --git a/drivers/remoteproc/qcom_wcnss_iris.c b/drivers/remoteproc/qcom_wcnss_iris.c index 05d6e175411a..e842be58e8c7 100644 --- a/drivers/remoteproc/qcom_wcnss_iris.c +++ b/drivers/remoteproc/qcom_wcnss_iris.c @@ -171,6 +171,7 @@ static const struct of_device_id iris_of_match[] = { { .compatible = "qcom,wcn3680", .data = &wcn3680_data }, {} }; +MODULE_DEVICE_TABLE(of, iris_of_match); struct platform_driver qcom_iris_driver = { .probe = qcom_iris_probe, diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index c6bfb3496684..90b05c72186c 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -236,6 +236,10 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i) } notifyid = ret; + /* Potentially bump max_notifyid */ + if (notifyid > rproc->max_notifyid) + rproc->max_notifyid = notifyid; + dev_dbg(dev, "vring%d: va %p dma %pad size 0x%x idr %d\n", i, va, &dma, size, notifyid); @@ -296,6 +300,20 @@ void rproc_free_vring(struct rproc_vring *rvring) rsc->vring[idx].notifyid = -1; } +static int rproc_vdev_do_probe(struct rproc_subdev *subdev) 
+{ + struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev); + + return rproc_add_virtio_dev(rvdev, rvdev->id); +} + +static void rproc_vdev_do_remove(struct rproc_subdev *subdev) +{ + struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev); + + rproc_remove_virtio_dev(rvdev); +} + /** * rproc_handle_vdev() - handle a vdev fw resource * @rproc: the remote processor @@ -356,6 +374,9 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc, if (!rvdev) return -ENOMEM; + kref_init(&rvdev->refcount); + + rvdev->id = rsc->id; rvdev->rproc = rproc; /* parse the vrings */ @@ -368,22 +389,48 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc, /* remember the resource offset*/ rvdev->rsc_offset = offset; + /* allocate the vring resources */ + for (i = 0; i < rsc->num_of_vrings; i++) { + ret = rproc_alloc_vring(rvdev, i); + if (ret) + goto unwind_vring_allocations; + } + list_add_tail(&rvdev->node, &rproc->rvdevs); - /* it is now safe to add the virtio device */ - ret = rproc_add_virtio_dev(rvdev, rsc->id); - if (ret) - goto remove_rvdev; + rproc_add_subdev(rproc, &rvdev->subdev, + rproc_vdev_do_probe, rproc_vdev_do_remove); return 0; -remove_rvdev: - list_del(&rvdev->node); +unwind_vring_allocations: + for (i--; i >= 0; i--) + rproc_free_vring(&rvdev->vring[i]); free_rvdev: kfree(rvdev); return ret; } +void rproc_vdev_release(struct kref *ref) +{ + struct rproc_vdev *rvdev = container_of(ref, struct rproc_vdev, refcount); + struct rproc_vring *rvring; + struct rproc *rproc = rvdev->rproc; + int id; + + for (id = 0; id < ARRAY_SIZE(rvdev->vring); id++) { + rvring = &rvdev->vring[id]; + if (!rvring->va) + continue; + + rproc_free_vring(rvring); + } + + rproc_remove_subdev(rproc, &rvdev->subdev); + list_del(&rvdev->node); + kfree(rvdev); +} + /** * rproc_handle_trace() - handle a shared trace buffer resource * @rproc: the remote processor @@ -673,15 +720,6 @@ free_carv: return ret; } -static 
int rproc_count_vrings(struct rproc *rproc, struct fw_rsc_vdev *rsc, - int offset, int avail) -{ - /* Summarize the number of notification IDs */ - rproc->max_notifyid += rsc->num_of_vrings; - - return 0; -} - /* * A lookup table for resource handlers. The indices are defined in * enum fw_resource_type. @@ -690,10 +728,6 @@ static rproc_handle_resource_t rproc_loading_handlers[RSC_LAST] = { [RSC_CARVEOUT] = (rproc_handle_resource_t)rproc_handle_carveout, [RSC_DEVMEM] = (rproc_handle_resource_t)rproc_handle_devmem, [RSC_TRACE] = (rproc_handle_resource_t)rproc_handle_trace, - [RSC_VDEV] = (rproc_handle_resource_t)rproc_count_vrings, -}; - -static rproc_handle_resource_t rproc_vdev_handler[RSC_LAST] = { [RSC_VDEV] = (rproc_handle_resource_t)rproc_handle_vdev, }; @@ -736,6 +770,34 @@ static int rproc_handle_resources(struct rproc *rproc, int len, return ret; } +static int rproc_probe_subdevices(struct rproc *rproc) +{ + struct rproc_subdev *subdev; + int ret; + + list_for_each_entry(subdev, &rproc->subdevs, node) { + ret = subdev->probe(subdev); + if (ret) + goto unroll_registration; + } + + return 0; + +unroll_registration: + list_for_each_entry_continue_reverse(subdev, &rproc->subdevs, node) + subdev->remove(subdev); + + return ret; +} + +static void rproc_remove_subdevices(struct rproc *rproc) +{ + struct rproc_subdev *subdev; + + list_for_each_entry(subdev, &rproc->subdevs, node) + subdev->remove(subdev); +} + /** * rproc_resource_cleanup() - clean up and free all acquired resources * @rproc: rproc handle @@ -782,7 +844,7 @@ static void rproc_resource_cleanup(struct rproc *rproc) /* clean up remote vdev entries */ list_for_each_entry_safe(rvdev, rvtmp, &rproc->rvdevs, node) - rproc_remove_virtio_dev(rvdev); + kref_put(&rvdev->refcount, rproc_vdev_release); } /* @@ -836,13 +898,6 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw) /* reset max_notifyid */ rproc->max_notifyid = -1; - /* look for virtio devices and register them */ - ret = 
rproc_handle_resources(rproc, tablesz, rproc_vdev_handler); - if (ret) { - dev_err(dev, "Failed to handle vdev resources: %d\n", ret); - goto clean_up; - } - /* handle fw resources which are required to boot rproc */ ret = rproc_handle_resources(rproc, tablesz, rproc_loading_handlers); if (ret) { @@ -878,12 +933,22 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw) goto clean_up_resources; } + /* probe any subdevices for the remote processor */ + ret = rproc_probe_subdevices(rproc); + if (ret) { + dev_err(dev, "failed to probe subdevices for %s: %d\n", + rproc->name, ret); + goto stop_rproc; + } + rproc->state = RPROC_RUNNING; dev_info(dev, "remote processor %s is now up\n", rproc->name); return 0; +stop_rproc: + rproc->ops->stop(rproc); clean_up_resources: rproc_resource_cleanup(rproc); clean_up: @@ -909,7 +974,7 @@ static void rproc_fw_config_virtio(const struct firmware *fw, void *context) /* if rproc is marked always-on, request it to boot */ if (rproc->auto_boot) - rproc_boot_nowait(rproc); + rproc_boot(rproc); release_firmware(fw); /* allow rproc_del() contexts, if any, to proceed */ @@ -1007,7 +1072,6 @@ static void rproc_crash_handler_work(struct work_struct *work) /** * __rproc_boot() - boot a remote processor * @rproc: handle of a remote processor - * @wait: wait for rproc registration completion * * Boot a remote processor (i.e. load its firmware, power it on, ...). * @@ -1016,7 +1080,7 @@ static void rproc_crash_handler_work(struct work_struct *work) * * Returns 0 on success, and an appropriate error value otherwise. 
*/ -static int __rproc_boot(struct rproc *rproc, bool wait) +static int __rproc_boot(struct rproc *rproc) { const struct firmware *firmware_p; struct device *dev; @@ -1050,10 +1114,6 @@ static int __rproc_boot(struct rproc *rproc, bool wait) goto downref_rproc; } - /* if rproc virtio is not yet configured, wait */ - if (wait) - wait_for_completion(&rproc->firmware_loading_complete); - ret = rproc_fw_boot(rproc, firmware_p); release_firmware(firmware_p); @@ -1072,22 +1132,11 @@ unlock_mutex: */ int rproc_boot(struct rproc *rproc) { - return __rproc_boot(rproc, true); + return __rproc_boot(rproc); } EXPORT_SYMBOL(rproc_boot); /** - * rproc_boot_nowait() - boot a remote processor - * @rproc: handle of a remote processor - * - * Same as rproc_boot() but don't wait for rproc registration completion - */ -int rproc_boot_nowait(struct rproc *rproc) -{ - return __rproc_boot(rproc, false); -} - -/** * rproc_shutdown() - power off the remote processor * @rproc: the remote processor * @@ -1121,6 +1170,9 @@ void rproc_shutdown(struct rproc *rproc) if (!atomic_dec_and_test(&rproc->power)) goto out; + /* remove any subdevices for the remote processor */ + rproc_remove_subdevices(rproc); + /* power off the remote processor */ ret = rproc->ops->stop(rproc); if (ret) { @@ -1233,9 +1285,6 @@ int rproc_add(struct rproc *rproc) dev_info(dev, "%s is available\n", rproc->name); - dev_info(dev, "Note: remoteproc is still under development and considered experimental.\n"); - dev_info(dev, "THE BINARY FORMAT IS NOT YET FINALIZED, and backward compatibility isn't yet guaranteed.\n"); - /* create debugfs entries */ rproc_create_debug_dir(rproc); ret = rproc_add_virtio_devices(rproc); @@ -1273,6 +1322,7 @@ static void rproc_type_release(struct device *dev) if (rproc->index >= 0) ida_simple_remove(&rproc_dev_index, rproc->index); + kfree(rproc->firmware); kfree(rproc); } @@ -1310,31 +1360,31 @@ struct rproc *rproc_alloc(struct device *dev, const char *name, { struct rproc *rproc; char *p, 
*template = "rproc-%s-fw"; - int name_len = 0; + int name_len; if (!dev || !name || !ops) return NULL; - if (!firmware) + if (!firmware) { /* - * Make room for default firmware name (minus %s plus '\0'). * If the caller didn't pass in a firmware name then - * construct a default name. We're already glomming 'len' - * bytes onto the end of the struct rproc allocation, so do - * a few more for the default firmware name (but only if - * the caller doesn't pass one). + * construct a default name. */ name_len = strlen(name) + strlen(template) - 2 + 1; - - rproc = kzalloc(sizeof(*rproc) + len + name_len, GFP_KERNEL); - if (!rproc) - return NULL; - - if (!firmware) { - p = (char *)rproc + sizeof(struct rproc) + len; + p = kmalloc(name_len, GFP_KERNEL); + if (!p) + return NULL; snprintf(p, name_len, template, name); } else { - p = (char *)firmware; + p = kstrdup(firmware, GFP_KERNEL); + if (!p) + return NULL; + } + + rproc = kzalloc(sizeof(struct rproc) + len, GFP_KERNEL); + if (!rproc) { + kfree(p); + return NULL; } rproc->firmware = p; @@ -1346,6 +1396,7 @@ struct rproc *rproc_alloc(struct device *dev, const char *name, device_initialize(&rproc->dev); rproc->dev.parent = dev; rproc->dev.type = &rproc_type; + rproc->dev.class = &rproc_class; /* Assign a unique device index and name */ rproc->index = ida_simple_get(&rproc_dev_index, 0, 0, GFP_KERNEL); @@ -1370,6 +1421,7 @@ struct rproc *rproc_alloc(struct device *dev, const char *name, INIT_LIST_HEAD(&rproc->mappings); INIT_LIST_HEAD(&rproc->traces); INIT_LIST_HEAD(&rproc->rvdevs); + INIT_LIST_HEAD(&rproc->subdevs); INIT_WORK(&rproc->crash_handler, rproc_crash_handler_work); init_completion(&rproc->crash_comp); @@ -1428,8 +1480,6 @@ EXPORT_SYMBOL(rproc_put); */ int rproc_del(struct rproc *rproc) { - struct rproc_vdev *rvdev, *tmp; - if (!rproc) return -EINVAL; @@ -1441,10 +1491,6 @@ int rproc_del(struct rproc *rproc) if (rproc->auto_boot) rproc_shutdown(rproc); - /* clean up remote vdev entries */ - 
list_for_each_entry_safe(rvdev, tmp, &rproc->rvdevs, node) - rproc_remove_virtio_dev(rvdev); - /* the rproc is downref'ed as soon as it's removed from the klist */ mutex_lock(&rproc_list_mutex); list_del(&rproc->node); @@ -1457,6 +1503,36 @@ int rproc_del(struct rproc *rproc) EXPORT_SYMBOL(rproc_del); /** + * rproc_add_subdev() - add a subdevice to a remoteproc + * @rproc: rproc handle to add the subdevice to + * @subdev: subdev handle to register + * @probe: function to call when the rproc boots + * @remove: function to call when the rproc shuts down + */ +void rproc_add_subdev(struct rproc *rproc, + struct rproc_subdev *subdev, + int (*probe)(struct rproc_subdev *subdev), + void (*remove)(struct rproc_subdev *subdev)) +{ + subdev->probe = probe; + subdev->remove = remove; + + list_add_tail(&subdev->node, &rproc->subdevs); +} +EXPORT_SYMBOL(rproc_add_subdev); + +/** + * rproc_remove_subdev() - remove a subdevice from a remoteproc + * @rproc: rproc handle to remove the subdevice from + * @subdev: subdev handle, previously registered with rproc_add_subdev() + */ +void rproc_remove_subdev(struct rproc *rproc, struct rproc_subdev *subdev) +{ + list_del(&subdev->node); +} +EXPORT_SYMBOL(rproc_remove_subdev); + +/** * rproc_report_crash() - rproc crash reporter function * @rproc: remote processor * @type: crash type @@ -1484,6 +1560,7 @@ EXPORT_SYMBOL(rproc_report_crash); static int __init remoteproc_init(void) { + rproc_init_sysfs(); rproc_init_debugfs(); return 0; @@ -1495,6 +1572,7 @@ static void __exit remoteproc_exit(void) ida_destroy(&rproc_dev_index); rproc_exit_debugfs(); + rproc_exit_sysfs(); } module_exit(remoteproc_exit); diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c index 374797206c79..1c122e230cec 100644 --- a/drivers/remoteproc/remoteproc_debugfs.c +++ b/drivers/remoteproc/remoteproc_debugfs.c @@ -59,75 +59,6 @@ static const struct file_operations trace_rproc_ops = { .llseek = generic_file_llseek, }; -/* - * 
A state-to-string lookup table, for exposing a human readable state - * via debugfs. Always keep in sync with enum rproc_state - */ -static const char * const rproc_state_string[] = { - "offline", - "suspended", - "running", - "crashed", - "invalid", -}; - -/* expose the state of the remote processor via debugfs */ -static ssize_t rproc_state_read(struct file *filp, char __user *userbuf, - size_t count, loff_t *ppos) -{ - struct rproc *rproc = filp->private_data; - unsigned int state; - char buf[30]; - int i; - - state = rproc->state > RPROC_LAST ? RPROC_LAST : rproc->state; - - i = scnprintf(buf, 30, "%.28s (%d)\n", rproc_state_string[state], - rproc->state); - - return simple_read_from_buffer(userbuf, count, ppos, buf, i); -} - -static ssize_t rproc_state_write(struct file *filp, const char __user *userbuf, - size_t count, loff_t *ppos) -{ - struct rproc *rproc = filp->private_data; - char buf[10]; - int ret; - - if (count > sizeof(buf) || count <= 0) - return -EINVAL; - - ret = copy_from_user(buf, userbuf, count); - if (ret) - return -EFAULT; - - if (buf[count - 1] == '\n') - buf[count - 1] = '\0'; - - if (!strncmp(buf, "start", count)) { - ret = rproc_boot(rproc); - if (ret) { - dev_err(&rproc->dev, "Boot failed: %d\n", ret); - return ret; - } - } else if (!strncmp(buf, "stop", count)) { - rproc_shutdown(rproc); - } else { - dev_err(&rproc->dev, "Unrecognised option: %s\n", buf); - return -EINVAL; - } - - return count; -} - -static const struct file_operations rproc_state_ops = { - .read = rproc_state_read, - .write = rproc_state_write, - .open = simple_open, - .llseek = generic_file_llseek, -}; - /* expose the name of the remote processor via debugfs */ static ssize_t rproc_name_read(struct file *filp, char __user *userbuf, size_t count, loff_t *ppos) @@ -265,8 +196,6 @@ void rproc_create_debug_dir(struct rproc *rproc) debugfs_create_file("name", 0400, rproc->dbg_dir, rproc, &rproc_name_ops); - debugfs_create_file("state", 0400, rproc->dbg_dir, - rproc, 
&rproc_state_ops); debugfs_create_file("recovery", 0400, rproc->dbg_dir, rproc, &rproc_recovery_ops); } diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h index 4cf93ca2816e..1e9e5b3f021c 100644 --- a/drivers/remoteproc/remoteproc_internal.h +++ b/drivers/remoteproc/remoteproc_internal.h @@ -49,6 +49,7 @@ struct rproc_fw_ops { void rproc_release(struct kref *kref); irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int vq_id); int rproc_boot_nowait(struct rproc *rproc); +void rproc_vdev_release(struct kref *ref); /* from remoteproc_virtio.c */ int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id); @@ -63,6 +64,11 @@ void rproc_create_debug_dir(struct rproc *rproc); void rproc_init_debugfs(void); void rproc_exit_debugfs(void); +/* from remoteproc_sysfs.c */ +extern struct class rproc_class; +int rproc_init_sysfs(void); +void rproc_exit_sysfs(void); + void rproc_free_vring(struct rproc_vring *rvring); int rproc_alloc_vring(struct rproc_vdev *rvdev, int i); diff --git a/drivers/remoteproc/remoteproc_sysfs.c b/drivers/remoteproc/remoteproc_sysfs.c new file mode 100644 index 000000000000..bc5b0e00efb1 --- /dev/null +++ b/drivers/remoteproc/remoteproc_sysfs.c @@ -0,0 +1,151 @@ +/* + * Remote Processor Framework + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/remoteproc.h> + +#include "remoteproc_internal.h" + +#define to_rproc(d) container_of(d, struct rproc, dev) + +/* Expose the loaded / running firmware name via sysfs */ +static ssize_t firmware_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct rproc *rproc = to_rproc(dev); + + return sprintf(buf, "%s\n", rproc->firmware); +} + +/* Change firmware name via sysfs */ +static ssize_t firmware_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct rproc *rproc = to_rproc(dev); + char *p; + int err, len = count; + + err = mutex_lock_interruptible(&rproc->lock); + if (err) { + dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, err); + return -EINVAL; + } + + if (rproc->state != RPROC_OFFLINE) { + dev_err(dev, "can't change firmware while running\n"); + err = -EBUSY; + goto out; + } + + len = strcspn(buf, "\n"); + + p = kstrndup(buf, len, GFP_KERNEL); + if (!p) { + err = -ENOMEM; + goto out; + } + + kfree(rproc->firmware); + rproc->firmware = p; +out: + mutex_unlock(&rproc->lock); + + return err ? err : count; +} +static DEVICE_ATTR_RW(firmware); + +/* + * A state-to-string lookup table, for exposing a human readable state + * via sysfs. Always keep in sync with enum rproc_state + */ +static const char * const rproc_state_string[] = { + [RPROC_OFFLINE] = "offline", + [RPROC_SUSPENDED] = "suspended", + [RPROC_RUNNING] = "running", + [RPROC_CRASHED] = "crashed", + [RPROC_LAST] = "invalid", +}; + +/* Expose the state of the remote processor via sysfs */ +static ssize_t state_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct rproc *rproc = to_rproc(dev); + unsigned int state; + + state = rproc->state > RPROC_LAST ? 
RPROC_LAST : rproc->state; + return sprintf(buf, "%s\n", rproc_state_string[state]); +} + +/* Change remote processor state via sysfs */ +static ssize_t state_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct rproc *rproc = to_rproc(dev); + int ret = 0; + + if (sysfs_streq(buf, "start")) { + if (rproc->state == RPROC_RUNNING) + return -EBUSY; + + ret = rproc_boot(rproc); + if (ret) + dev_err(&rproc->dev, "Boot failed: %d\n", ret); + } else if (sysfs_streq(buf, "stop")) { + if (rproc->state != RPROC_RUNNING) + return -EINVAL; + + rproc_shutdown(rproc); + } else { + dev_err(&rproc->dev, "Unrecognised option: %s\n", buf); + ret = -EINVAL; + } + return ret ? ret : count; +} +static DEVICE_ATTR_RW(state); + +static struct attribute *rproc_attrs[] = { + &dev_attr_firmware.attr, + &dev_attr_state.attr, + NULL +}; + +static const struct attribute_group rproc_devgroup = { + .attrs = rproc_attrs +}; + +static const struct attribute_group *rproc_devgroups[] = { + &rproc_devgroup, + NULL +}; + +struct class rproc_class = { + .name = "remoteproc", + .dev_groups = rproc_devgroups, +}; + +int __init rproc_init_sysfs(void) +{ + /* create remoteproc device class for sysfs */ + int err = class_register(&rproc_class); + + if (err) + pr_err("remoteproc: unable to register class\n"); + return err; +} + +void __exit rproc_exit_sysfs(void) +{ + class_unregister(&rproc_class); +} diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c index 01870a16d6d2..364411fb7734 100644 --- a/drivers/remoteproc/remoteproc_virtio.c +++ b/drivers/remoteproc/remoteproc_virtio.c @@ -79,7 +79,7 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev, struct rproc_vring *rvring; struct virtqueue *vq; void *addr; - int len, size, ret; + int len, size; /* we're temporarily limited to two virtqueues per rvdev */ if (id >= ARRAY_SIZE(rvdev->vring)) @@ -88,10 +88,6 @@ static struct virtqueue *rp_find_vq(struct 
virtio_device *vdev, if (!name) return NULL; - ret = rproc_alloc_vring(rvdev, id); - if (ret) - return ERR_PTR(ret); - rvring = &rvdev->vring[id]; addr = rvring->va; len = rvring->len; @@ -130,7 +126,6 @@ static void __rproc_virtio_del_vqs(struct virtio_device *vdev) rvring = vq->priv; rvring->vq = NULL; vring_del_virtqueue(vq); - rproc_free_vring(rvring); } } @@ -282,14 +277,13 @@ static const struct virtio_config_ops rproc_virtio_config_ops = { * Never call this function directly; it will be called by the driver * core when needed. */ -static void rproc_vdev_release(struct device *dev) +static void rproc_virtio_dev_release(struct device *dev) { struct virtio_device *vdev = dev_to_virtio(dev); struct rproc_vdev *rvdev = vdev_to_rvdev(vdev); struct rproc *rproc = vdev_to_rproc(vdev); - list_del(&rvdev->node); - kfree(rvdev); + kref_put(&rvdev->refcount, rproc_vdev_release); put_device(&rproc->dev); } @@ -313,7 +307,7 @@ int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id) vdev->id.device = id, vdev->config = &rproc_virtio_config_ops, vdev->dev.parent = dev; - vdev->dev.release = rproc_vdev_release; + vdev->dev.release = rproc_virtio_dev_release; /* * We're indirectly making a non-temporary copy of the rproc pointer @@ -325,6 +319,9 @@ int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id) */ get_device(&rproc->dev); + /* Reference the vdev and vring allocations */ + kref_get(&rvdev->refcount); + ret = register_virtio_device(vdev); if (ret) { put_device(&rproc->dev); diff --git a/drivers/remoteproc/st_remoteproc.c b/drivers/remoteproc/st_remoteproc.c index da4e152e9733..f21787b602e3 100644 --- a/drivers/remoteproc/st_remoteproc.c +++ b/drivers/remoteproc/st_remoteproc.c @@ -107,7 +107,7 @@ static int st_rproc_stop(struct rproc *rproc) return sw_err ?: pwr_err; } -static struct rproc_ops st_rproc_ops = { +static const struct rproc_ops st_rproc_ops = { .start = st_rproc_start, .stop = st_rproc_stop, }; diff --git a/drivers/remoteproc/st_slim_rproc.c 
b/drivers/remoteproc/st_slim_rproc.c new file mode 100644 index 000000000000..6cfd862f945b --- /dev/null +++ b/drivers/remoteproc/st_slim_rproc.c @@ -0,0 +1,364 @@ +/* + * SLIM core rproc driver + * + * Copyright (C) 2016 STMicroelectronics + * + * Author: Peter Griffin <peter.griffin@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/remoteproc.h> +#include <linux/remoteproc/st_slim_rproc.h> +#include "remoteproc_internal.h" + +/* SLIM core registers */ +#define SLIM_ID_OFST 0x0 +#define SLIM_VER_OFST 0x4 + +#define SLIM_EN_OFST 0x8 +#define SLIM_EN_RUN BIT(0) + +#define SLIM_CLK_GATE_OFST 0xC +#define SLIM_CLK_GATE_DIS BIT(0) +#define SLIM_CLK_GATE_RESET BIT(2) + +#define SLIM_SLIM_PC_OFST 0x20 + +/* DMEM registers */ +#define SLIM_REV_ID_OFST 0x0 +#define SLIM_REV_ID_MIN_MASK GENMASK(15, 8) +#define SLIM_REV_ID_MIN(id) ((id & SLIM_REV_ID_MIN_MASK) >> 8) +#define SLIM_REV_ID_MAJ_MASK GENMASK(23, 16) +#define SLIM_REV_ID_MAJ(id) ((id & SLIM_REV_ID_MAJ_MASK) >> 16) + + +/* peripherals registers */ +#define SLIM_STBUS_SYNC_OFST 0xF88 +#define SLIM_STBUS_SYNC_DIS BIT(0) + +#define SLIM_INT_SET_OFST 0xFD4 +#define SLIM_INT_CLR_OFST 0xFD8 +#define SLIM_INT_MASK_OFST 0xFDC + +#define SLIM_CMD_CLR_OFST 0xFC8 +#define SLIM_CMD_MASK_OFST 0xFCC + +static const char *mem_names[ST_SLIM_MEM_MAX] = { + [ST_SLIM_DMEM] = "dmem", + [ST_SLIM_IMEM] = "imem", +}; + +static int slim_clk_get(struct st_slim_rproc *slim_rproc, struct device *dev) +{ + int clk, err; + + for (clk = 0; clk < ST_SLIM_MAX_CLK; clk++) { + slim_rproc->clks[clk] = 
of_clk_get(dev->of_node, clk); + if (IS_ERR(slim_rproc->clks[clk])) { + err = PTR_ERR(slim_rproc->clks[clk]); + if (err == -EPROBE_DEFER) + goto err_put_clks; + slim_rproc->clks[clk] = NULL; + break; + } + } + + return 0; + +err_put_clks: + while (--clk >= 0) + clk_put(slim_rproc->clks[clk]); + + return err; +} + +static void slim_clk_disable(struct st_slim_rproc *slim_rproc) +{ + int clk; + + for (clk = 0; clk < ST_SLIM_MAX_CLK && slim_rproc->clks[clk]; clk++) + clk_disable_unprepare(slim_rproc->clks[clk]); +} + +static int slim_clk_enable(struct st_slim_rproc *slim_rproc) +{ + int clk, ret; + + for (clk = 0; clk < ST_SLIM_MAX_CLK && slim_rproc->clks[clk]; clk++) { + ret = clk_prepare_enable(slim_rproc->clks[clk]); + if (ret) + goto err_disable_clks; + } + + return 0; + +err_disable_clks: + while (--clk >= 0) + clk_disable_unprepare(slim_rproc->clks[clk]); + + return ret; +} + +/* + * Remoteproc slim specific device handlers + */ +static int slim_rproc_start(struct rproc *rproc) +{ + struct device *dev = &rproc->dev; + struct st_slim_rproc *slim_rproc = rproc->priv; + unsigned long hw_id, hw_ver, fw_rev; + u32 val; + + /* disable CPU pipeline clock & reset CPU pipeline */ + val = SLIM_CLK_GATE_DIS | SLIM_CLK_GATE_RESET; + writel(val, slim_rproc->slimcore + SLIM_CLK_GATE_OFST); + + /* disable SLIM core STBus sync */ + writel(SLIM_STBUS_SYNC_DIS, slim_rproc->peri + SLIM_STBUS_SYNC_OFST); + + /* enable cpu pipeline clock */ + writel(!SLIM_CLK_GATE_DIS, + slim_rproc->slimcore + SLIM_CLK_GATE_OFST); + + /* clear int & cmd mailbox */ + writel(~0U, slim_rproc->peri + SLIM_INT_CLR_OFST); + writel(~0U, slim_rproc->peri + SLIM_CMD_CLR_OFST); + + /* enable all channels cmd & int */ + writel(~0U, slim_rproc->peri + SLIM_INT_MASK_OFST); + writel(~0U, slim_rproc->peri + SLIM_CMD_MASK_OFST); + + /* enable cpu */ + writel(SLIM_EN_RUN, slim_rproc->slimcore + SLIM_EN_OFST); + + hw_id = readl_relaxed(slim_rproc->slimcore + SLIM_ID_OFST); + hw_ver = readl_relaxed(slim_rproc->slimcore 
+ SLIM_VER_OFST); + + fw_rev = readl(slim_rproc->mem[ST_SLIM_DMEM].cpu_addr + + SLIM_REV_ID_OFST); + + dev_info(dev, "fw rev:%ld.%ld on SLIM %ld.%ld\n", + SLIM_REV_ID_MAJ(fw_rev), SLIM_REV_ID_MIN(fw_rev), + hw_id, hw_ver); + + return 0; +} + +static int slim_rproc_stop(struct rproc *rproc) +{ + struct st_slim_rproc *slim_rproc = rproc->priv; + u32 val; + + /* mask all (cmd & int) channels */ + writel(0UL, slim_rproc->peri + SLIM_INT_MASK_OFST); + writel(0UL, slim_rproc->peri + SLIM_CMD_MASK_OFST); + + /* disable cpu pipeline clock */ + writel(SLIM_CLK_GATE_DIS, slim_rproc->slimcore + SLIM_CLK_GATE_OFST); + + writel(!SLIM_EN_RUN, slim_rproc->slimcore + SLIM_EN_OFST); + + val = readl(slim_rproc->slimcore + SLIM_EN_OFST); + if (val & SLIM_EN_RUN) + dev_warn(&rproc->dev, "Failed to disable SLIM"); + + dev_dbg(&rproc->dev, "slim stopped\n"); + + return 0; +} + +static void *slim_rproc_da_to_va(struct rproc *rproc, u64 da, int len) +{ + struct st_slim_rproc *slim_rproc = rproc->priv; + void *va = NULL; + int i; + + for (i = 0; i < ST_SLIM_MEM_MAX; i++) { + if (da != slim_rproc->mem[i].bus_addr) + continue; + + if (len <= slim_rproc->mem[i].size) { + /* __force to make sparse happy with type conversion */ + va = (__force void *)slim_rproc->mem[i].cpu_addr; + break; + } + } + + dev_dbg(&rproc->dev, "da = 0x%llx len = 0x%x va = 0x%p\n", da, len, va); + + return va; +} + +static const struct rproc_ops slim_rproc_ops = { + .start = slim_rproc_start, + .stop = slim_rproc_stop, + .da_to_va = slim_rproc_da_to_va, +}; + +/* + * Firmware handler operations: sanity, boot address, load ... 
+ */ + +static struct resource_table empty_rsc_tbl = { + .ver = 1, + .num = 0, +}; + +static struct resource_table *slim_rproc_find_rsc_table(struct rproc *rproc, + const struct firmware *fw, + int *tablesz) +{ + *tablesz = sizeof(empty_rsc_tbl); + return &empty_rsc_tbl; +} + +static struct rproc_fw_ops slim_rproc_fw_ops = { + .find_rsc_table = slim_rproc_find_rsc_table, +}; + +/** + * st_slim_rproc_alloc() - allocate and initialise slim rproc + * @pdev: Pointer to the platform_device struct + * @fw_name: Name of firmware for rproc to use + * + * Function for allocating and initialising a slim rproc for use by + * device drivers whose IP is based around the SLIM core. It + * obtains and enables any clocks required by the SLIM core and also + * ioremaps the various IO. + * + * Returns st_slim_rproc pointer or PTR_ERR() on error. + */ + +struct st_slim_rproc *st_slim_rproc_alloc(struct platform_device *pdev, + char *fw_name) +{ + struct device *dev = &pdev->dev; + struct st_slim_rproc *slim_rproc; + struct device_node *np = dev->of_node; + struct rproc *rproc; + struct resource *res; + int err, i; + const struct rproc_fw_ops *elf_ops; + + if (!fw_name) + return ERR_PTR(-EINVAL); + + if (!of_device_is_compatible(np, "st,slim-rproc")) + return ERR_PTR(-EINVAL); + + rproc = rproc_alloc(dev, np->name, &slim_rproc_ops, + fw_name, sizeof(*slim_rproc)); + if (!rproc) + return ERR_PTR(-ENOMEM); + + rproc->has_iommu = false; + + slim_rproc = rproc->priv; + slim_rproc->rproc = rproc; + + elf_ops = rproc->fw_ops; + /* Use some generic elf ops */ + slim_rproc_fw_ops.load = elf_ops->load; + slim_rproc_fw_ops.sanity_check = elf_ops->sanity_check; + + rproc->fw_ops = &slim_rproc_fw_ops; + + /* get imem and dmem */ + for (i = 0; i < ARRAY_SIZE(mem_names); i++) { + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + mem_names[i]); + + slim_rproc->mem[i].cpu_addr = devm_ioremap_resource(dev, res); + if (IS_ERR(slim_rproc->mem[i].cpu_addr)) { + dev_err(&pdev->dev, 
"devm_ioremap_resource failed\n"); + err = PTR_ERR(slim_rproc->mem[i].cpu_addr); + goto err; + } + slim_rproc->mem[i].bus_addr = res->start; + slim_rproc->mem[i].size = resource_size(res); + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "slimcore"); + slim_rproc->slimcore = devm_ioremap_resource(dev, res); + if (IS_ERR(slim_rproc->slimcore)) { + dev_err(&pdev->dev, "failed to ioremap slimcore IO\n"); + err = PTR_ERR(slim_rproc->slimcore); + goto err; + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "peripherals"); + slim_rproc->peri = devm_ioremap_resource(dev, res); + if (IS_ERR(slim_rproc->peri)) { + dev_err(&pdev->dev, "failed to ioremap peripherals IO\n"); + err = PTR_ERR(slim_rproc->peri); + goto err; + } + + err = slim_clk_get(slim_rproc, dev); + if (err) + goto err; + + err = slim_clk_enable(slim_rproc); + if (err) { + dev_err(dev, "Failed to enable clocks\n"); + goto err_clk_put; + } + + /* Register as a remoteproc device */ + err = rproc_add(rproc); + if (err) { + dev_err(dev, "registration of slim remoteproc failed\n"); + goto err_clk_dis; + } + + return slim_rproc; + +err_clk_dis: + slim_clk_disable(slim_rproc); +err_clk_put: + for (i = 0; i < ST_SLIM_MAX_CLK && slim_rproc->clks[i]; i++) + clk_put(slim_rproc->clks[i]); +err: + rproc_free(rproc); + return ERR_PTR(err); +} +EXPORT_SYMBOL(st_slim_rproc_alloc); + +/** + * st_slim_rproc_put() - put slim rproc resources + * @slim_rproc: Pointer to the st_slim_rproc struct + * + * Function for calling respective _put() functions on slim_rproc resources. 
+ * + */ +void st_slim_rproc_put(struct st_slim_rproc *slim_rproc) +{ + int clk; + + if (!slim_rproc) + return; + + slim_clk_disable(slim_rproc); + + for (clk = 0; clk < ST_SLIM_MAX_CLK && slim_rproc->clks[clk]; clk++) + clk_put(slim_rproc->clks[clk]); + + rproc_del(slim_rproc->rproc); + rproc_free(slim_rproc->rproc); +} +EXPORT_SYMBOL(st_slim_rproc_put); + +MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>"); +MODULE_DESCRIPTION("STMicroelectronics SLIM core rproc driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/remoteproc/ste_modem_rproc.c b/drivers/remoteproc/ste_modem_rproc.c deleted file mode 100644 index 03d69a9a3c5b..000000000000 --- a/drivers/remoteproc/ste_modem_rproc.c +++ /dev/null @@ -1,342 +0,0 @@ -/* - * Copyright (C) ST-Ericsson AB 2012 - * Author: Sjur Brændeland <sjur.brandeland@stericsson.com> - * License terms: GNU General Public License (GPL), version 2 - */ - -#include <linux/module.h> -#include <linux/dma-mapping.h> -#include <linux/remoteproc.h> -#include <linux/ste_modem_shm.h> -#include "remoteproc_internal.h" - -#define SPROC_FW_SIZE (50 * 4096) -#define SPROC_MAX_TOC_ENTRIES 32 -#define SPROC_MAX_NOTIFY_ID 14 -#define SPROC_RESOURCE_NAME "rsc-table" -#define SPROC_MODEM_NAME "ste-modem" -#define SPROC_MODEM_FIRMWARE SPROC_MODEM_NAME "-fw.bin" - -#define sproc_dbg(sproc, fmt, ...) \ - dev_dbg(&sproc->mdev->pdev.dev, fmt, ##__VA_ARGS__) -#define sproc_err(sproc, fmt, ...) \ - dev_err(&sproc->mdev->pdev.dev, fmt, ##__VA_ARGS__) - -/* STE-modem control structure */ -struct sproc { - struct rproc *rproc; - struct ste_modem_device *mdev; - int error; - void *fw_addr; - size_t fw_size; - dma_addr_t fw_dma_addr; -}; - -/* STE-Modem firmware entry */ -struct ste_toc_entry { - __le32 start; - __le32 size; - __le32 flags; - __le32 entry_point; - __le32 load_addr; - char name[12]; -}; - -/* - * The Table Of Content is located at the start of the firmware image and - * at offset zero in the shared memory region. 
The resource table typically - * contains the initial boot image (boot strap) and other information elements - * such as remoteproc resource table. Each entry is identified by a unique - * name. - */ -struct ste_toc { - struct ste_toc_entry table[SPROC_MAX_TOC_ENTRIES]; -}; - -/* Loads the firmware to shared memory. */ -static int sproc_load_segments(struct rproc *rproc, const struct firmware *fw) -{ - struct sproc *sproc = rproc->priv; - - memcpy(sproc->fw_addr, fw->data, fw->size); - - return 0; -} - -/* Find the entry for resource table in the Table of Content */ -static const struct ste_toc_entry *sproc_find_rsc_entry(const void *data) -{ - int i; - const struct ste_toc *toc = data; - - /* Search the table for the resource table */ - for (i = 0; i < SPROC_MAX_TOC_ENTRIES && - toc->table[i].start != 0xffffffff; i++) { - if (!strncmp(toc->table[i].name, SPROC_RESOURCE_NAME, - sizeof(toc->table[i].name))) - return &toc->table[i]; - } - - return NULL; -} - -/* Find the resource table inside the remote processor's firmware. 
*/ -static struct resource_table * -sproc_find_rsc_table(struct rproc *rproc, const struct firmware *fw, - int *tablesz) -{ - struct sproc *sproc = rproc->priv; - struct resource_table *table; - const struct ste_toc_entry *entry; - - if (!fw) - return NULL; - - entry = sproc_find_rsc_entry(fw->data); - if (!entry) { - sproc_err(sproc, "resource table not found in fw\n"); - return NULL; - } - - table = (void *)(fw->data + entry->start); - - /* sanity check size and offset of resource table */ - if (entry->start > SPROC_FW_SIZE || - entry->size > SPROC_FW_SIZE || - fw->size > SPROC_FW_SIZE || - entry->start + entry->size > fw->size || - sizeof(struct resource_table) > entry->size) { - sproc_err(sproc, "bad size of fw or resource table\n"); - return NULL; - } - - /* we don't support any version beyond the first */ - if (table->ver != 1) { - sproc_err(sproc, "unsupported fw ver: %d\n", table->ver); - return NULL; - } - - /* make sure reserved bytes are zeroes */ - if (table->reserved[0] || table->reserved[1]) { - sproc_err(sproc, "non zero reserved bytes\n"); - return NULL; - } - - /* make sure the offsets array isn't truncated */ - if (table->num > SPROC_MAX_TOC_ENTRIES || - table->num * sizeof(table->offset[0]) + - sizeof(struct resource_table) > entry->size) { - sproc_err(sproc, "resource table incomplete\n"); - return NULL; - } - - /* If the fw size has grown, release the previous fw allocation */ - if (SPROC_FW_SIZE < fw->size) { - sproc_err(sproc, "Insufficient space for fw (%d < %zd)\n", - SPROC_FW_SIZE, fw->size); - return NULL; - } - - sproc->fw_size = fw->size; - *tablesz = entry->size; - - return table; -} - -/* Find the resource table inside the remote processor's firmware. 
*/ -static struct resource_table * -sproc_find_loaded_rsc_table(struct rproc *rproc, const struct firmware *fw) -{ - struct sproc *sproc = rproc->priv; - const struct ste_toc_entry *entry; - - if (!fw || !sproc->fw_addr) - return NULL; - - entry = sproc_find_rsc_entry(sproc->fw_addr); - if (!entry) { - sproc_err(sproc, "resource table not found in fw\n"); - return NULL; - } - - return sproc->fw_addr + entry->start; -} - -/* STE modem firmware handler operations */ -static const struct rproc_fw_ops sproc_fw_ops = { - .load = sproc_load_segments, - .find_rsc_table = sproc_find_rsc_table, - .find_loaded_rsc_table = sproc_find_loaded_rsc_table, -}; - -/* Kick the modem with specified notification id */ -static void sproc_kick(struct rproc *rproc, int vqid) -{ - struct sproc *sproc = rproc->priv; - - sproc_dbg(sproc, "kick vqid:%d\n", vqid); - - /* - * We need different notification IDs for RX and TX so add - * an offset on TX notification IDs. - */ - sproc->mdev->ops.kick(sproc->mdev, vqid + SPROC_MAX_NOTIFY_ID); -} - -/* Received a kick from a modem, kick the virtqueue */ -static void sproc_kick_callback(struct ste_modem_device *mdev, int vqid) -{ - struct sproc *sproc = mdev->drv_data; - - if (rproc_vq_interrupt(sproc->rproc, vqid) == IRQ_NONE) - sproc_dbg(sproc, "no message was found in vqid %d\n", vqid); -} - -static struct ste_modem_dev_cb sproc_dev_cb = { - .kick = sproc_kick_callback, -}; - -/* Start the STE modem */ -static int sproc_start(struct rproc *rproc) -{ - struct sproc *sproc = rproc->priv; - int i, err; - - sproc_dbg(sproc, "start ste-modem\n"); - - /* Sanity test the max_notifyid */ - if (rproc->max_notifyid > SPROC_MAX_NOTIFY_ID) { - sproc_err(sproc, "Notification IDs too high:%d\n", - rproc->max_notifyid); - return -EINVAL; - } - - /* Subscribe to notifications */ - for (i = 0; i <= rproc->max_notifyid; i++) { - err = sproc->mdev->ops.kick_subscribe(sproc->mdev, i); - if (err) { - sproc_err(sproc, - "subscription of kicks failed:%d\n", err); - 
return err; - } - } - - /* Request modem start-up*/ - return sproc->mdev->ops.power(sproc->mdev, true); -} - -/* Stop the STE modem */ -static int sproc_stop(struct rproc *rproc) -{ - struct sproc *sproc = rproc->priv; - - sproc_dbg(sproc, "stop ste-modem\n"); - - return sproc->mdev->ops.power(sproc->mdev, false); -} - -static struct rproc_ops sproc_ops = { - .start = sproc_start, - .stop = sproc_stop, - .kick = sproc_kick, -}; - -/* STE modem device is unregistered */ -static int sproc_drv_remove(struct platform_device *pdev) -{ - struct ste_modem_device *mdev = - container_of(pdev, struct ste_modem_device, pdev); - struct sproc *sproc = mdev->drv_data; - - sproc_dbg(sproc, "remove ste-modem\n"); - - /* Reset device callback functions */ - sproc->mdev->ops.setup(sproc->mdev, NULL); - - /* Unregister as remoteproc device */ - rproc_del(sproc->rproc); - dma_free_coherent(sproc->rproc->dev.parent, SPROC_FW_SIZE, - sproc->fw_addr, sproc->fw_dma_addr); - rproc_free(sproc->rproc); - - mdev->drv_data = NULL; - - return 0; -} - -/* Handle probe of a modem device */ -static int sproc_probe(struct platform_device *pdev) -{ - struct ste_modem_device *mdev = - container_of(pdev, struct ste_modem_device, pdev); - struct sproc *sproc; - struct rproc *rproc; - int err; - - dev_dbg(&mdev->pdev.dev, "probe ste-modem\n"); - - if (!mdev->ops.setup || !mdev->ops.kick || !mdev->ops.kick_subscribe || - !mdev->ops.power) { - dev_err(&mdev->pdev.dev, "invalid mdev ops\n"); - return -EINVAL; - } - - rproc = rproc_alloc(&mdev->pdev.dev, mdev->pdev.name, &sproc_ops, - SPROC_MODEM_FIRMWARE, sizeof(*sproc)); - if (!rproc) - return -ENOMEM; - - sproc = rproc->priv; - sproc->mdev = mdev; - sproc->rproc = rproc; - rproc->has_iommu = false; - mdev->drv_data = sproc; - - /* Provide callback functions to modem device */ - sproc->mdev->ops.setup(sproc->mdev, &sproc_dev_cb); - - /* Set the STE-modem specific firmware handler */ - rproc->fw_ops = &sproc_fw_ops; - - /* - * STE-modem requires the 
firmware to be located - * at the start of the shared memory region. So we need to - * reserve space for firmware at the start. - */ - sproc->fw_addr = dma_alloc_coherent(rproc->dev.parent, SPROC_FW_SIZE, - &sproc->fw_dma_addr, - GFP_KERNEL); - if (!sproc->fw_addr) { - sproc_err(sproc, "Cannot allocate memory for fw\n"); - err = -ENOMEM; - goto free_rproc; - } - - /* Register as a remoteproc device */ - err = rproc_add(rproc); - if (err) - goto free_mem; - - return 0; - -free_mem: - dma_free_coherent(rproc->dev.parent, SPROC_FW_SIZE, - sproc->fw_addr, sproc->fw_dma_addr); -free_rproc: - /* Reset device data upon error */ - mdev->drv_data = NULL; - rproc_free(rproc); - return err; -} - -static struct platform_driver sproc_driver = { - .driver = { - .name = SPROC_MODEM_NAME, - }, - .probe = sproc_probe, - .remove = sproc_drv_remove, -}; - -module_platform_driver(sproc_driver); -MODULE_LICENSE("GPL v2"); -MODULE_DESCRIPTION("STE Modem driver using the Remote Processor Framework"); diff --git a/drivers/remoteproc/wkup_m3_rproc.c b/drivers/remoteproc/wkup_m3_rproc.c index 18175d0331fd..1ada0e51fef6 100644 --- a/drivers/remoteproc/wkup_m3_rproc.c +++ b/drivers/remoteproc/wkup_m3_rproc.c @@ -111,7 +111,7 @@ static void *wkup_m3_rproc_da_to_va(struct rproc *rproc, u64 da, int len) return va; } -static struct rproc_ops wkup_m3_rproc_ops = { +static const struct rproc_ops wkup_m3_rproc_ops = { .start = wkup_m3_rproc_start, .stop = wkup_m3_rproc_stop, .da_to_va = wkup_m3_rproc_da_to_va, diff --git a/drivers/rpmsg/Kconfig b/drivers/rpmsg/Kconfig index de31c5f14dd9..edc008f55663 100644 --- a/drivers/rpmsg/Kconfig +++ b/drivers/rpmsg/Kconfig @@ -4,10 +4,18 @@ menu "Rpmsg drivers" config RPMSG tristate +config RPMSG_CHAR + tristate "RPMSG device interface" + depends on RPMSG + depends on NET + help + Say Y here to export rpmsg endpoints as device files, usually found + in /dev. They make it possible for user-space programs to send and + receive rpmsg packets. 
+ config RPMSG_QCOM_SMD tristate "Qualcomm Shared Memory Driver (SMD)" depends on QCOM_SMEM - depends on QCOM_SMD=n select RPMSG help Say y here to enable support for the Qualcomm Shared Memory Driver diff --git a/drivers/rpmsg/Makefile b/drivers/rpmsg/Makefile index ae9c9132cf76..fae9a6d548fb 100644 --- a/drivers/rpmsg/Makefile +++ b/drivers/rpmsg/Makefile @@ -1,3 +1,4 @@ obj-$(CONFIG_RPMSG) += rpmsg_core.o +obj-$(CONFIG_RPMSG_CHAR) += rpmsg_char.o obj-$(CONFIG_RPMSG_QCOM_SMD) += qcom_smd.o obj-$(CONFIG_RPMSG_VIRTIO) += virtio_rpmsg_bus.o diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c index 1d4770c02e57..a0a39a8821a3 100644 --- a/drivers/rpmsg/qcom_smd.c +++ b/drivers/rpmsg/qcom_smd.c @@ -25,6 +25,7 @@ #include <linux/soc/qcom/smem.h> #include <linux/wait.h> #include <linux/rpmsg.h> +#include <linux/rpmsg/qcom_smd.h> #include "rpmsg_internal.h" @@ -116,6 +117,8 @@ static const struct { struct qcom_smd_edge { struct device dev; + const char *name; + struct device_node *of_node; unsigned edge_id; unsigned remote_pid; @@ -820,20 +823,13 @@ qcom_smd_find_channel(struct qcom_smd_edge *edge, const char *name) struct qcom_smd_channel *channel; struct qcom_smd_channel *ret = NULL; unsigned long flags; - unsigned state; spin_lock_irqsave(&edge->channels_lock, flags); list_for_each_entry(channel, &edge->channels, list) { - if (strcmp(channel->name, name)) - continue; - - state = GET_RX_CHANNEL_INFO(channel, state); - if (state != SMD_CHANNEL_OPENING && - state != SMD_CHANNEL_OPENED) - continue; - - ret = channel; - break; + if (!strcmp(channel->name, name)) { + ret = channel; + break; + } } spin_unlock_irqrestore(&edge->channels_lock, flags); @@ -923,6 +919,21 @@ static int qcom_smd_trysend(struct rpmsg_endpoint *ept, void *data, int len) return __qcom_smd_send(qsept->qsch, data, len, false); } +static unsigned int qcom_smd_poll(struct rpmsg_endpoint *ept, + struct file *filp, poll_table *wait) +{ + struct qcom_smd_endpoint *qsept = to_smd_endpoint(ept); + 
struct qcom_smd_channel *channel = qsept->qsch; + unsigned int mask = 0; + + poll_wait(filp, &channel->fblockread_event, wait); + + if (qcom_smd_get_tx_avail(channel) > 20) + mask |= POLLOUT | POLLWRNORM; + + return mask; +} + /* * Finds the device_node for the smd child interested in this channel. */ @@ -955,8 +966,17 @@ static const struct rpmsg_endpoint_ops qcom_smd_endpoint_ops = { .destroy_ept = qcom_smd_destroy_ept, .send = qcom_smd_send, .trysend = qcom_smd_trysend, + .poll = qcom_smd_poll, }; +static void qcom_smd_release_device(struct device *dev) +{ + struct rpmsg_device *rpdev = to_rpmsg_device(dev); + struct qcom_smd_device *qsdev = to_smd_device(rpdev); + + kfree(qsdev); +} + /* * Create a smd client device for channel that is being opened. */ @@ -986,10 +1006,27 @@ static int qcom_smd_create_device(struct qcom_smd_channel *channel) rpdev->dev.of_node = qcom_smd_match_channel(edge->of_node, channel->name); rpdev->dev.parent = &edge->dev; + rpdev->dev.release = qcom_smd_release_device; return rpmsg_register_device(rpdev); } +static int qcom_smd_create_chrdev(struct qcom_smd_edge *edge) +{ + struct qcom_smd_device *qsdev; + + qsdev = kzalloc(sizeof(*qsdev), GFP_KERNEL); + if (!qsdev) + return -ENOMEM; + + qsdev->edge = edge; + qsdev->rpdev.ops = &qcom_smd_device_ops; + qsdev->rpdev.dev.parent = &edge->dev; + qsdev->rpdev.dev.release = qcom_smd_release_device; + + return rpmsg_chrdev_register_device(&qsdev->rpdev); +} + /* * Allocate the qcom_smd_channel object for a newly found smd channel, * retrieving and validating the smem items involved. 
@@ -1254,6 +1291,10 @@ static int qcom_smd_parse_edge(struct device *dev, return -EINVAL; } + ret = of_property_read_string(node, "label", &edge->name); + if (ret < 0) + edge->name = node->name; + irq = irq_of_parse_and_map(node, 0); if (irq < 0) { dev_err(dev, "required smd interrupt missing\n"); @@ -1291,6 +1332,21 @@ static void qcom_smd_edge_release(struct device *dev) kfree(edge); } +static ssize_t rpmsg_name_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qcom_smd_edge *edge = to_smd_edge(dev); + + return sprintf(buf, "%s\n", edge->name); +} +static DEVICE_ATTR_RO(rpmsg_name); + +static struct attribute *qcom_smd_edge_attrs[] = { + &dev_attr_rpmsg_name.attr, + NULL +}; +ATTRIBUTE_GROUPS(qcom_smd_edge); + /** * qcom_smd_register_edge() - register an edge based on an device_node * @parent: parent device for the edge @@ -1312,6 +1368,7 @@ struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent, edge->dev.parent = parent; edge->dev.release = qcom_smd_edge_release; + edge->dev.groups = qcom_smd_edge_groups; dev_set_name(&edge->dev, "%s:%s", dev_name(parent), node->name); ret = device_register(&edge->dev); if (ret) { @@ -1325,6 +1382,12 @@ struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent, goto unregister_dev; } + ret = qcom_smd_create_chrdev(edge); + if (ret) { + dev_err(&edge->dev, "failed to register chrdev for edge\n"); + goto unregister_dev; + } + schedule_work(&edge->scan_work); return edge; diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c new file mode 100644 index 000000000000..0ca2ccc09ca6 --- /dev/null +++ b/drivers/rpmsg/rpmsg_char.c @@ -0,0 +1,584 @@ +/* + * Copyright (c) 2016, Linaro Ltd. + * Copyright (c) 2012, Michal Simek <monstr@monstr.eu> + * Copyright (c) 2012, PetaLogix + * Copyright (c) 2011, Texas Instruments, Inc. + * Copyright (c) 2011, Google, Inc. 
+ * + * Based on rpmsg performance statistics driver by Michal Simek, which in turn + * was based on TI & Google OMX rpmsg driver. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include <linux/cdev.h> +#include <linux/device.h> +#include <linux/fs.h> +#include <linux/idr.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/poll.h> +#include <linux/rpmsg.h> +#include <linux/skbuff.h> +#include <linux/slab.h> +#include <linux/uaccess.h> +#include <uapi/linux/rpmsg.h> + +#include "rpmsg_internal.h" + +#define RPMSG_DEV_MAX (MINORMASK + 1) + +static dev_t rpmsg_major; +static struct class *rpmsg_class; + +static DEFINE_IDA(rpmsg_ctrl_ida); +static DEFINE_IDA(rpmsg_ept_ida); +static DEFINE_IDA(rpmsg_minor_ida); + +#define dev_to_eptdev(dev) container_of(dev, struct rpmsg_eptdev, dev) +#define cdev_to_eptdev(i_cdev) container_of(i_cdev, struct rpmsg_eptdev, cdev) + +#define dev_to_ctrldev(dev) container_of(dev, struct rpmsg_ctrldev, dev) +#define cdev_to_ctrldev(i_cdev) container_of(i_cdev, struct rpmsg_ctrldev, cdev) + +/** + * struct rpmsg_ctrldev - control device for instantiating endpoint devices + * @rpdev: underlaying rpmsg device + * @cdev: cdev for the ctrl device + * @dev: device for the ctrl device + */ +struct rpmsg_ctrldev { + struct rpmsg_device *rpdev; + struct cdev cdev; + struct device dev; +}; + +/** + * struct rpmsg_eptdev - endpoint device context + * @dev: endpoint device + * @cdev: cdev for the endpoint device + * @rpdev: underlaying rpmsg device + * @chinfo: info used to open the endpoint + * @ept_lock: 
synchronization of @ept modifications + * @ept: rpmsg endpoint reference, when open + * @queue_lock: synchronization of @queue operations + * @queue: incoming message queue + * @readq: wait object for incoming queue + */ +struct rpmsg_eptdev { + struct device dev; + struct cdev cdev; + + struct rpmsg_device *rpdev; + struct rpmsg_channel_info chinfo; + + struct mutex ept_lock; + struct rpmsg_endpoint *ept; + + spinlock_t queue_lock; + struct sk_buff_head queue; + wait_queue_head_t readq; +}; + +static int rpmsg_eptdev_destroy(struct device *dev, void *data) +{ + struct rpmsg_eptdev *eptdev = dev_to_eptdev(dev); + + mutex_lock(&eptdev->ept_lock); + if (eptdev->ept) { + rpmsg_destroy_ept(eptdev->ept); + eptdev->ept = NULL; + } + mutex_unlock(&eptdev->ept_lock); + + /* wake up any blocked readers */ + wake_up_interruptible(&eptdev->readq); + + device_del(&eptdev->dev); + put_device(&eptdev->dev); + + return 0; +} + +static int rpmsg_ept_cb(struct rpmsg_device *rpdev, void *buf, int len, + void *priv, u32 addr) +{ + struct rpmsg_eptdev *eptdev = priv; + struct sk_buff *skb; + + skb = alloc_skb(len, GFP_ATOMIC); + if (!skb) + return -ENOMEM; + + memcpy(skb_put(skb, len), buf, len); + + spin_lock(&eptdev->queue_lock); + skb_queue_tail(&eptdev->queue, skb); + spin_unlock(&eptdev->queue_lock); + + /* wake up any blocking processes, waiting for new data */ + wake_up_interruptible(&eptdev->readq); + + return 0; +} + +static int rpmsg_eptdev_open(struct inode *inode, struct file *filp) +{ + struct rpmsg_eptdev *eptdev = cdev_to_eptdev(inode->i_cdev); + struct rpmsg_endpoint *ept; + struct rpmsg_device *rpdev = eptdev->rpdev; + struct device *dev = &eptdev->dev; + + get_device(dev); + + ept = rpmsg_create_ept(rpdev, rpmsg_ept_cb, eptdev, eptdev->chinfo); + if (!ept) { + dev_err(dev, "failed to open %s\n", eptdev->chinfo.name); + put_device(dev); + return -EINVAL; + } + + eptdev->ept = ept; + filp->private_data = eptdev; + + return 0; +} + +static int 
rpmsg_eptdev_release(struct inode *inode, struct file *filp) +{ + struct rpmsg_eptdev *eptdev = cdev_to_eptdev(inode->i_cdev); + struct device *dev = &eptdev->dev; + struct sk_buff *skb; + + /* Close the endpoint, if it's not already destroyed by the parent */ + mutex_lock(&eptdev->ept_lock); + if (eptdev->ept) { + rpmsg_destroy_ept(eptdev->ept); + eptdev->ept = NULL; + } + mutex_unlock(&eptdev->ept_lock); + + /* Discard all SKBs */ + while (!skb_queue_empty(&eptdev->queue)) { + skb = skb_dequeue(&eptdev->queue); + kfree_skb(skb); + } + + put_device(dev); + + return 0; +} + +static ssize_t rpmsg_eptdev_read(struct file *filp, char __user *buf, + size_t len, loff_t *f_pos) +{ + struct rpmsg_eptdev *eptdev = filp->private_data; + unsigned long flags; + struct sk_buff *skb; + int use; + + if (!eptdev->ept) + return -EPIPE; + + spin_lock_irqsave(&eptdev->queue_lock, flags); + + /* Wait for data in the queue */ + if (skb_queue_empty(&eptdev->queue)) { + spin_unlock_irqrestore(&eptdev->queue_lock, flags); + + if (filp->f_flags & O_NONBLOCK) + return -EAGAIN; + + /* Wait until we get data or the endpoint goes away */ + if (wait_event_interruptible(eptdev->readq, + !skb_queue_empty(&eptdev->queue) || + !eptdev->ept)) + return -ERESTARTSYS; + + /* We lost the endpoint while waiting */ + if (!eptdev->ept) + return -EPIPE; + + spin_lock_irqsave(&eptdev->queue_lock, flags); + } + + skb = skb_dequeue(&eptdev->queue); + spin_unlock_irqrestore(&eptdev->queue_lock, flags); + if (!skb) + return -EFAULT; + + use = min_t(size_t, len, skb->len); + if (copy_to_user(buf, skb->data, use)) + use = -EFAULT; + + kfree_skb(skb); + + return use; +} + +static ssize_t rpmsg_eptdev_write(struct file *filp, const char __user *buf, + size_t len, loff_t *f_pos) +{ + struct rpmsg_eptdev *eptdev = filp->private_data; + void *kbuf; + int ret; + + kbuf = memdup_user(buf, len); + if (IS_ERR(kbuf)) + return PTR_ERR(kbuf); + + if (mutex_lock_interruptible(&eptdev->ept_lock)) { + ret = -ERESTARTSYS; + goto 
free_kbuf; + } + + if (!eptdev->ept) { + ret = -EPIPE; + goto unlock_eptdev; + } + + if (filp->f_flags & O_NONBLOCK) + ret = rpmsg_trysend(eptdev->ept, kbuf, len); + else + ret = rpmsg_send(eptdev->ept, kbuf, len); + +unlock_eptdev: + mutex_unlock(&eptdev->ept_lock); + +free_kbuf: + kfree(kbuf); + return ret < 0 ? ret : len; +} + +static unsigned int rpmsg_eptdev_poll(struct file *filp, poll_table *wait) +{ + struct rpmsg_eptdev *eptdev = filp->private_data; + unsigned int mask = 0; + + if (!eptdev->ept) + return POLLERR; + + poll_wait(filp, &eptdev->readq, wait); + + if (!skb_queue_empty(&eptdev->queue)) + mask |= POLLIN | POLLRDNORM; + + mask |= rpmsg_poll(eptdev->ept, filp, wait); + + return mask; +} + +static long rpmsg_eptdev_ioctl(struct file *fp, unsigned int cmd, + unsigned long arg) +{ + struct rpmsg_eptdev *eptdev = fp->private_data; + + if (cmd != RPMSG_DESTROY_EPT_IOCTL) + return -EINVAL; + + return rpmsg_eptdev_destroy(&eptdev->dev, NULL); +} + +static const struct file_operations rpmsg_eptdev_fops = { + .owner = THIS_MODULE, + .open = rpmsg_eptdev_open, + .release = rpmsg_eptdev_release, + .read = rpmsg_eptdev_read, + .write = rpmsg_eptdev_write, + .poll = rpmsg_eptdev_poll, + .unlocked_ioctl = rpmsg_eptdev_ioctl, +}; + +static ssize_t name_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct rpmsg_eptdev *eptdev = dev_get_drvdata(dev); + + return sprintf(buf, "%s\n", eptdev->chinfo.name); +} +static DEVICE_ATTR_RO(name); + +static ssize_t src_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct rpmsg_eptdev *eptdev = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", eptdev->chinfo.src); +} +static DEVICE_ATTR_RO(src); + +static ssize_t dst_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct rpmsg_eptdev *eptdev = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", eptdev->chinfo.dst); +} +static DEVICE_ATTR_RO(dst); + +static struct attribute 
*rpmsg_eptdev_attrs[] = { + &dev_attr_name.attr, + &dev_attr_src.attr, + &dev_attr_dst.attr, + NULL +}; +ATTRIBUTE_GROUPS(rpmsg_eptdev); + +static void rpmsg_eptdev_release_device(struct device *dev) +{ + struct rpmsg_eptdev *eptdev = dev_to_eptdev(dev); + + ida_simple_remove(&rpmsg_ept_ida, dev->id); + ida_simple_remove(&rpmsg_minor_ida, MINOR(eptdev->dev.devt)); + cdev_del(&eptdev->cdev); + kfree(eptdev); +} + +static int rpmsg_eptdev_create(struct rpmsg_ctrldev *ctrldev, + struct rpmsg_channel_info chinfo) +{ + struct rpmsg_device *rpdev = ctrldev->rpdev; + struct rpmsg_eptdev *eptdev; + struct device *dev; + int ret; + + eptdev = kzalloc(sizeof(*eptdev), GFP_KERNEL); + if (!eptdev) + return -ENOMEM; + + dev = &eptdev->dev; + eptdev->rpdev = rpdev; + eptdev->chinfo = chinfo; + + mutex_init(&eptdev->ept_lock); + spin_lock_init(&eptdev->queue_lock); + skb_queue_head_init(&eptdev->queue); + init_waitqueue_head(&eptdev->readq); + + device_initialize(dev); + dev->class = rpmsg_class; + dev->parent = &ctrldev->dev; + dev->groups = rpmsg_eptdev_groups; + dev_set_drvdata(dev, eptdev); + + cdev_init(&eptdev->cdev, &rpmsg_eptdev_fops); + eptdev->cdev.owner = THIS_MODULE; + + ret = ida_simple_get(&rpmsg_minor_ida, 0, RPMSG_DEV_MAX, GFP_KERNEL); + if (ret < 0) + goto free_eptdev; + dev->devt = MKDEV(MAJOR(rpmsg_major), ret); + + ret = ida_simple_get(&rpmsg_ept_ida, 0, 0, GFP_KERNEL); + if (ret < 0) + goto free_minor_ida; + dev->id = ret; + dev_set_name(dev, "rpmsg%d", ret); + + ret = cdev_add(&eptdev->cdev, dev->devt, 1); + if (ret) + goto free_ept_ida; + + /* We can now rely on the release function for cleanup */ + dev->release = rpmsg_eptdev_release_device; + + ret = device_add(dev); + if (ret) { + dev_err(dev, "device_register failed: %d\n", ret); + put_device(dev); + } + + return ret; + +free_ept_ida: + ida_simple_remove(&rpmsg_ept_ida, dev->id); +free_minor_ida: + ida_simple_remove(&rpmsg_minor_ida, MINOR(dev->devt)); +free_eptdev: + put_device(dev); + kfree(eptdev); + 
+ return ret; +} + +static int rpmsg_ctrldev_open(struct inode *inode, struct file *filp) +{ + struct rpmsg_ctrldev *ctrldev = cdev_to_ctrldev(inode->i_cdev); + + get_device(&ctrldev->dev); + filp->private_data = ctrldev; + + return 0; +} + +static int rpmsg_ctrldev_release(struct inode *inode, struct file *filp) +{ + struct rpmsg_ctrldev *ctrldev = cdev_to_ctrldev(inode->i_cdev); + + put_device(&ctrldev->dev); + + return 0; +} + +static long rpmsg_ctrldev_ioctl(struct file *fp, unsigned int cmd, + unsigned long arg) +{ + struct rpmsg_ctrldev *ctrldev = fp->private_data; + void __user *argp = (void __user *)arg; + struct rpmsg_endpoint_info eptinfo; + struct rpmsg_channel_info chinfo; + + if (cmd != RPMSG_CREATE_EPT_IOCTL) + return -EINVAL; + + if (copy_from_user(&eptinfo, argp, sizeof(eptinfo))) + return -EFAULT; + + memcpy(chinfo.name, eptinfo.name, RPMSG_NAME_SIZE); + chinfo.name[RPMSG_NAME_SIZE-1] = '\0'; + chinfo.src = eptinfo.src; + chinfo.dst = eptinfo.dst; + + return rpmsg_eptdev_create(ctrldev, chinfo); +}; + +static const struct file_operations rpmsg_ctrldev_fops = { + .owner = THIS_MODULE, + .open = rpmsg_ctrldev_open, + .release = rpmsg_ctrldev_release, + .unlocked_ioctl = rpmsg_ctrldev_ioctl, +}; + +static void rpmsg_ctrldev_release_device(struct device *dev) +{ + struct rpmsg_ctrldev *ctrldev = dev_to_ctrldev(dev); + + ida_simple_remove(&rpmsg_ctrl_ida, dev->id); + ida_simple_remove(&rpmsg_minor_ida, MINOR(dev->devt)); + cdev_del(&ctrldev->cdev); + kfree(ctrldev); +} + +static int rpmsg_chrdev_probe(struct rpmsg_device *rpdev) +{ + struct rpmsg_ctrldev *ctrldev; + struct device *dev; + int ret; + + ctrldev = kzalloc(sizeof(*ctrldev), GFP_KERNEL); + if (!ctrldev) + return -ENOMEM; + + ctrldev->rpdev = rpdev; + + dev = &ctrldev->dev; + device_initialize(dev); + dev->parent = &rpdev->dev; + dev->class = rpmsg_class; + + cdev_init(&ctrldev->cdev, &rpmsg_ctrldev_fops); + ctrldev->cdev.owner = THIS_MODULE; + + ret = ida_simple_get(&rpmsg_minor_ida, 0, 
RPMSG_DEV_MAX, GFP_KERNEL); + if (ret < 0) + goto free_ctrldev; + dev->devt = MKDEV(MAJOR(rpmsg_major), ret); + + ret = ida_simple_get(&rpmsg_ctrl_ida, 0, 0, GFP_KERNEL); + if (ret < 0) + goto free_minor_ida; + dev->id = ret; + dev_set_name(&ctrldev->dev, "rpmsg_ctrl%d", ret); + + ret = cdev_add(&ctrldev->cdev, dev->devt, 1); + if (ret) + goto free_ctrl_ida; + + /* We can now rely on the release function for cleanup */ + dev->release = rpmsg_ctrldev_release_device; + + ret = device_add(dev); + if (ret) { + dev_err(&rpdev->dev, "device_register failed: %d\n", ret); + put_device(dev); + } + + dev_set_drvdata(&rpdev->dev, ctrldev); + + return ret; + +free_ctrl_ida: + ida_simple_remove(&rpmsg_ctrl_ida, dev->id); +free_minor_ida: + ida_simple_remove(&rpmsg_minor_ida, MINOR(dev->devt)); +free_ctrldev: + put_device(dev); + kfree(ctrldev); + + return ret; +} + +static void rpmsg_chrdev_remove(struct rpmsg_device *rpdev) +{ + struct rpmsg_ctrldev *ctrldev = dev_get_drvdata(&rpdev->dev); + int ret; + + /* Destroy all endpoints */ + ret = device_for_each_child(&ctrldev->dev, NULL, rpmsg_eptdev_destroy); + if (ret) + dev_warn(&rpdev->dev, "failed to nuke endpoints: %d\n", ret); + + device_del(&ctrldev->dev); + put_device(&ctrldev->dev); +} + +static struct rpmsg_driver rpmsg_chrdev_driver = { + .probe = rpmsg_chrdev_probe, + .remove = rpmsg_chrdev_remove, + .drv = { + .name = "rpmsg_chrdev", + }, +}; + +static int rpmsg_char_init(void) +{ + int ret; + + ret = alloc_chrdev_region(&rpmsg_major, 0, RPMSG_DEV_MAX, "rpmsg"); + if (ret < 0) { + pr_err("rpmsg: failed to allocate char dev region\n"); + return ret; + } + + rpmsg_class = class_create(THIS_MODULE, "rpmsg"); + if (IS_ERR(rpmsg_class)) { + pr_err("failed to create rpmsg class\n"); + unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX); + return PTR_ERR(rpmsg_class); + } + + ret = register_rpmsg_driver(&rpmsg_chrdev_driver); + if (ret < 0) { + pr_err("rpmsgchr: failed to register rpmsg driver\n"); + 
class_destroy(rpmsg_class); + unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX); + } + + return ret; +} +postcore_initcall(rpmsg_char_init); + +static void rpmsg_chrdev_exit(void) +{ + unregister_rpmsg_driver(&rpmsg_chrdev_driver); + class_destroy(rpmsg_class); + unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX); +} +module_exit(rpmsg_chrdev_exit); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c index e0a629eaceab..600f5f9f7431 100644 --- a/drivers/rpmsg/rpmsg_core.c +++ b/drivers/rpmsg/rpmsg_core.c @@ -71,6 +71,9 @@ struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *rpdev, rpmsg_rx_cb_t cb, void *priv, struct rpmsg_channel_info chinfo) { + if (WARN_ON(!rpdev)) + return NULL; + return rpdev->ops->create_ept(rpdev, cb, priv, chinfo); } EXPORT_SYMBOL(rpmsg_create_ept); @@ -80,11 +83,13 @@ EXPORT_SYMBOL(rpmsg_create_ept); * @ept: endpoing to destroy * * Should be used by drivers to destroy an rpmsg endpoint previously - * created with rpmsg_create_ept(). + * created with rpmsg_create_ept(). As with other types of "free" NULL + * is a valid parameter. 
*/ void rpmsg_destroy_ept(struct rpmsg_endpoint *ept) { - ept->ops->destroy_ept(ept); + if (ept) + ept->ops->destroy_ept(ept); } EXPORT_SYMBOL(rpmsg_destroy_ept); @@ -108,6 +113,11 @@ EXPORT_SYMBOL(rpmsg_destroy_ept); */ int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len) { + if (WARN_ON(!ept)) + return -EINVAL; + if (!ept->ops->send) + return -ENXIO; + return ept->ops->send(ept, data, len); } EXPORT_SYMBOL(rpmsg_send); @@ -132,6 +142,11 @@ EXPORT_SYMBOL(rpmsg_send); */ int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst) { + if (WARN_ON(!ept)) + return -EINVAL; + if (!ept->ops->sendto) + return -ENXIO; + return ept->ops->sendto(ept, data, len, dst); } EXPORT_SYMBOL(rpmsg_sendto); @@ -159,6 +174,11 @@ EXPORT_SYMBOL(rpmsg_sendto); int rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst, void *data, int len) { + if (WARN_ON(!ept)) + return -EINVAL; + if (!ept->ops->send_offchannel) + return -ENXIO; + return ept->ops->send_offchannel(ept, src, dst, data, len); } EXPORT_SYMBOL(rpmsg_send_offchannel); @@ -182,6 +202,11 @@ EXPORT_SYMBOL(rpmsg_send_offchannel); */ int rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len) { + if (WARN_ON(!ept)) + return -EINVAL; + if (!ept->ops->trysend) + return -ENXIO; + return ept->ops->trysend(ept, data, len); } EXPORT_SYMBOL(rpmsg_trysend); @@ -205,11 +230,36 @@ EXPORT_SYMBOL(rpmsg_trysend); */ int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst) { + if (WARN_ON(!ept)) + return -EINVAL; + if (!ept->ops->trysendto) + return -ENXIO; + return ept->ops->trysendto(ept, data, len, dst); } EXPORT_SYMBOL(rpmsg_trysendto); /** + * rpmsg_poll() - poll the endpoint's send buffers + * @ept: the rpmsg endpoint + * @filp: file for poll_wait() + * @wait: poll_table for poll_wait() + * + * Returns mask representing the current state of the endpoint's send buffers + */ +unsigned int rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp, + poll_table *wait) +{ + if 
(WARN_ON(!ept)) + return 0; + if (!ept->ops->poll) + return 0; + + return ept->ops->poll(ept, filp, wait); +} +EXPORT_SYMBOL(rpmsg_poll); + +/** * rpmsg_send_offchannel() - send a message using explicit src/dst addresses * @ept: the rpmsg endpoint * @src: source address @@ -231,6 +281,11 @@ EXPORT_SYMBOL(rpmsg_trysendto); int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst, void *data, int len) { + if (WARN_ON(!ept)) + return -EINVAL; + if (!ept->ops->trysend_offchannel) + return -ENXIO; + return ept->ops->trysend_offchannel(ept, src, dst, data, len); } EXPORT_SYMBOL(rpmsg_trysend_offchannel); @@ -315,6 +370,9 @@ static int rpmsg_dev_match(struct device *dev, struct device_driver *drv) const struct rpmsg_device_id *ids = rpdrv->id_table; unsigned int i; + if (rpdev->driver_override) + return !strcmp(rpdev->driver_override, drv->name); + if (ids) for (i = 0; ids[i].name[0]; i++) if (rpmsg_id_match(rpdev, &ids[i])) @@ -344,27 +402,30 @@ static int rpmsg_dev_probe(struct device *dev) struct rpmsg_device *rpdev = to_rpmsg_device(dev); struct rpmsg_driver *rpdrv = to_rpmsg_driver(rpdev->dev.driver); struct rpmsg_channel_info chinfo = {}; - struct rpmsg_endpoint *ept; + struct rpmsg_endpoint *ept = NULL; int err; - strncpy(chinfo.name, rpdev->id.name, RPMSG_NAME_SIZE); - chinfo.src = rpdev->src; - chinfo.dst = RPMSG_ADDR_ANY; + if (rpdrv->callback) { + strncpy(chinfo.name, rpdev->id.name, RPMSG_NAME_SIZE); + chinfo.src = rpdev->src; + chinfo.dst = RPMSG_ADDR_ANY; - ept = rpmsg_create_ept(rpdev, rpdrv->callback, NULL, chinfo); - if (!ept) { - dev_err(dev, "failed to create endpoint\n"); - err = -ENOMEM; - goto out; - } + ept = rpmsg_create_ept(rpdev, rpdrv->callback, NULL, chinfo); + if (!ept) { + dev_err(dev, "failed to create endpoint\n"); + err = -ENOMEM; + goto out; + } - rpdev->ept = ept; - rpdev->src = ept->addr; + rpdev->ept = ept; + rpdev->src = ept->addr; + } err = rpdrv->probe(rpdev); if (err) { dev_err(dev, "%s: failed: %d\n", __func__, 
err); - rpmsg_destroy_ept(ept); + if (ept) + rpmsg_destroy_ept(ept); goto out; } @@ -385,7 +446,8 @@ static int rpmsg_dev_remove(struct device *dev) rpdrv->remove(rpdev); - rpmsg_destroy_ept(rpdev->ept); + if (rpdev->ept) + rpmsg_destroy_ept(rpdev->ept); return err; } diff --git a/drivers/rpmsg/rpmsg_internal.h b/drivers/rpmsg/rpmsg_internal.h index 8075a20f919b..0cf9c7e2ee83 100644 --- a/drivers/rpmsg/rpmsg_internal.h +++ b/drivers/rpmsg/rpmsg_internal.h @@ -21,6 +21,7 @@ #define __RPMSG_INTERNAL_H__ #include <linux/rpmsg.h> +#include <linux/poll.h> #define to_rpmsg_device(d) container_of(d, struct rpmsg_device, dev) #define to_rpmsg_driver(d) container_of(d, struct rpmsg_driver, drv) @@ -70,6 +71,8 @@ struct rpmsg_endpoint_ops { int (*trysendto)(struct rpmsg_endpoint *ept, void *data, int len, u32 dst); int (*trysend_offchannel)(struct rpmsg_endpoint *ept, u32 src, u32 dst, void *data, int len); + unsigned int (*poll)(struct rpmsg_endpoint *ept, struct file *filp, + poll_table *wait); }; int rpmsg_register_device(struct rpmsg_device *rpdev); @@ -79,4 +82,19 @@ int rpmsg_unregister_device(struct device *parent, struct device *rpmsg_find_device(struct device *parent, struct rpmsg_channel_info *chinfo); +/** + * rpmsg_chrdev_register_device() - register chrdev device based on rpdev + * @rpdev: prepared rpdev to be used for creating endpoints + * + * This function wraps rpmsg_register_device() preparing the rpdev for use as + * basis for the rpmsg chrdev. 
+ */ +static inline int rpmsg_chrdev_register_device(struct rpmsg_device *rpdev) +{ + strcpy(rpdev->id.name, "rpmsg_chrdev"); + rpdev->driver_override = "rpmsg_chrdev"; + + return rpmsg_register_device(rpdev); +} + #endif diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c index 3090b0d3072f..1e66a4ea56ea 100644 --- a/drivers/rpmsg/virtio_rpmsg_bus.c +++ b/drivers/rpmsg/virtio_rpmsg_bus.c @@ -360,6 +360,14 @@ static const struct rpmsg_device_ops virtio_rpmsg_ops = { .announce_destroy = virtio_rpmsg_announce_destroy, }; +static void virtio_rpmsg_release_device(struct device *dev) +{ + struct rpmsg_device *rpdev = to_rpmsg_device(dev); + struct virtio_rpmsg_channel *vch = to_virtio_rpmsg_channel(rpdev); + + kfree(vch); +} + /* * create an rpmsg channel using its name and address info. * this function will be used to create both static and dynamic @@ -408,6 +416,7 @@ static struct rpmsg_device *rpmsg_create_channel(struct virtproc_info *vrp, strncpy(rpdev->id.name, chinfo->name, RPMSG_NAME_SIZE); rpdev->dev.parent = &vrp->vdev->dev; + rpdev->dev.release = virtio_rpmsg_release_device; ret = rpmsg_register_device(rpdev); if (ret) return NULL; diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index 461b387d03cc..9c1dbf9fd986 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -10,6 +10,10 @@ config QCOM_GSBI functions for connecting the underlying serial UART, SPI, and I2C devices to the output pins. +config QCOM_MDT_LOADER + tristate + select QCOM_SCM + config QCOM_PM bool "Qualcomm Power Management" depends on ARCH_QCOM && !ARM64 @@ -29,17 +33,10 @@ config QCOM_SMEM The driver provides an interface to items in a heap shared among all processors in a Qualcomm platform. 
-config QCOM_SMD - tristate "Qualcomm Shared Memory Driver (SMD)" - depends on QCOM_SMEM - help - Say y here to enable support for the Qualcomm Shared Memory Driver - providing communication channels to remote processors in Qualcomm - platforms. - config QCOM_SMD_RPM tristate "Qualcomm Resource Power Manager (RPM) over SMD" - depends on QCOM_SMD && OF + depends on ARCH_QCOM + depends on RPMSG && OF help If you say yes to this option, support will be included for the Resource Power Manager system found in the Qualcomm 8974 based @@ -72,7 +69,48 @@ config QCOM_SMSM config QCOM_WCNSS_CTRL tristate "Qualcomm WCNSS control driver" - depends on QCOM_SMD + depends on ARCH_QCOM + depends on RPMSG help Client driver for the WCNSS_CTRL SMD channel, used to download nv firmware to a newly booted WCNSS chip. + Say y here to enable support for the Qualcomm Shared Memory Manager. + The driver provides an interface to items in a heap shared among all + processors in a Qualcomm platform. + +config MSM_BUS_SCALING + bool "Bus scaling driver" + default n + help + This option enables bus scaling on MSM devices. Bus scaling + allows devices to request the clocks be set to rates sufficient + for the active devices needs without keeping the clocks at max + frequency when a slower speed is sufficient. + +config MSM_BUSPM_DEV + tristate "MSM Bus Performance Monitor Kernel Module" + depends on MSM_BUS_SCALING + default n + help + This kernel module is used to mmap() hardware registers for the + performance monitors, counters, etc. The module can also be used to + allocate physical memory which is used by bus performance hardware to + dump performance data + +config BUS_TOPOLOGY_ADHOC + bool "ad-hoc bus scaling topology" + default n + help + This option enables a driver that can handle adhoc bus topologies. + Adhoc bus topology driver allows one to many connections and maintains + directionality of connections by explicitly listing device connections + thus avoiding illegal routes. 
+ +config QTI_LNX_GPS_PROXY + tristate "User mode QTI_LNX_GPS_PROXY device driver support" + help + This supports user mode QTI_LNX_GPS_PROXY + + Note that this application programming interface is EXPERIMENTAL + and hence SUBJECT TO CHANGE WITHOUT NOTICE while it stabilizes. + diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile index fdd664edf0bd..5eb303e2647b 100644 --- a/drivers/soc/qcom/Makefile +++ b/drivers/soc/qcom/Makefile @@ -1,9 +1,13 @@ obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o +obj-$(CONFIG_QCOM_MDT_LOADER) += mdt_loader.o obj-$(CONFIG_QCOM_PM) += spm.o -obj-$(CONFIG_QCOM_SMD) += smd.o obj-$(CONFIG_QCOM_SMD_RPM) += smd-rpm.o obj-$(CONFIG_QCOM_SMEM) += smem.o obj-$(CONFIG_QCOM_SMEM_STATE) += smem_state.o obj-$(CONFIG_QCOM_SMP2P) += smp2p.o obj-$(CONFIG_QCOM_SMSM) += smsm.o obj-$(CONFIG_QCOM_WCNSS_CTRL) += wcnss_ctrl.o + +obj-$(CONFIG_MSM_BUS_SCALING) += msm_bus/ +obj-$(CONFIG_BUS_TOPOLOGY_ADHOC) += msm_bus/ +obj-$(CONFIG_QTI_LNX_GPS_PROXY) += gps_proxy.o diff --git a/drivers/soc/qcom/gps_proxy.c b/drivers/soc/qcom/gps_proxy.c new file mode 100644 index 000000000000..d0b129cc530d --- /dev/null +++ b/drivers/soc/qcom/gps_proxy.c @@ -0,0 +1,308 @@ +/* Copyright (c) 2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/fs.h> +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/wait.h> +#include <linux/tty.h> +#include <linux/tty_driver.h> +#include <linux/tty_flip.h> +#include <linux/serial.h> +#include <asm/uaccess.h> +#include <linux/seq_file.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/delay.h> +#include <linux/debugfs.h> +#include <linux/device.h> +#include <uapi/linux/gps_proxy.h> + +/* Module information */ +MODULE_DESCRIPTION( DRIVER_DESC ); +MODULE_LICENSE("GPL v2"); + +#define LOC_SERVICE_SVC_ID 0x00000010 +#define LOC_SERVICE_V1 2 +#define LOC_SERVICE_INS_ID 0 + +#define DRIVER_VERSION "v1.0" +#define DRIVER_DESC "GPS TTY driver" +#define MODULE_NAME "gps_proxy" +#define TTY_DRIVER_NAME "gps_serial" +#define TTY_DEV_NAME "ttyGPS" +#define MAX_TTY_BUFFER_SZ 0x10000 +#define GPS_TTY_MAJOR 100 /* experimental range */ +#define DEVICE_NAME "gps_proxy_ch" +#define CLASS_NAME "gps_proxy_class" + +static struct tty_driver *gps_proxy_tty_driver; +static struct tty_port gps_proxy_tty_port; +static bool g_port_open = false; +static struct semaphore g_port_sem; +static int gps_proxy_ch_driver_major = 0; +static struct class* gps_proxy_ch_class = 0; +static struct device* gps_proxy_ch_dev = 0; + +static void serial_port_shutdown(struct tty_port *tport) +{ +} + +static int serial_port_activate(struct tty_port *tport, struct tty_struct *tty) +{ + return 0; +} + +static struct tty_port_operations serial_port_ops = { + .activate = serial_port_activate, + .shutdown = serial_port_shutdown +}; + +static int gps_proxy_open(struct tty_struct *tty, struct file *file) +{ + int rc; + rc = tty_port_open(&gps_proxy_tty_port, tty, file); + g_port_open = true; + up(&g_port_sem); + return rc; +} + +static void gps_proxy_close(struct tty_struct *tty, struct file *file) +{ + tty_port_close(tty->port, tty, file); + g_port_open = 
false; + down(&g_port_sem); +} + +static void gps_proxy_tty_hangup(struct tty_struct *tty) +{ + tty_port_hangup(tty->port); +} + +static int gps_proxy_write(struct tty_struct *tty, + const unsigned char *buffer, int count) +{ + return count; +} + +static int gps_proxy_write_room(struct tty_struct *tty) +{ + return MAX_TTY_BUFFER_SZ; +} + +static int gps_proxy_tty_chars_in_buffer(struct tty_struct *tty) +{ + return 0; +} + + +static int gps_proxy_tiocmget(struct tty_struct *tty) +{ + return 0; +} + +static int gps_proxy_tiocmset(struct tty_struct *tty, + unsigned int set, unsigned int clear) +{ + return 0; +} + +static int gps_proxy_ioctl(struct tty_struct *tty, + unsigned int cmd, unsigned long arg) +{ + return 0; +} + +static struct tty_operations serial_ops = { + .open = gps_proxy_open, + .close = gps_proxy_close, + .hangup = gps_proxy_tty_hangup, + .write = gps_proxy_write, + .write_room = gps_proxy_write_room, + .chars_in_buffer = gps_proxy_tty_chars_in_buffer, + .tiocmget = gps_proxy_tiocmget, + .tiocmset = gps_proxy_tiocmset, + .ioctl = gps_proxy_ioctl, +}; + +int gps_proxy_ch_driver_open(struct inode *inode, struct file *filp) +{ + return 0; +} + +int gps_proxy_ch_driver_close(struct inode *inode, struct file *filp) +{ + return 0; +} + +long gps_proxy_chdev_ioctl(struct file *filp, unsigned int opt, unsigned long arg) +{ + int rc = 0; + struct gps_proxy_data buff; + switch (opt) + { + case QGPS_REGISTER_HANDLE: + /* DOWN is necessary to make client wait till port is open */ + if (!down_killable(&g_port_sem)) { + /* UP to semaphore is necessary here for close or + next register handle (for parity) */ + up(&g_port_sem); + rc = 0; + } + else { + rc = -EFAULT; + } + break; + case QGPS_SEND_NMEA: + pr_debug(KERN_INFO "Received string: %s\n", + ((struct gps_proxy_data*)arg)->nmea_string); + rc = access_ok(struct gps_proxy_data, (struct gps_proxy_data*)arg, + sizeof(struct gps_proxy_data)); + if (!rc) { + pr_err(KERN_ERR "Invalid argument was received\n"); + return 
rc; + } + rc = copy_from_user((void*)&buff, (void*)arg, sizeof(buff)); + if (rc) { + pr_err(KERN_ERR "Number of bytes that \ + couldn't be copied: %d", rc); + return -EFAULT; + } + if (buff.nmea_length < QMI_LOC_NMEA_STRING_MAX_LENGTH_V02 + 1) { + pr_debug(KERN_INFO "Received string: %s\n", + buff.nmea_string); + rc = tty_insert_flip_string(&gps_proxy_tty_port, + buff.nmea_string, + strnlen(buff.nmea_string, + QMI_LOC_NMEA_STRING_MAX_LENGTH_V02) + 1); + if (rc < 0) { + pr_err(KERN_ERR "Error flipping string"); + return rc; + } + tty_flip_buffer_push(&gps_proxy_tty_port); + } + else { + pr_err(KERN_ERR "Illegal message size"); + rc = -EFAULT; + } + break; + case QGPS_IS_ACTIVE: + if (g_port_open) + rc = 0; + else + rc = -EFAULT; + break; + default: + rc = -EFAULT; + break; + } + return rc; +} + +struct file_operations gps_proxy_ch_driver_ops = { + open: gps_proxy_ch_driver_open, + unlocked_ioctl: gps_proxy_chdev_ioctl, + release: gps_proxy_ch_driver_close +}; + +static int __init gps_proxy_init(void) +{ + int rc; + struct device *ttydev; + + sema_init(&g_port_sem,0); + + gps_proxy_ch_driver_major = register_chrdev(0, "gps_proxy_ch_dev", + &gps_proxy_ch_driver_ops); + if (gps_proxy_ch_driver_major < 0) { + pr_err(KERN_ERR "Failed to register char device\n"); + return -EFAULT; + } + else { + pr_debug(KERN_INFO "char device registered with major %d\n", + gps_proxy_ch_driver_major); + } + + /* Register the device class */ + gps_proxy_ch_class = class_create(THIS_MODULE, CLASS_NAME); + if (IS_ERR(gps_proxy_ch_class)){ + unregister_chrdev(gps_proxy_ch_driver_major, DEVICE_NAME); + pr_debug(KERN_ALERT "Failed to register device class\n"); + return -EFAULT; + } + pr_debug(KERN_INFO "EBBChar: device class registered correctly\n"); + + /* Register the device driver */ + gps_proxy_ch_dev = device_create(gps_proxy_ch_class, NULL, + MKDEV(gps_proxy_ch_driver_major, 0), NULL, DEVICE_NAME); + if (IS_ERR(gps_proxy_ch_dev)){ + class_destroy(gps_proxy_ch_class); + 
unregister_chrdev(gps_proxy_ch_driver_major, DEVICE_NAME); + pr_debug(KERN_ALERT "Failed to create the device\n"); + return -EFAULT; + } + + /* allocate the tty driver */ + gps_proxy_tty_driver = alloc_tty_driver(1); + if (!gps_proxy_tty_driver) + return -ENOMEM; + + tty_port_init(&gps_proxy_tty_port); + gps_proxy_tty_port.ops = &serial_port_ops; + + /* initialize the tty driver */ + gps_proxy_tty_driver->driver_name = TTY_DRIVER_NAME; + gps_proxy_tty_driver->name = TTY_DEV_NAME; + gps_proxy_tty_driver->major = GPS_TTY_MAJOR; + gps_proxy_tty_driver->minor_start = 0; + gps_proxy_tty_driver->type = TTY_DRIVER_TYPE_SERIAL; + gps_proxy_tty_driver->subtype = SERIAL_TYPE_NORMAL; + gps_proxy_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV; + gps_proxy_tty_driver->init_termios = tty_std_termios; + + tty_set_operations(gps_proxy_tty_driver, &serial_ops); + + /* register the tty driver */ + rc = tty_register_driver(gps_proxy_tty_driver); + if (rc) { + pr_err("Failed to register gps_proxy tty driver\n"); + put_tty_driver(gps_proxy_tty_driver); + return rc; + } + + ttydev = tty_port_register_device(&gps_proxy_tty_port, gps_proxy_tty_driver, 0, 0); + if (IS_ERR(ttydev)) { + rc = PTR_ERR(ttydev); + pr_err("Failed to register gps_proxy tty proxy\n"); + return rc; + } + + pr_debug(KERN_INFO DRIVER_DESC " \n" DRIVER_VERSION); + return rc; +} + +static void __exit gps_proxy_exit(void) +{ + tty_unregister_device(gps_proxy_tty_driver, 0); + tty_unregister_driver(gps_proxy_tty_driver); + unregister_chrdev(gps_proxy_ch_driver_major, "gps_proxy_ch_dev"); + device_destroy(gps_proxy_ch_class, MKDEV(gps_proxy_ch_driver_major, 0)); + class_unregister(gps_proxy_ch_class); + class_destroy(gps_proxy_ch_class); +} + +late_initcall(gps_proxy_init); +module_exit(gps_proxy_exit); diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c new file mode 100644 index 000000000000..bd63df0d14e0 --- /dev/null +++ b/drivers/soc/qcom/mdt_loader.c @@ -0,0 +1,204 @@ +/* + * 
Qualcomm Peripheral Image Loader + * + * Copyright (C) 2016 Linaro Ltd + * Copyright (C) 2015 Sony Mobile Communications Inc + * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/device.h> +#include <linux/elf.h> +#include <linux/firmware.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/qcom_scm.h> +#include <linux/sizes.h> +#include <linux/slab.h> +#include <linux/soc/qcom/mdt_loader.h> + +static bool mdt_phdr_valid(const struct elf32_phdr *phdr) +{ + if (phdr->p_type != PT_LOAD) + return false; + + if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH) + return false; + + if (!phdr->p_memsz) + return false; + + return true; +} + +/** + * qcom_mdt_get_size() - acquire size of the memory region needed to load mdt + * @fw: firmware object for the mdt file + * + * Returns size of the loaded firmware blob, or -EINVAL on failure. 
+ */ +ssize_t qcom_mdt_get_size(const struct firmware *fw) +{ + const struct elf32_phdr *phdrs; + const struct elf32_phdr *phdr; + const struct elf32_hdr *ehdr; + phys_addr_t min_addr = (phys_addr_t)ULLONG_MAX; + phys_addr_t max_addr = 0; + int i; + + ehdr = (struct elf32_hdr *)fw->data; + phdrs = (struct elf32_phdr *)(ehdr + 1); + + for (i = 0; i < ehdr->e_phnum; i++) { + phdr = &phdrs[i]; + + if (!mdt_phdr_valid(phdr)) + continue; + + if (phdr->p_paddr < min_addr) + min_addr = phdr->p_paddr; + + if (phdr->p_paddr + phdr->p_memsz > max_addr) + max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K); + } + + return min_addr < max_addr ? max_addr - min_addr : -EINVAL; +} +EXPORT_SYMBOL_GPL(qcom_mdt_get_size); + +/** + * qcom_mdt_load() - load the firmware which header is loaded as fw + * @dev: device handle to associate resources with + * @fw: firmware object for the mdt file + * @firmware: name of the firmware, for construction of segment file names + * @pas_id: PAS identifier + * @mem_region: allocated memory region to load firmware into + * @mem_phys: physical address of allocated memory region + * @mem_size: size of the allocated memory region + * + * Returns 0 on success, negative errno otherwise. 
+ */ +int qcom_mdt_load(struct device *dev, const struct firmware *fw, + const char *firmware, int pas_id, void *mem_region, + phys_addr_t mem_phys, size_t mem_size) +{ + const struct elf32_phdr *phdrs; + const struct elf32_phdr *phdr; + const struct elf32_hdr *ehdr; + const struct firmware *seg_fw; + phys_addr_t mem_reloc; + phys_addr_t min_addr = (phys_addr_t)ULLONG_MAX; + phys_addr_t max_addr = 0; + size_t fw_name_len; + ssize_t offset; + char *fw_name; + bool relocate = false; + void *ptr; + int ret; + int i; + + if (!fw || !mem_region || !mem_phys || !mem_size) + return -EINVAL; + + ehdr = (struct elf32_hdr *)fw->data; + phdrs = (struct elf32_phdr *)(ehdr + 1); + + fw_name_len = strlen(firmware); + if (fw_name_len <= 4) + return -EINVAL; + + fw_name = kstrdup(firmware, GFP_KERNEL); + if (!fw_name) + return -ENOMEM; + + ret = qcom_scm_pas_init_image(pas_id, fw->data, fw->size); + if (ret) { + dev_err(dev, "invalid firmware metadata\n"); + goto out; + } + + for (i = 0; i < ehdr->e_phnum; i++) { + phdr = &phdrs[i]; + + if (!mdt_phdr_valid(phdr)) + continue; + + if (phdr->p_flags & QCOM_MDT_RELOCATABLE) + relocate = true; + + if (phdr->p_paddr < min_addr) + min_addr = phdr->p_paddr; + + if (phdr->p_paddr + phdr->p_memsz > max_addr) + max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K); + } + + if (relocate) { + ret = qcom_scm_pas_mem_setup(pas_id, mem_phys, max_addr - min_addr); + if (ret) { + dev_err(dev, "unable to setup relocation\n"); + goto out; + } + + /* + * The image is relocatable, so offset each segment based on + * the lowest segment address. + */ + mem_reloc = min_addr; + } else { + /* + * Image is not relocatable, so offset each segment based on + * the allocated physical chunk of memory. 
+ */ + mem_reloc = mem_phys; + } + + for (i = 0; i < ehdr->e_phnum; i++) { + phdr = &phdrs[i]; + + if (!mdt_phdr_valid(phdr)) + continue; + + offset = phdr->p_paddr - mem_reloc; + if (offset < 0 || offset + phdr->p_memsz > mem_size) { + dev_err(dev, "segment outside memory range\n"); + ret = -EINVAL; + break; + } + + ptr = mem_region + offset; + + if (phdr->p_filesz) { + sprintf(fw_name + fw_name_len - 3, "b%02d", i); + ret = request_firmware(&seg_fw, fw_name, dev); + if (ret) { + dev_err(dev, "failed to load %s\n", fw_name); + break; + } + + memcpy(ptr, seg_fw->data, seg_fw->size); + + release_firmware(seg_fw); + } + + if (phdr->p_memsz > phdr->p_filesz) + memset(ptr + phdr->p_filesz, 0, phdr->p_memsz - phdr->p_filesz); + } + +out: + kfree(fw_name); + + return ret; +} +EXPORT_SYMBOL_GPL(qcom_mdt_load); + +MODULE_DESCRIPTION("Firmware parser for Qualcomm MDT format"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/qcom/msm_bus/Makefile b/drivers/soc/qcom/msm_bus/Makefile new file mode 100644 index 000000000000..2fbdc7011464 --- /dev/null +++ b/drivers/soc/qcom/msm_bus/Makefile @@ -0,0 +1,27 @@ +# +# Makefile for msm-bus driver specific files +# +obj-y += msm_bus_bimc.o msm_bus_noc.o msm_bus_core.o msm_bus_client_api.o +obj-$(CONFIG_OF) += msm_bus_of.o +obj-$(CONFIG_QCOM_SMD_RPM) += msm_bus_rpm_smd.o +obj-$(CONFIG_QCOM_SMD_RPM) += qcom_rpm_msm_bus.o + +ifdef CONFIG_BUS_TOPOLOGY_ADHOC + obj-y += msm_bus_fabric_adhoc.o msm_bus_arb_adhoc.o msm_bus_rules.o + obj-$(CONFIG_OF) += msm_bus_of_adhoc.o + obj-$(CONFIG_DEBUG_BUS_VOTER) += msm_bus_dbg_voter.o + # FIXME remove it temporarily till this driver is ported and tested + #obj-$(CONFIG_CORESIGHT) += msm_buspm_coresight_adhoc.o +else + obj-y += msm_bus_fabric.o msm_bus_config.o msm_bus_arb.o + obj-$(CONFIG_CORESIGHT) += msm_buspm_coresight.o +endif + +ifdef CONFIG_ARCH_MSM8974 + obj-$(CONFIG_ARCH_MSM8974) += msm_bus_board_8974.o +else + obj-y += msm_bus_id.o +endif + +obj-$(CONFIG_DEBUG_FS) += msm_bus_dbg.o 
+obj-$(CONFIG_MSM_BUSPM_DEV) += msm-buspm-dev.o diff --git a/drivers/soc/qcom/msm_bus/msm-buspm-dev.c b/drivers/soc/qcom/msm_bus/msm-buspm-dev.c new file mode 100644 index 000000000000..867e7378448c --- /dev/null +++ b/drivers/soc/qcom/msm_bus/msm-buspm-dev.c @@ -0,0 +1,366 @@ +/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* #define DEBUG */ + +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/mm.h> +#include <linux/err.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/device.h> +#include <linux/uaccess.h> +#include <linux/miscdevice.h> +#include <linux/dma-mapping.h> +#include <soc/qcom/rpm-smd.h> +#include <uapi/linux/msm-buspm-dev.h> + +#define MSM_BUSPM_DRV_NAME "msm-buspm-dev" + +#ifdef CONFIG_COMPAT +static long +msm_buspm_dev_compat_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg); +#else +#define msm_buspm_dev_compat_ioctl NULL +#endif + +static long +msm_buspm_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); +static int msm_buspm_dev_mmap(struct file *filp, struct vm_area_struct *vma); +static int msm_buspm_dev_release(struct inode *inode, struct file *filp); +static int msm_buspm_dev_open(struct inode *inode, struct file *filp); + +static const struct file_operations msm_buspm_dev_fops = { + .owner = THIS_MODULE, + .mmap = msm_buspm_dev_mmap, + .open = msm_buspm_dev_open, + .unlocked_ioctl = msm_buspm_dev_ioctl, + .compat_ioctl = msm_buspm_dev_compat_ioctl, + .llseek = noop_llseek, + .release = 
msm_buspm_dev_release, +}; + +struct miscdevice msm_buspm_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = MSM_BUSPM_DRV_NAME, + .fops = &msm_buspm_dev_fops, +}; + + +enum msm_buspm_spdm_res { + SPDM_RES_ID = 0, + SPDM_RES_TYPE = 0x63707362, + SPDM_KEY = 0x00006e65, + SPDM_SIZE = 4, +}; +/* + * Allocate kernel buffer. + * Currently limited to one buffer per file descriptor. If alloc() is + * called twice for the same descriptor, the original buffer is freed. + * There is also no locking protection so the same descriptor can not be shared. + */ + +static inline void *msm_buspm_dev_get_vaddr(struct file *filp) +{ + struct msm_buspm_map_dev *dev = filp->private_data; + + return (dev) ? dev->vaddr : NULL; +} + +static inline unsigned int msm_buspm_dev_get_buflen(struct file *filp) +{ + struct msm_buspm_map_dev *dev = filp->private_data; + + return dev ? dev->buflen : 0; +} + +static inline unsigned long msm_buspm_dev_get_paddr(struct file *filp) +{ + struct msm_buspm_map_dev *dev = filp->private_data; + + return (dev) ? 
dev->paddr : 0L; +} + +static void msm_buspm_dev_free(struct file *filp) +{ + struct msm_buspm_map_dev *dev = filp->private_data; + + if (dev && dev->vaddr) { + pr_debug("freeing memory at 0x%p\n", dev->vaddr); + dma_free_coherent(msm_buspm_misc.this_device, dev->buflen, + dev->vaddr, dev->paddr); + dev->paddr = 0L; + dev->vaddr = NULL; + } +} + +static int msm_buspm_dev_open(struct inode *inode, struct file *filp) +{ + struct msm_buspm_map_dev *dev; + + if (capable(CAP_SYS_ADMIN)) { + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (dev) + filp->private_data = dev; + else + return -ENOMEM; + } else { + return -EPERM; + } + + return 0; +} + +static int +msm_buspm_dev_alloc(struct file *filp, struct buspm_alloc_params data) +{ + dma_addr_t paddr; + void *vaddr; + struct msm_buspm_map_dev *dev = filp->private_data; + + /* If buffer already allocated, then free it */ + if (dev->vaddr) + msm_buspm_dev_free(filp); + + /* Allocate uncached memory */ + vaddr = dma_alloc_coherent(msm_buspm_misc.this_device, data.size, + &paddr, GFP_KERNEL); + + if (vaddr == NULL) { + pr_err("allocation of 0x%zu bytes failed", data.size); + return -ENOMEM; + } + + dev->vaddr = vaddr; + dev->paddr = paddr; + dev->buflen = data.size; + filp->f_pos = 0; + pr_debug("virt addr = 0x%p\n", dev->vaddr); + pr_debug("phys addr = 0x%lx\n", dev->paddr); + + return 0; +} + +static int msm_bus_rpm_req(u32 rsc_type, u32 key, u32 hwid, + int ctx, u32 val) +{ + struct msm_rpm_request *rpm_req; + int ret, msg_id; + + rpm_req = msm_rpm_create_request(ctx, rsc_type, SPDM_RES_ID, 1); + if (rpm_req == NULL) { + pr_err("RPM: Couldn't create RPM Request\n"); + return -ENXIO; + } + + ret = msm_rpm_add_kvp_data(rpm_req, key, (const uint8_t *)&val, + (int)(sizeof(uint32_t))); + if (ret) { + pr_err("RPM: Add KVP failed for RPM Req:%u\n", + rsc_type); + goto err; + } + + pr_debug("Added Key: %d, Val: %u, size: %zu\n", key, + (uint32_t)val, sizeof(uint32_t)); + msg_id = msm_rpm_send_request(rpm_req); + if (!msg_id) { + 
pr_err("RPM: No message ID for req\n"); + ret = -ENXIO; + goto err; + } + + ret = msm_rpm_wait_for_ack(msg_id); + if (ret) { + pr_err("RPM: Ack failed\n"); + goto err; + } + +err: + msm_rpm_free_request(rpm_req); + return ret; +} + +static int msm_buspm_ioc_cmds(uint32_t arg) +{ + switch (arg) { + case MSM_BUSPM_SPDM_CLK_DIS: + case MSM_BUSPM_SPDM_CLK_EN: + return msm_bus_rpm_req(SPDM_RES_TYPE, SPDM_KEY, 0, + MSM_RPM_CTX_ACTIVE_SET, arg); + default: + pr_warn("Unsupported ioctl command: %d\n", arg); + return -EINVAL; + } +} + + + +static long +msm_buspm_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + struct buspm_xfer_req xfer; + struct buspm_alloc_params alloc_data; + unsigned long paddr; + int retval = 0; + void *buf = msm_buspm_dev_get_vaddr(filp); + unsigned int buflen = msm_buspm_dev_get_buflen(filp); + unsigned char *dbgbuf = buf; + + if (_IOC_TYPE(cmd) != MSM_BUSPM_IOC_MAGIC) { + pr_err("Wrong IOC_MAGIC.Exiting\n"); + return -ENOTTY; + } + + switch (cmd) { + case MSM_BUSPM_IOC_FREE: + pr_debug("cmd = 0x%x (FREE)\n", cmd); + msm_buspm_dev_free(filp); + break; + + case MSM_BUSPM_IOC_ALLOC: + pr_debug("cmd = 0x%x (ALLOC)\n", cmd); + retval = __get_user(alloc_data.size, (uint32_t __user *)arg); + + if (retval == 0) + retval = msm_buspm_dev_alloc(filp, alloc_data); + break; + + case MSM_BUSPM_IOC_RD_PHYS_ADDR: + pr_debug("Read Physical Address\n"); + paddr = msm_buspm_dev_get_paddr(filp); + if (paddr == 0L) { + retval = -EINVAL; + } else { + pr_debug("phys addr = 0x%lx\n", paddr); + retval = __put_user(paddr, + (unsigned long __user *)arg); + } + break; + + case MSM_BUSPM_IOC_RDBUF: + if (!buf) { + retval = -EINVAL; + break; + } + + pr_debug("Read Buffer: 0x%x%x%x%x\n", + dbgbuf[0], dbgbuf[1], dbgbuf[2], dbgbuf[3]); + + if (copy_from_user(&xfer, (void __user *)arg, sizeof(xfer))) { + retval = -EFAULT; + break; + } + + if ((xfer.size <= buflen) && + (copy_to_user((void __user *)xfer.data, buf, + xfer.size))) { + retval = -EFAULT; + break; + 
} + break; + + case MSM_BUSPM_IOC_WRBUF: + pr_debug("Write Buffer\n"); + + if (!buf) { + retval = -EINVAL; + break; + } + + if (copy_from_user(&xfer, (void __user *)arg, sizeof(xfer))) { + retval = -EFAULT; + break; + } + + if ((buflen <= xfer.size) && + (copy_from_user(buf, (void __user *)xfer.data, + xfer.size))) { + retval = -EFAULT; + break; + } + break; + + case MSM_BUSPM_IOC_CMD: + pr_debug("IOCTL command: cmd: %d arg: %lu\n", cmd, arg); + retval = msm_buspm_ioc_cmds(arg); + break; + + default: + pr_debug("Unknown command 0x%x\n", cmd); + retval = -EINVAL; + break; + } + + return retval; +} + +static int msm_buspm_dev_release(struct inode *inode, struct file *filp) +{ + struct msm_buspm_map_dev *dev = filp->private_data; + + msm_buspm_dev_free(filp); + kfree(dev); + filp->private_data = NULL; + + return 0; +} + +static int msm_buspm_dev_mmap(struct file *filp, struct vm_area_struct *vma) +{ + pr_debug("vma = 0x%p\n", vma); + + /* Mappings are uncached */ + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, + vma->vm_end - vma->vm_start, vma->vm_page_prot)) + return -EFAULT; + + return 0; +} + +#ifdef CONFIG_COMPAT +static long +msm_buspm_dev_compat_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + return msm_buspm_dev_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)); +} +#endif + +static int __init msm_buspm_dev_init(void) +{ + int ret = 0; + + ret = misc_register(&msm_buspm_misc); + if (ret < 0) + pr_err("%s: Cannot register misc device\n", __func__); + + if (msm_buspm_misc.this_device->coherent_dma_mask == 0) + msm_buspm_misc.this_device->coherent_dma_mask = + DMA_BIT_MASK(32); + + return ret; +} + +static void __exit msm_buspm_dev_exit(void) +{ + misc_deregister(&msm_buspm_misc); +} +module_init(msm_buspm_dev_init); +module_exit(msm_buspm_dev_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_VERSION("1.0"); +MODULE_ALIAS("platform:"MSM_BUSPM_DRV_NAME); diff --git 
a/drivers/soc/qcom/msm_bus/msm_bus_adhoc.h b/drivers/soc/qcom/msm_bus/msm_bus_adhoc.h new file mode 100644 index 000000000000..cad0c53f2c73 --- /dev/null +++ b/drivers/soc/qcom/msm_bus/msm_bus_adhoc.h @@ -0,0 +1,147 @@ +/* Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _ARCH_ARM_MACH_MSM_BUS_ADHOC_H +#define _ARCH_ARM_MACH_MSM_BUS_ADHOC_H + +#include <linux/types.h> +#include <linux/device.h> +#include <linux/msm-bus-board.h> +#include <linux/msm-bus.h> +#include <linux/msm_bus_rules.h> +#include "msm_bus_core.h" + +struct msm_bus_node_device_type; +struct link_node { + uint64_t lnode_ib[NUM_CTX]; + uint64_t lnode_ab[NUM_CTX]; + int next; + struct device *next_dev; + struct list_head link; + uint32_t in_use; +}; + +/* New types introduced for adhoc topology */ +struct msm_bus_noc_ops { + int (*qos_init)(struct msm_bus_node_device_type *dev, + void __iomem *qos_base, uint32_t qos_off, + uint32_t qos_delta, uint32_t qos_freq); + int (*set_bw)(struct msm_bus_node_device_type *dev, + void __iomem *qos_base, uint32_t qos_off, + uint32_t qos_delta, uint32_t qos_freq); + int (*limit_mport)(struct msm_bus_node_device_type *dev, + void __iomem *qos_base, uint32_t qos_off, + uint32_t qos_delta, uint32_t qos_freq, bool enable_lim, + uint64_t lim_bw); + bool (*update_bw_reg)(int mode); +}; + +struct nodebw { + uint64_t ab[NUM_CTX]; + bool dirty; +}; + +struct msm_bus_fab_device_type { + void __iomem *qos_base; + phys_addr_t pqos_base; + size_t qos_range; + uint32_t base_offset; + uint32_t 
qos_freq; + uint32_t qos_off; + uint32_t util_fact; + uint32_t vrail_comp; + struct msm_bus_noc_ops noc_ops; + enum msm_bus_hw_sel bus_type; + bool bypass_qos_prg; +}; + +struct qos_params_type { + int mode; + unsigned int prio_lvl; + unsigned int prio_rd; + unsigned int prio_wr; + unsigned int prio1; + unsigned int prio0; + unsigned int gp; + unsigned int thmp; + unsigned int ws; + int cur_mode; + u64 bw_buffer; +}; + +struct msm_bus_node_info_type { + const char *name; + unsigned int id; + int mas_rpm_id; + int slv_rpm_id; + int num_ports; + int num_qports; + int *qport; + struct qos_params_type qos_params; + unsigned int num_connections; + unsigned int num_blist; + bool is_fab_dev; + bool virt_dev; + bool is_traversed; + unsigned int *connections; + unsigned int *black_listed_connections; + struct device **dev_connections; + struct device **black_connections; + unsigned int bus_device_id; + struct device *bus_device; + unsigned int buswidth; + struct rule_update_path_info rule; + uint64_t lim_bw; +}; + +struct msm_bus_node_device_type { + struct msm_bus_node_info_type *node_info; + struct msm_bus_fab_device_type *fabdev; + int num_lnodes; + struct link_node *lnode_list; + uint64_t cur_clk_hz[NUM_CTX]; + struct nodebw node_ab; + struct list_head link; + unsigned int ap_owned; + struct nodeclk clk[NUM_CTX]; + struct nodeclk qos_clk; +}; + +int msm_bus_enable_limiter(struct msm_bus_node_device_type *nodedev, + bool throttle_en, uint64_t lim_bw); +int msm_bus_update_clks(struct msm_bus_node_device_type *nodedev, + int ctx, int **dirty_nodes, int *num_dirty); +int msm_bus_commit_data(int *dirty_nodes, int ctx, int num_dirty); +int msm_bus_update_bw(struct msm_bus_node_device_type *nodedev, int ctx, + int64_t add_bw, int **dirty_nodes, int *num_dirty); +void *msm_bus_realloc_devmem(struct device *dev, void *p, size_t old_size, + size_t new_size, gfp_t flags); + +extern struct msm_bus_device_node_registration + *msm_bus_of_to_pdata(struct platform_device *pdev); 
+extern void msm_bus_arb_setops_adhoc(struct msm_bus_arb_ops *arb_ops); +extern int msm_bus_bimc_set_ops(struct msm_bus_node_device_type *bus_dev); +extern int msm_bus_noc_set_ops(struct msm_bus_node_device_type *bus_dev); +extern int msm_bus_of_get_static_rules(struct platform_device *pdev, + struct bus_rule_type **static_rule); +extern int msm_rules_update_path(struct list_head *input_list, + struct list_head *output_list); +extern void print_all_rules(void); +#ifdef CONFIG_DEBUG_BUS_VOTER +int msm_bus_floor_init(struct device *dev); +#else +static inline int msm_bus_floor_init(struct device *dev) +{ + return 0; +} +#endif /* CONFIG_DBG_BUS_VOTER */ +#endif /* _ARCH_ARM_MACH_MSM_BUS_ADHOC_H */ diff --git a/drivers/soc/qcom/msm_bus/msm_bus_arb.c b/drivers/soc/qcom/msm_bus/msm_bus_arb.c new file mode 100644 index 000000000000..0a92e182210d --- /dev/null +++ b/drivers/soc/qcom/msm_bus/msm_bus_arb.c @@ -0,0 +1,1137 @@ +/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/mutex.h> +#include <linux/radix-tree.h> +#include <linux/clk.h> +#include <linux/msm-bus.h> +#include "msm_bus_core.h" +#include <trace/events/trace_msm_bus.h> + +#define INDEX_MASK 0x0000FFFF +#define PNODE_MASK 0xFFFF0000 +#define SHIFT_VAL 16 +#define CREATE_PNODE_ID(n, i) (((n) << SHIFT_VAL) | (i)) +#define GET_INDEX(n) ((n) & INDEX_MASK) +#define GET_NODE(n) ((n) >> SHIFT_VAL) +#define IS_NODE(n) ((n) % FABRIC_ID_KEY) +#define SEL_FAB_CLK 1 +#define SEL_SLAVE_CLK 0 +/* + * To get to BIMC BW convert Hz to bytes by multiplying bus width(8), + * double-data-rate(2) * ddr-channels(2). + */ +#define GET_BIMC_BW(clk) (clk * 8 * 2 * 2) + +#define BW_TO_CLK_FREQ_HZ(width, bw) \ + msm_bus_div64(width, bw) + +#define IS_MASTER_VALID(mas) \ + (((mas >= MSM_BUS_MASTER_FIRST) && (mas <= MSM_BUS_MASTER_LAST)) \ + ? 1 : 0) + +#define IS_SLAVE_VALID(slv) \ + (((slv >= MSM_BUS_SLAVE_FIRST) && (slv <= MSM_BUS_SLAVE_LAST)) ? 1 : 0) + +static DEFINE_MUTEX(msm_bus_lock); + +/* This function uses shift operations to divide 64 bit value for higher + * efficiency. The divisor expected are number of ports or bus-width. + * These are expected to be 1, 2, 4, 8, 16 and 32 in most cases. + * + * To account for exception to the above divisor values, the standard + * do_div function is used. 
+ * */ +uint64_t msm_bus_div64(unsigned int w, uint64_t bw) +{ + uint64_t *b = &bw; + + if ((bw > 0) && (bw < w)) + return 1; + + switch (w) { + case 0: + WARN(1, "AXI: Divide by 0 attempted\n"); + case 1: return bw; + case 2: return (bw >> 1); + case 4: return (bw >> 2); + case 8: return (bw >> 3); + case 16: return (bw >> 4); + case 32: return (bw >> 5); + } + + do_div(*b, w); + return *b; +} + +/** + * add_path_node: Adds the path information to the current node + * @info: Internal node info structure + * @next: Combination of the id and index of the next node + * Function returns: Number of pnodes (path_nodes) on success, + * error on failure. + * + * Every node maintains the list of path nodes. A path node is + * reached by finding the node-id and index stored at the current + * node. This makes updating the paths with requested bw and clock + * values efficient, as it avoids lookup for each update-path request. + */ +static int add_path_node(struct msm_bus_inode_info *info, int next) +{ + struct path_node *pnode; + int i; + if (ZERO_OR_NULL_PTR(info)) { + MSM_BUS_ERR("Cannot find node info!: id :%d\n", + info->node_info->priv_id); + return -ENXIO; + } + + for (i = 0; i <= info->num_pnodes; i++) { + if (info->pnode[i].next == -2) { + MSM_BUS_DBG("Reusing pnode for info: %d at index: %d\n", + info->node_info->priv_id, i); + info->pnode[i].clk[DUAL_CTX] = 0; + info->pnode[i].clk[ACTIVE_CTX] = 0; + info->pnode[i].bw[DUAL_CTX] = 0; + info->pnode[i].bw[ACTIVE_CTX] = 0; + info->pnode[i].next = next; + MSM_BUS_DBG("%d[%d] : (%d, %d)\n", + info->node_info->priv_id, i, GET_NODE(next), + GET_INDEX(next)); + return i; + } + } + + info->num_pnodes++; + pnode = krealloc(info->pnode, + ((info->num_pnodes + 1) * sizeof(struct path_node)) + , GFP_KERNEL); + if (ZERO_OR_NULL_PTR(pnode)) { + MSM_BUS_ERR("Error creating path node!\n"); + info->num_pnodes--; + return -ENOMEM; + } + info->pnode = pnode; + info->pnode[info->num_pnodes].clk[DUAL_CTX] = 0; + 
info->pnode[info->num_pnodes].clk[ACTIVE_CTX] = 0; + info->pnode[info->num_pnodes].bw[DUAL_CTX] = 0; + info->pnode[info->num_pnodes].bw[ACTIVE_CTX] = 0; + info->pnode[info->num_pnodes].next = next; + MSM_BUS_DBG("%d[%d] : (%d, %d)\n", info->node_info->priv_id, + info->num_pnodes, GET_NODE(next), GET_INDEX(next)); + return info->num_pnodes; +} + +static int clearvisitedflag(struct device *dev, void *data) +{ + struct msm_bus_fabric_device *fabdev = to_msm_bus_fabric_device(dev); + fabdev->visited = false; + return 0; +} + +/** + * getpath() - Finds the path from the topology between src and dest + * @src: Source. This is the master from which the request originates + * @dest: Destination. This is the slave to which we're trying to reach + * + * Function returns: next_pnode_id. The higher 16 bits of the next_pnode_id + * represent the src id of the next node on path. The lower 16 bits of the + * next_pnode_id represent the "index", which is the next entry in the array + * of pnodes for that node to fill in clk and bw values. This is created using + * CREATE_PNODE_ID. The return value is stored in ret_pnode, and this is added + * to the list of path nodes. + * + * This function recursively finds the path by updating the src to the + * closest possible node to dest. + */ +static int getpath(int src, int dest) +{ + int pnode_num = -1, i; + struct msm_bus_fabnodeinfo *fabnodeinfo; + struct msm_bus_fabric_device *fabdev; + int next_pnode_id = -1; + struct msm_bus_inode_info *info = NULL; + int _src = src/FABRIC_ID_KEY; + int _dst = dest/FABRIC_ID_KEY; + int ret_pnode = -1; + int fabid = GET_FABID(src); + + /* Find the location of fabric for the src */ + MSM_BUS_DBG("%d --> %d\n", src, dest); + + fabdev = msm_bus_get_fabric_device(fabid); + if (!fabdev) { + MSM_BUS_WARN("Fabric Not yet registered. Try again\n"); + return -ENXIO; + } + + /* Are we there yet? 
*/ + if (src == dest) { + info = fabdev->algo->find_node(fabdev, src); + if (ZERO_OR_NULL_PTR(info)) { + MSM_BUS_ERR("Node %d not found\n", dest); + return -ENXIO; + } + + for (i = 0; i <= info->num_pnodes; i++) { + if (info->pnode[i].next == -2) { + MSM_BUS_DBG("src = dst Reusing pnode for" + " info: %d at index: %d\n", + info->node_info->priv_id, i); + next_pnode_id = CREATE_PNODE_ID(src, i); + info->pnode[i].clk[DUAL_CTX] = 0; + info->pnode[i].bw[DUAL_CTX] = 0; + info->pnode[i].next = next_pnode_id; + MSM_BUS_DBG("returning: %d, %d\n", GET_NODE + (next_pnode_id), GET_INDEX(next_pnode_id)); + return next_pnode_id; + } + } + next_pnode_id = CREATE_PNODE_ID(src, (info->num_pnodes + 1)); + pnode_num = add_path_node(info, next_pnode_id); + if (pnode_num < 0) { + MSM_BUS_ERR("Error adding path node\n"); + return -ENXIO; + } + MSM_BUS_DBG("returning: %d, %d\n", GET_NODE(next_pnode_id), + GET_INDEX(next_pnode_id)); + return next_pnode_id; + } else if (_src == _dst) { + /* + * src and dest belong to same fabric, find the destination + * from the radix tree + */ + info = fabdev->algo->find_node(fabdev, dest); + if (ZERO_OR_NULL_PTR(info)) { + MSM_BUS_ERR("Node %d not found\n", dest); + return -ENXIO; + } + + ret_pnode = getpath(info->node_info->priv_id, dest); + next_pnode_id = ret_pnode; + } else { + /* find the dest fabric */ + int trynextgw = true; + struct list_head *gateways = fabdev->algo->get_gw_list(fabdev); + list_for_each_entry(fabnodeinfo, gateways, list) { + /* see if the destination is at a connected fabric */ + if (_dst == (fabnodeinfo->info->node_info->priv_id / + FABRIC_ID_KEY)) { + /* Found the fab on which the device exists */ + info = fabnodeinfo->info; + trynextgw = false; + ret_pnode = getpath(info->node_info->priv_id, + dest); + pnode_num = add_path_node(info, ret_pnode); + if (pnode_num < 0) { + MSM_BUS_ERR("Error adding path node\n"); + return -ENXIO; + } + next_pnode_id = CREATE_PNODE_ID( + info->node_info->priv_id, pnode_num); + break; + } + } + 
+ /* find the gateway */ + if (trynextgw) { + gateways = fabdev->algo->get_gw_list(fabdev); + list_for_each_entry(fabnodeinfo, gateways, list) { + struct msm_bus_fabric_device *gwfab = + msm_bus_get_fabric_device(fabnodeinfo-> + info->node_info->priv_id); + if (!gwfab) { + MSM_BUS_ERR("Err: No gateway found\n"); + return -ENXIO; + } + + if (!gwfab->visited) { + MSM_BUS_DBG("VISITED ID: %d\n", + gwfab->id); + gwfab->visited = true; + info = fabnodeinfo->info; + ret_pnode = getpath(info-> + node_info->priv_id, dest); + pnode_num = add_path_node(info, + ret_pnode); + if (pnode_num < 0) { + MSM_BUS_ERR("Malloc failure in" + " adding path node\n"); + return -ENXIO; + } + next_pnode_id = CREATE_PNODE_ID( + info->node_info->priv_id, pnode_num); + break; + } + } + if (next_pnode_id < 0) + return -ENXIO; + } + } + + if (!IS_NODE(src)) { + MSM_BUS_DBG("Returning next_pnode_id:%d[%d]\n", GET_NODE( + next_pnode_id), GET_INDEX(next_pnode_id)); + return next_pnode_id; + } + info = fabdev->algo->find_node(fabdev, src); + if (!info) { + MSM_BUS_ERR("Node info not found.\n"); + return -ENXIO; + } + + pnode_num = add_path_node(info, next_pnode_id); + MSM_BUS_DBG(" Last: %d[%d] = (%d, %d)\n", + src, info->num_pnodes, GET_NODE(next_pnode_id), + GET_INDEX(next_pnode_id)); + MSM_BUS_DBG("returning: %d, %d\n", src, pnode_num); + return CREATE_PNODE_ID(src, pnode_num); +} + +static uint64_t get_node_maxib(struct msm_bus_inode_info *info) +{ + int i, ctx; + uint64_t maxib = 0; + + for (i = 0; i <= info->num_pnodes; i++) { + for (ctx = 0; ctx < NUM_CTX; ctx++) + maxib = max(info->pnode[i].clk[ctx], maxib); + } + + MSM_BUS_DBG("%s: Node %d numpnodes %d maxib %llu", __func__, + info->num_pnodes, info->node_info->id, maxib); + return maxib; +} + + +static uint64_t get_node_sumab(struct msm_bus_inode_info *info) +{ + int i; + uint64_t maxab = 0; + + for (i = 0; i <= info->num_pnodes; i++) + maxab += info->pnode[i].bw[DUAL_CTX]; + + MSM_BUS_DBG("%s: Node %d numpnodes %d maxib %llu", __func__, + 
info->num_pnodes, info->node_info->id, maxab); + return maxab; +} + +static uint64_t get_vfe_bw(void) +{ + int vfe_id = MSM_BUS_MASTER_VFE; + int iid = msm_bus_board_get_iid(vfe_id); + int fabid; + struct msm_bus_fabric_device *fabdev; + struct msm_bus_inode_info *info; + uint64_t vfe_bw = 0; + + fabid = GET_FABID(iid); + fabdev = msm_bus_get_fabric_device(fabid); + info = fabdev->algo->find_node(fabdev, iid); + + if (!info) { + MSM_BUS_ERR("%s: Can't find node %d", __func__, + vfe_id); + goto exit_get_vfe_bw; + } + + vfe_bw = get_node_sumab(info); + MSM_BUS_DBG("vfe_ab %llu", vfe_bw); + +exit_get_vfe_bw: + return vfe_bw; +} + +static uint64_t get_mdp_bw(void) +{ + int ids[] = {MSM_BUS_MASTER_MDP_PORT0, MSM_BUS_MASTER_MDP_PORT1}; + int i; + uint64_t mdp_ab = 0; + uint32_t ff = 0; + + for (i = 0; i < ARRAY_SIZE(ids); i++) { + int iid = msm_bus_board_get_iid(ids[i]); + int fabid; + struct msm_bus_fabric_device *fabdev; + struct msm_bus_inode_info *info; + + fabid = GET_FABID(iid); + fabdev = msm_bus_get_fabric_device(fabid); + info = fabdev->algo->find_node(fabdev, iid); + + if (!info) { + MSM_BUS_ERR("%s: Can't find node %d", __func__, + ids[i]); + continue; + } + + mdp_ab += get_node_sumab(info); + MSM_BUS_DBG("mdp_ab %llu", mdp_ab); + ff = info->node_info->ff; + } + + if (ff) { + mdp_ab = msm_bus_div64(2 * ff, 100 * mdp_ab); + } else { + MSM_BUS_ERR("MDP FF is 0"); + mdp_ab = 0; + } + + + MSM_BUS_DBG("MDP BW %llu\n", mdp_ab); + return mdp_ab; +} + +static uint64_t get_rt_bw(void) +{ + uint64_t rt_bw = 0; + + rt_bw += get_mdp_bw(); + rt_bw += get_vfe_bw(); + + return rt_bw; +} + +static uint64_t get_avail_bw(struct msm_bus_fabric_device *fabdev) +{ + uint64_t fabclk_rate = 0; + int i; + uint64_t avail_bw = 0; + uint64_t rt_bw = get_rt_bw(); + struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev); + + if (!rt_bw) + goto exit_get_avail_bw; + + for (i = 0; i < NUM_CTX; i++) { + uint64_t ctx_rate; + ctx_rate = + fabric->info.nodeclk[i].rate; + fabclk_rate = 
max(ctx_rate, fabclk_rate); + } + + if (!fabdev->eff_fact || !fabdev->nr_lim_thresh) { + MSM_BUS_ERR("Error: Eff-fact %d; nr_thresh %llu", + fabdev->eff_fact, fabdev->nr_lim_thresh); + return 0; + } + + avail_bw = msm_bus_div64(100, + (GET_BIMC_BW(fabclk_rate) * fabdev->eff_fact)); + + if (avail_bw >= fabdev->nr_lim_thresh) + return 0; + + MSM_BUS_DBG("%s: Total_avail_bw %llu, rt_bw %llu\n", + __func__, avail_bw, rt_bw); + trace_bus_avail_bw(avail_bw, rt_bw); + + if (avail_bw < rt_bw) { + MSM_BUS_ERR("\n%s: ERROR avail BW %llu < MDP %llu", + __func__, avail_bw, rt_bw); + avail_bw = 0; + goto exit_get_avail_bw; + } + avail_bw -= rt_bw; + +exit_get_avail_bw: + return avail_bw; +} + +static void program_nr_limits(struct msm_bus_fabric_device *fabdev) +{ + int num_nr_lim = 0; + int i; + struct msm_bus_inode_info *info[fabdev->num_nr_lim]; + struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev); + + num_nr_lim = radix_tree_gang_lookup_tag(&fabric->fab_tree, + (void **)&info, fabric->fabdev.id, fabdev->num_nr_lim, + MASTER_NODE); + + for (i = 0; i < num_nr_lim; i++) + fabdev->algo->config_limiter(fabdev, info[i]); +} + +static int msm_bus_commit_limiter(struct device *dev, void *data) +{ + int ret = 0; + struct msm_bus_fabric_device *fabdev = to_msm_bus_fabric_device(dev); + + MSM_BUS_DBG("fabid: %d\n", fabdev->id); + program_nr_limits(fabdev); + return ret; +} + +static void compute_nr_limits(struct msm_bus_fabric_device *fabdev, int pnode) +{ + uint64_t total_ib = 0; + int num_nr_lim = 0; + uint64_t avail_bw = 0; + struct msm_bus_inode_info *info[fabdev->num_nr_lim]; + struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev); + int i; + + num_nr_lim = radix_tree_gang_lookup_tag(&fabric->fab_tree, + (void **)&info, fabric->fabdev.id, fabdev->num_nr_lim, + MASTER_NODE); + + MSM_BUS_DBG("%s: Found %d NR LIM nodes", __func__, num_nr_lim); + for (i = 0; i < num_nr_lim; i++) + total_ib += get_node_maxib(info[i]); + + avail_bw = get_avail_bw(fabdev); + MSM_BUS_DBG("\n 
%s: Avail BW %llu", __func__, avail_bw); + + for (i = 0; i < num_nr_lim; i++) { + uint32_t node_pct = 0; + uint64_t new_lim_bw = 0; + uint64_t node_max_ib = 0; + uint32_t node_max_ib_kB = 0; + uint32_t total_ib_kB = 0; + uint64_t bw_node; + + node_max_ib = get_node_maxib(info[i]); + node_max_ib_kB = msm_bus_div64(1024, node_max_ib); + total_ib_kB = msm_bus_div64(1024, total_ib); + node_pct = (node_max_ib_kB * 100) / total_ib_kB; + bw_node = node_pct * avail_bw; + new_lim_bw = msm_bus_div64(100, bw_node); + + /* + * if limiter bw is more than the requested IB clip to + requested IB. + */ + if (new_lim_bw >= node_max_ib) + new_lim_bw = node_max_ib; + + /* + * if there is a floor bw for this nr lim node and + * if there is available bw to divy up among the nr masters + * and if the nr lim masters have a non zero vote and + * if the limited bw is below the floor for this node. + * then limit this node to the floor bw. + */ + if (info[i]->node_info->floor_bw && node_max_ib && avail_bw && + (new_lim_bw <= info[i]->node_info->floor_bw)) { + MSM_BUS_ERR("\nNode %d:Limiting BW:%llu < floor:%llu", + info[i]->node_info->id, new_lim_bw, + info[i]->node_info->floor_bw); + new_lim_bw = info[i]->node_info->floor_bw; + } + + if (new_lim_bw != info[i]->cur_lim_bw) { + info[i]->cur_lim_bw = new_lim_bw; + MSM_BUS_DBG("NodeId %d: Requested IB %llu", + info[i]->node_info->id, node_max_ib); + MSM_BUS_DBG("Limited to %llu(%d pct of Avail %llu )\n", + new_lim_bw, node_pct, avail_bw); + } else { + MSM_BUS_DBG("NodeId %d: No change Limited to %llu\n", + info[i]->node_info->id, info[i]->cur_lim_bw); + } + } +} + +static void setup_nr_limits(int curr, int pnode) +{ + struct msm_bus_fabric_device *fabdev = + msm_bus_get_fabric_device(GET_FABID(curr)); + struct msm_bus_inode_info *info; + + if (!fabdev) { + MSM_BUS_WARN("Fabric Not yet registered. 
Try again\n"); + goto exit_setup_nr_limits; + } + + /* This logic is currently applicable to BIMC masters only */ + if (fabdev->id != MSM_BUS_FAB_DEFAULT) { + MSM_BUS_ERR("Static limiting of NR masters only for BIMC\n"); + goto exit_setup_nr_limits; + } + + info = fabdev->algo->find_node(fabdev, curr); + if (!info) { + MSM_BUS_ERR("Cannot find node info!\n"); + goto exit_setup_nr_limits; + } + + compute_nr_limits(fabdev, pnode); +exit_setup_nr_limits: + return; +} + +static bool is_nr_lim(int id) +{ + struct msm_bus_fabric_device *fabdev = msm_bus_get_fabric_device + (GET_FABID(id)); + struct msm_bus_inode_info *info; + bool ret = false; + + if (!fabdev) { + MSM_BUS_ERR("Bus device for bus ID: %d not found!\n", + GET_FABID(id)); + goto exit_is_nr_lim; + } + + info = fabdev->algo->find_node(fabdev, id); + if (!info) + MSM_BUS_ERR("Cannot find node info %d!\n", id); + else if ((info->node_info->nr_lim || info->node_info->rt_mas)) + ret = true; +exit_is_nr_lim: + return ret; +} + +/** + * update_path() - Update the path with the bandwidth and clock values, as + * requested by the client. + * + * @curr: Current source node, as specified in the client vector (master) + * @pnode: The first-hop node on the path, stored in the internal client struct + * @req_clk: Requested clock value from the vector + * @req_bw: Requested bandwidth value from the vector + * @curr_clk: Current clock frequency + * @curr_bw: Currently allocated bandwidth + * + * This function updates the nodes on the path calculated using getpath(), with + * clock and bandwidth values. The sum of bandwidths, and the max of clock + * frequencies is calculated at each node on the path. Commit data to be sent + * to RPM for each master and slave is also calculated here. 
+ */ +static int update_path(int curr, int pnode, uint64_t req_clk, uint64_t req_bw, + uint64_t curr_clk, uint64_t curr_bw, unsigned int ctx, unsigned int + cl_active_flag) +{ + int index, ret = 0; + struct msm_bus_inode_info *info; + struct msm_bus_inode_info *src_info; + int next_pnode; + int64_t add_bw = req_bw - curr_bw; + uint64_t bwsum = 0; + uint64_t req_clk_hz, curr_clk_hz, bwsum_hz; + int *master_tiers; + struct msm_bus_fabric_device *fabdev = msm_bus_get_fabric_device + (GET_FABID(curr)); + + if (!fabdev) { + MSM_BUS_ERR("Bus device for bus ID: %d not found!\n", + GET_FABID(curr)); + return -ENXIO; + } + + MSM_BUS_DBG("args: %d %d %d %llu %llu %llu %llu %u\n", + curr, GET_NODE(pnode), GET_INDEX(pnode), req_clk, req_bw, + curr_clk, curr_bw, ctx); + index = GET_INDEX(pnode); + MSM_BUS_DBG("Client passed index :%d\n", index); + info = fabdev->algo->find_node(fabdev, curr); + if (!info) { + MSM_BUS_ERR("Cannot find node info!\n"); + return -ENXIO; + } + src_info = info; + + info->link_info.sel_bw = &info->link_info.bw[ctx]; + info->link_info.sel_clk = &info->link_info.clk[ctx]; + *info->link_info.sel_bw += add_bw; + + info->pnode[index].sel_bw = &info->pnode[index].bw[ctx]; + + /** + * To select the right clock, AND the context with + * client active flag. 
+ */ + info->pnode[index].sel_clk = &info->pnode[index].clk[ctx & + cl_active_flag]; + *info->pnode[index].sel_bw += add_bw; + *info->pnode[index].sel_clk = req_clk; + + /** + * If master supports dual configuration, check if + * the configuration needs to be changed based on + * incoming requests + */ + if (info->node_info->dual_conf) { + uint64_t node_maxib = 0; + node_maxib = get_node_maxib(info); + fabdev->algo->config_master(fabdev, info, + node_maxib, req_bw); + } + + info->link_info.num_tiers = info->node_info->num_tiers; + info->link_info.tier = info->node_info->tier; + master_tiers = info->node_info->tier; + + do { + struct msm_bus_inode_info *hop; + fabdev = msm_bus_get_fabric_device(GET_FABID(curr)); + if (!fabdev) { + MSM_BUS_ERR("Fabric not found\n"); + return -ENXIO; + } + MSM_BUS_DBG("id: %d\n", info->node_info->priv_id); + + /* find next node and index */ + next_pnode = info->pnode[index].next; + curr = GET_NODE(next_pnode); + index = GET_INDEX(next_pnode); + MSM_BUS_DBG("id:%d, next: %d\n", info-> + node_info->priv_id, curr); + + /* Get hop */ + /* check if we are here as gateway, or does the hop belong to + * this fabric */ + if (IS_NODE(curr)) + hop = fabdev->algo->find_node(fabdev, curr); + else + hop = fabdev->algo->find_gw_node(fabdev, curr); + if (!hop) { + MSM_BUS_ERR("Null Info found for hop\n"); + return -ENXIO; + } + + hop->link_info.sel_bw = &hop->link_info.bw[ctx]; + hop->link_info.sel_clk = &hop->link_info.clk[ctx]; + *hop->link_info.sel_bw += add_bw; + + hop->pnode[index].sel_bw = &hop->pnode[index].bw[ctx]; + hop->pnode[index].sel_clk = &hop->pnode[index].clk[ctx & + cl_active_flag]; + + if (!hop->node_info->buswidth) { + MSM_BUS_WARN("No bus width found. 
Using default\n"); + hop->node_info->buswidth = 8; + } + *hop->pnode[index].sel_clk = BW_TO_CLK_FREQ_HZ(hop->node_info-> + buswidth, req_clk); + *hop->pnode[index].sel_bw += add_bw; + MSM_BUS_DBG("fabric: %d slave: %d, slave-width: %d info: %d\n", + fabdev->id, hop->node_info->priv_id, hop->node_info-> + buswidth, info->node_info->priv_id); + /* Update Bandwidth */ + fabdev->algo->update_bw(fabdev, hop, info, add_bw, + master_tiers, ctx); + bwsum = *hop->link_info.sel_bw; + /* Update Fabric clocks */ + curr_clk_hz = BW_TO_CLK_FREQ_HZ(hop->node_info->buswidth, + curr_clk); + req_clk_hz = BW_TO_CLK_FREQ_HZ(hop->node_info->buswidth, + req_clk); + bwsum_hz = BW_TO_CLK_FREQ_HZ(hop->node_info->buswidth, + bwsum); + /* Account for multiple channels if any */ + if (hop->node_info->num_sports > 1) + bwsum_hz = msm_bus_div64(hop->node_info->num_sports, + bwsum_hz); + MSM_BUS_DBG("AXI: Hop: %d, ports: %d, bwsum_hz: %llu\n", + hop->node_info->id, hop->node_info->num_sports, + bwsum_hz); + MSM_BUS_DBG("up-clk: curr_hz: %llu, req_hz: %llu, bw_hz %llu\n", + curr_clk, req_clk, bwsum_hz); + ret = fabdev->algo->update_clks(fabdev, hop, index, + curr_clk_hz, req_clk_hz, bwsum_hz, SEL_FAB_CLK, + ctx, cl_active_flag); + if (ret) + MSM_BUS_WARN("Failed to update clk\n"); + info = hop; + } while (GET_NODE(info->pnode[index].next) != info->node_info->priv_id); + + /* Update BW, clk after exiting the loop for the last one */ + if (!info) { + MSM_BUS_ERR("Cannot find node info!\n"); + return -ENXIO; + } + + /* Update slave clocks */ + ret = fabdev->algo->update_clks(fabdev, info, index, curr_clk_hz, + req_clk_hz, bwsum_hz, SEL_SLAVE_CLK, ctx, cl_active_flag); + if (ret) + MSM_BUS_ERR("Failed to update clk\n"); + + if ((ctx == cl_active_flag) && + ((src_info->node_info->nr_lim || src_info->node_info->rt_mas))) + setup_nr_limits(curr, pnode); + + /* If freq is going down , apply the changes now before + * we commit clk data. 
+ */ + if ((req_clk < curr_clk) || (req_bw < curr_bw)) + bus_for_each_dev(&msm_bus_type, NULL, NULL, + msm_bus_commit_limiter); + return ret; +} + +/** + * msm_bus_commit_fn() - Commits the data for fabric to rpm + * @dev: fabric device + * @data: NULL + */ +static int msm_bus_commit_fn(struct device *dev, void *data) +{ + int ret = 0; + struct msm_bus_fabric_device *fabdev = to_msm_bus_fabric_device(dev); + MSM_BUS_DBG("Committing: fabid: %d\n", fabdev->id); + ret = fabdev->algo->commit(fabdev); + return ret; +} + +static uint32_t register_client_legacy(struct msm_bus_scale_pdata *pdata) +{ + struct msm_bus_client *client = NULL; + int i; + int src, dest, nfab; + struct msm_bus_fabric_device *deffab; + + deffab = msm_bus_get_fabric_device(MSM_BUS_FAB_DEFAULT); + if (!deffab) { + MSM_BUS_ERR("Error finding default fabric\n"); + return 0; + } + + nfab = msm_bus_get_num_fab(); + if (nfab < deffab->board_algo->board_nfab) { + MSM_BUS_ERR("Can't register client!\n" + "Num of fabrics up: %d\n", + nfab); + return 0; + } + + if ((!pdata) || (pdata->usecase->num_paths == 0) || IS_ERR(pdata)) { + MSM_BUS_ERR("Cannot register client with null data\n"); + return 0; + } + + client = kzalloc(sizeof(struct msm_bus_client), GFP_KERNEL); + if (!client) { + MSM_BUS_ERR("Error allocating client\n"); + return 0; + } + + mutex_lock(&msm_bus_lock); + client->pdata = pdata; + client->curr = -1; + for (i = 0; i < pdata->usecase->num_paths; i++) { + int *pnode; + struct msm_bus_fabric_device *srcfab; + pnode = krealloc(client->src_pnode, ((i + 1) * sizeof(int)), + GFP_KERNEL); + if (ZERO_OR_NULL_PTR(pnode)) { + MSM_BUS_ERR("Invalid Pnode ptr!\n"); + continue; + } else + client->src_pnode = pnode; + + if (!IS_MASTER_VALID(pdata->usecase->vectors[i].src)) { + MSM_BUS_ERR("Invalid Master ID %d in request!\n", + pdata->usecase->vectors[i].src); + goto err; + } + + if (!IS_SLAVE_VALID(pdata->usecase->vectors[i].dst)) { + MSM_BUS_ERR("Invalid Slave ID %d in request!\n", + 
pdata->usecase->vectors[i].dst); + goto err; + } + + src = msm_bus_board_get_iid(pdata->usecase->vectors[i].src); + if (src == -ENXIO) { + MSM_BUS_ERR("Master %d not supported. Client cannot be" + " registered\n", + pdata->usecase->vectors[i].src); + goto err; + } + dest = msm_bus_board_get_iid(pdata->usecase->vectors[i].dst); + if (dest == -ENXIO) { + MSM_BUS_ERR("Slave %d not supported. Client cannot be" + " registered\n", + pdata->usecase->vectors[i].dst); + goto err; + } + srcfab = msm_bus_get_fabric_device(GET_FABID(src)); + if (!srcfab) { + MSM_BUS_ERR("Fabric not found\n"); + goto err; + } + + srcfab->visited = true; + pnode[i] = getpath(src, dest); + bus_for_each_dev(&msm_bus_type, NULL, NULL, clearvisitedflag); + if (pnode[i] == -ENXIO) { + MSM_BUS_ERR("Cannot register client now! Try again!\n"); + goto err; + } + } + msm_bus_dbg_client_data(client->pdata, MSM_BUS_DBG_REGISTER, + (uint32_t)client); + mutex_unlock(&msm_bus_lock); + MSM_BUS_DBG("ret: %u num_paths: %d\n", (uint32_t)client, + pdata->usecase->num_paths); + return (uint32_t)(client); +err: + kfree(client->src_pnode); + kfree(client); + mutex_unlock(&msm_bus_lock); + return 0; +} + +static int update_request_legacy(uint32_t cl, unsigned index) +{ + int i, ret = 0; + struct msm_bus_scale_pdata *pdata; + int pnode, src = 0, curr, ctx; + uint64_t req_clk = 0, req_bw = 0, curr_clk = 0, curr_bw = 0; + struct msm_bus_client *client = (struct msm_bus_client *)cl; + if (IS_ERR_OR_NULL(client)) { + MSM_BUS_ERR("msm_bus_scale_client update req error %d\n", + (uint32_t)client); + return -ENXIO; + } + + mutex_lock(&msm_bus_lock); + if (client->curr == index) + goto err; + + curr = client->curr; + pdata = client->pdata; + if (!pdata) { + MSM_BUS_ERR("Null pdata passed to update-request\n"); + ret = -ENXIO; + goto err; + } + + if (index >= pdata->num_usecases) { + MSM_BUS_ERR("Client %u passed invalid index: %d\n", + (uint32_t)client, index); + ret = -ENXIO; + goto err; + } + + MSM_BUS_DBG("cl: %u index: %d 
curr: %d num_paths: %d\n", + cl, index, client->curr, client->pdata->usecase->num_paths); + + for (i = 0; i < pdata->usecase->num_paths; i++) { + src = msm_bus_board_get_iid(client->pdata->usecase[index]. + vectors[i].src); + if (src == -ENXIO) { + MSM_BUS_ERR("Master %d not supported. Request cannot" + " be updated\n", client->pdata->usecase-> + vectors[i].src); + goto err; + } + + if (msm_bus_board_get_iid(client->pdata->usecase[index]. + vectors[i].dst) == -ENXIO) { + MSM_BUS_ERR("Slave %d not supported. Request cannot" + " be updated\n", client->pdata->usecase-> + vectors[i].dst); + } + + pnode = client->src_pnode[i]; + req_clk = client->pdata->usecase[index].vectors[i].ib; + req_bw = client->pdata->usecase[index].vectors[i].ab; + if (curr < 0) { + curr_clk = 0; + curr_bw = 0; + } else { + curr_clk = client->pdata->usecase[curr].vectors[i].ib; + curr_bw = client->pdata->usecase[curr].vectors[i].ab; + MSM_BUS_DBG("ab: %llu ib: %llu\n", curr_bw, curr_clk); + } + + if (!pdata->active_only) { + ret = update_path(src, pnode, req_clk, req_bw, + curr_clk, curr_bw, 0, pdata->active_only); + if (ret) { + MSM_BUS_ERR("Update path failed! %d\n", ret); + goto err; + } + } + + ret = update_path(src, pnode, req_clk, req_bw, curr_clk, + curr_bw, ACTIVE_CTX, pdata->active_only); + if (ret) { + MSM_BUS_ERR("Update Path failed! %d\n", ret); + goto err; + } + } + + client->curr = index; + ctx = ACTIVE_CTX; + msm_bus_dbg_client_data(client->pdata, index, cl); + bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_commit_fn); + + /* For NR/RT limited masters, if freq is going up , apply the changes + * after we commit clk data. 
+ */ + if (is_nr_lim(src) && ((req_clk > curr_clk) || (req_bw > curr_bw))) + bus_for_each_dev(&msm_bus_type, NULL, NULL, + msm_bus_commit_limiter); + +err: + mutex_unlock(&msm_bus_lock); + return ret; +} + +static int reset_pnodes(int curr, int pnode) +{ + struct msm_bus_inode_info *info; + struct msm_bus_fabric_device *fabdev; + int index, next_pnode; + fabdev = msm_bus_get_fabric_device(GET_FABID(curr)); + if (!fabdev) { + MSM_BUS_ERR("Fabric not found for: %d\n", + (GET_FABID(curr))); + return -ENXIO; + } + + index = GET_INDEX(pnode); + info = fabdev->algo->find_node(fabdev, curr); + if (!info) { + MSM_BUS_ERR("Cannot find node info!\n"); + return -ENXIO; + } + + MSM_BUS_DBG("Starting the loop--remove\n"); + do { + struct msm_bus_inode_info *hop; + fabdev = msm_bus_get_fabric_device(GET_FABID(curr)); + if (!fabdev) { + MSM_BUS_ERR("Fabric not found\n"); + return -ENXIO; + } + + next_pnode = info->pnode[index].next; + info->pnode[index].next = -2; + curr = GET_NODE(next_pnode); + index = GET_INDEX(next_pnode); + if (IS_NODE(curr)) + hop = fabdev->algo->find_node(fabdev, curr); + else + hop = fabdev->algo->find_gw_node(fabdev, curr); + if (!hop) { + MSM_BUS_ERR("Null Info found for hop\n"); + return -ENXIO; + } + + MSM_BUS_DBG("%d[%d] = %d\n", info->node_info->priv_id, index, + info->pnode[index].next); + MSM_BUS_DBG("num_pnodes: %d: %d\n", info->node_info->priv_id, + info->num_pnodes); + info = hop; + } while (GET_NODE(info->pnode[index].next) != info->node_info->priv_id); + + info->pnode[index].next = -2; + MSM_BUS_DBG("%d[%d] = %d\n", info->node_info->priv_id, index, + info->pnode[index].next); + MSM_BUS_DBG("num_pnodes: %d: %d\n", info->node_info->priv_id, + info->num_pnodes); + return 0; +} + +int msm_bus_board_get_iid(int id) +{ + struct msm_bus_fabric_device *deffab; + + deffab = msm_bus_get_fabric_device(MSM_BUS_FAB_DEFAULT); + if (!deffab) { + MSM_BUS_ERR("Error finding default fabric\n"); + return -ENXIO; + } + + return deffab->board_algo->get_iid(id); 
+} + +void msm_bus_scale_client_reset_pnodes(uint32_t cl) +{ + int i, src, pnode, index; + struct msm_bus_client *client = (struct msm_bus_client *)(cl); + if (IS_ERR_OR_NULL(client)) { + MSM_BUS_ERR("msm_bus_scale_reset_pnodes error\n"); + return; + } + index = 0; + for (i = 0; i < client->pdata->usecase->num_paths; i++) { + src = msm_bus_board_get_iid( + client->pdata->usecase[index].vectors[i].src); + pnode = client->src_pnode[i]; + MSM_BUS_DBG("(%d, %d)\n", GET_NODE(pnode), GET_INDEX(pnode)); + reset_pnodes(src, pnode); + } +} + +static void unregister_client_legacy(uint32_t cl) +{ + int i; + struct msm_bus_client *client = (struct msm_bus_client *)(cl); + bool warn = false; + if (IS_ERR_OR_NULL(client)) + return; + + for (i = 0; i < client->pdata->usecase->num_paths; i++) { + if ((client->pdata->usecase[0].vectors[i].ab) || + (client->pdata->usecase[0].vectors[i].ib)) { + warn = true; + break; + } + } + + if (warn) { + int num_paths = client->pdata->usecase->num_paths; + int ab[num_paths], ib[num_paths]; + WARN(1, "%s called unregister with non-zero vectors\n", + client->pdata->name); + + /* + * Save client values and zero them out to + * cleanly unregister + */ + for (i = 0; i < num_paths; i++) { + ab[i] = client->pdata->usecase[0].vectors[i].ab; + ib[i] = client->pdata->usecase[0].vectors[i].ib; + client->pdata->usecase[0].vectors[i].ab = 0; + client->pdata->usecase[0].vectors[i].ib = 0; + } + + msm_bus_scale_client_update_request(cl, 0); + + /* Restore client vectors if required for re-registering. 
*/ + for (i = 0; i < num_paths; i++) { + client->pdata->usecase[0].vectors[i].ab = ab[i]; + client->pdata->usecase[0].vectors[i].ib = ib[i]; + } + } else if (client->curr != 0) + msm_bus_scale_client_update_request(cl, 0); + + MSM_BUS_DBG("Unregistering client %d\n", cl); + mutex_lock(&msm_bus_lock); + msm_bus_scale_client_reset_pnodes(cl); + msm_bus_dbg_client_data(client->pdata, MSM_BUS_DBG_UNREGISTER, cl); + mutex_unlock(&msm_bus_lock); + kfree(client->src_pnode); + kfree(client); +} + +void msm_bus_arb_setops_legacy(struct msm_bus_arb_ops *arb_ops) +{ + arb_ops->register_client = register_client_legacy; + arb_ops->update_request = update_request_legacy; + arb_ops->unregister_client = unregister_client_legacy; +} + diff --git a/drivers/soc/qcom/msm_bus/msm_bus_arb_adhoc.c b/drivers/soc/qcom/msm_bus/msm_bus_arb_adhoc.c new file mode 100644 index 000000000000..324d66663d77 --- /dev/null +++ b/drivers/soc/qcom/msm_bus/msm_bus_arb_adhoc.c @@ -0,0 +1,1120 @@ +/* Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This program is Mree software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/mutex.h> +#include <linux/clk.h> +#include <linux/msm-bus.h> +#include "msm_bus_core.h" +#include "msm_bus_adhoc.h" +#include <trace/events/trace_msm_bus.h> + +#define NUM_CL_HANDLES 50 +#define NUM_LNODES 3 +#define MAX_STR_CL 50 + +struct bus_search_type { + struct list_head link; + struct list_head node_list; +}; + +struct handle_type { + int num_entries; + struct msm_bus_client **cl_list; +}; + +static struct handle_type handle_list; +struct list_head input_list; +struct list_head apply_list; + +DEFINE_MUTEX(msm_bus_adhoc_lock); + +static bool chk_bl_list(struct list_head *black_list, unsigned int id) +{ + struct msm_bus_node_device_type *bus_node = NULL; + + list_for_each_entry(bus_node, black_list, link) { + if (bus_node->node_info->id == id) + return true; + } + return false; +} + +static void copy_remaining_nodes(struct list_head *edge_list, struct list_head + *traverse_list, struct list_head *route_list) +{ + struct bus_search_type *search_node; + + if (list_empty(edge_list) && list_empty(traverse_list)) + return; + + search_node = kzalloc(sizeof(struct bus_search_type), GFP_KERNEL); + INIT_LIST_HEAD(&search_node->node_list); + list_splice_init(edge_list, traverse_list); + list_splice_init(traverse_list, &search_node->node_list); + list_add_tail(&search_node->link, route_list); +} + +/* + * Duplicate instantiaion from msm_bus_arb.c. Todo there needs to be a + * "util" file for these common func/macros. 
+ * + * */ +uint64_t msm_bus_div64(unsigned int w, uint64_t bw) +{ + uint64_t *b = &bw; + + if ((bw > 0) && (bw < w)) + return 1; + + switch (w) { + case 0: + WARN(1, "AXI: Divide by 0 attempted\n"); + case 1: return bw; + case 2: return (bw >> 1); + case 4: return (bw >> 2); + case 8: return (bw >> 3); + case 16: return (bw >> 4); + case 32: return (bw >> 5); + } + + do_div(*b, w); + return *b; +} + +int msm_bus_device_match_adhoc(struct device *dev, void *id) +{ + int ret = 0; + struct msm_bus_node_device_type *bnode = dev->platform_data; + + if (bnode) + ret = (bnode->node_info->id == *(unsigned int *)id); + else + ret = 0; + + return ret; +} + +static int gen_lnode(struct device *dev, + int next_hop, int prev_idx) +{ + struct link_node *lnode; + struct msm_bus_node_device_type *cur_dev = NULL; + int lnode_idx = -1; + + if (!dev) + goto exit_gen_lnode; + + cur_dev = dev->platform_data; + if (!cur_dev) { + MSM_BUS_ERR("%s: Null device ptr", __func__); + goto exit_gen_lnode; + } + + if (!cur_dev->num_lnodes) { + cur_dev->lnode_list = devm_kzalloc(dev, + sizeof(struct link_node) * NUM_LNODES, + GFP_KERNEL); + if (!cur_dev->lnode_list) + goto exit_gen_lnode; + + lnode = cur_dev->lnode_list; + cur_dev->num_lnodes = NUM_LNODES; + lnode_idx = 0; + } else { + int i; + for (i = 0; i < cur_dev->num_lnodes; i++) { + if (!cur_dev->lnode_list[i].in_use) + break; + } + + if (i < cur_dev->num_lnodes) { + lnode = &cur_dev->lnode_list[i]; + lnode_idx = i; + } else { + struct link_node *realloc_list; + size_t cur_size = sizeof(struct link_node) * + cur_dev->num_lnodes; + + cur_dev->num_lnodes += NUM_LNODES; + realloc_list = msm_bus_realloc_devmem( + dev, + cur_dev->lnode_list, + cur_size, + sizeof(struct link_node) * + cur_dev->num_lnodes, GFP_KERNEL); + + if (!realloc_list) + goto exit_gen_lnode; + + cur_dev->lnode_list = realloc_list; + lnode = &cur_dev->lnode_list[i]; + lnode_idx = i; + } + } + + lnode->in_use = 1; + if (next_hop == cur_dev->node_info->id) { + lnode->next = 
-1; + lnode->next_dev = NULL; + } else { + lnode->next = prev_idx; + lnode->next_dev = bus_find_device(&msm_bus_type, NULL, + (void *) &next_hop, + msm_bus_device_match_adhoc); + } + + memset(lnode->lnode_ib, 0, sizeof(uint64_t) * NUM_CTX); + memset(lnode->lnode_ab, 0, sizeof(uint64_t) * NUM_CTX); + +exit_gen_lnode: + return lnode_idx; +} + +static int remove_lnode(struct msm_bus_node_device_type *cur_dev, + int lnode_idx) +{ + int ret = 0; + + if (!cur_dev) { + MSM_BUS_ERR("%s: Null device ptr", __func__); + ret = -ENODEV; + goto exit_remove_lnode; + } + + if (lnode_idx != -1) { + if (!cur_dev->num_lnodes || + (lnode_idx > (cur_dev->num_lnodes - 1))) { + MSM_BUS_ERR("%s: Invalid Idx %d, num_lnodes %d", + __func__, lnode_idx, cur_dev->num_lnodes); + ret = -ENODEV; + goto exit_remove_lnode; + } + + cur_dev->lnode_list[lnode_idx].next = -1; + cur_dev->lnode_list[lnode_idx].next_dev = NULL; + cur_dev->lnode_list[lnode_idx].in_use = 0; + } + +exit_remove_lnode: + return ret; +} + +static int prune_path(struct list_head *route_list, int dest, int src, + struct list_head *black_list, int found) +{ + struct bus_search_type *search_node, *temp_search_node; + struct msm_bus_node_device_type *bus_node; + struct list_head *bl_list; + struct list_head *temp_bl_list; + int search_dev_id = dest; + struct device *dest_dev = bus_find_device(&msm_bus_type, NULL, + (void *) &dest, + msm_bus_device_match_adhoc); + int lnode_hop = -1; + + if (!found) + goto reset_links; + + if (!dest_dev) { + MSM_BUS_ERR("%s: Can't find dest dev %d", __func__, dest); + goto exit_prune_path; + } + + lnode_hop = gen_lnode(dest_dev, search_dev_id, lnode_hop); + + list_for_each_entry_reverse(search_node, route_list, link) { + list_for_each_entry(bus_node, &search_node->node_list, link) { + unsigned int i; + for (i = 0; i < bus_node->node_info->num_connections; + i++) { + if (bus_node->node_info->connections[i] == + search_dev_id) { + dest_dev = bus_find_device( + &msm_bus_type, + NULL, + (void *) + 
&bus_node->node_info-> + id, + msm_bus_device_match_adhoc); + + if (!dest_dev) { + lnode_hop = -1; + goto reset_links; + } + + lnode_hop = gen_lnode(dest_dev, + search_dev_id, + lnode_hop); + search_dev_id = + bus_node->node_info->id; + break; + } + } + } + } +reset_links: + list_for_each_entry_safe(search_node, temp_search_node, route_list, + link) { + list_for_each_entry(bus_node, &search_node->node_list, + link) + bus_node->node_info->is_traversed = false; + + list_del(&search_node->link); + kfree(search_node); + } + + list_for_each_safe(bl_list, temp_bl_list, black_list) + list_del(bl_list); + +exit_prune_path: + return lnode_hop; +} + +static void setup_bl_list(struct msm_bus_node_device_type *node, + struct list_head *black_list) +{ + unsigned int i; + + for (i = 0; i < node->node_info->num_blist; i++) { + struct msm_bus_node_device_type *bdev; + bdev = node->node_info->black_connections[i]->platform_data; + list_add_tail(&bdev->link, black_list); + } +} + +static int getpath(int src, int dest) +{ + struct list_head traverse_list; + struct list_head edge_list; + struct list_head route_list; + struct list_head black_list; + struct device *src_dev = bus_find_device(&msm_bus_type, NULL, + (void *) &src, + msm_bus_device_match_adhoc); + struct msm_bus_node_device_type *src_node; + struct bus_search_type *search_node; + int found = 0; + int depth_index = 0; + int first_hop = -1; + + INIT_LIST_HEAD(&traverse_list); + INIT_LIST_HEAD(&edge_list); + INIT_LIST_HEAD(&route_list); + INIT_LIST_HEAD(&black_list); + + if (!src_dev) { + MSM_BUS_ERR("%s: Cannot locate src dev %d", __func__, src); + goto exit_getpath; + } + + src_node = src_dev->platform_data; + if (!src_node) { + MSM_BUS_ERR("%s:Fatal, Source dev %d not found", __func__, src); + goto exit_getpath; + } + list_add_tail(&src_node->link, &traverse_list); + + while ((!found && !list_empty(&traverse_list))) { + struct msm_bus_node_device_type *bus_node = NULL; + /* Locate dest_id in the traverse list */ + 
list_for_each_entry(bus_node, &traverse_list, link) { + if (bus_node->node_info->id == dest) { + found = 1; + break; + } + } + + if (!found) { + unsigned int i; + /* Setup the new edge list */ + list_for_each_entry(bus_node, &traverse_list, link) { + /* Setup list of black-listed nodes */ + setup_bl_list(bus_node, &black_list); + + for (i = 0; i < bus_node->node_info-> + num_connections; i++) { + bool skip; + struct msm_bus_node_device_type + *node_conn; + node_conn = bus_node->node_info-> + dev_connections[i]-> + platform_data; + if (node_conn->node_info-> + is_traversed) { + MSM_BUS_ERR("Circ Path %d\n", + node_conn->node_info->id); + goto reset_traversed; + } + skip = chk_bl_list(&black_list, + bus_node->node_info-> + connections[i]); + if (!skip) { + list_add_tail(&node_conn->link, + &edge_list); + node_conn->node_info-> + is_traversed = true; + } + } + } + + /* Keep tabs of the previous search list */ + search_node = kzalloc(sizeof(struct bus_search_type), + GFP_KERNEL); + INIT_LIST_HEAD(&search_node->node_list); + list_splice_init(&traverse_list, + &search_node->node_list); + /* Add the previous search list to a route list */ + list_add_tail(&search_node->link, &route_list); + /* Advancing the list depth */ + depth_index++; + list_splice_init(&edge_list, &traverse_list); + } + } +reset_traversed: + copy_remaining_nodes(&edge_list, &traverse_list, &route_list); + first_hop = prune_path(&route_list, dest, src, &black_list, found); + +exit_getpath: + return first_hop; +} + +static uint64_t arbitrate_bus_req(struct msm_bus_node_device_type *bus_dev, + int ctx) +{ + int i; + uint64_t max_ib = 0; + uint64_t sum_ab = 0; + uint64_t bw_max_hz; + struct msm_bus_node_device_type *fab_dev = NULL; + + /* Find max ib */ + for (i = 0; i < bus_dev->num_lnodes; i++) { + max_ib = max(max_ib, bus_dev->lnode_list[i].lnode_ib[ctx]); + sum_ab += bus_dev->lnode_list[i].lnode_ab[ctx]; + } + /* + * Account for Util factor and vrail comp. 
The new aggregation + * formula is: + * Freq_hz = max((sum(ab) * util_fact)/num_chan, max(ib)/vrail_comp) + * / bus-width + * util_fact and vrail comp are obtained from fabric's dts properties. + * They default to 100 if absent. + */ + fab_dev = bus_dev->node_info->bus_device->platform_data; + + /* Don't do this for virtual fabrics */ + if (fab_dev && fab_dev->fabdev) { + sum_ab *= fab_dev->fabdev->util_fact; + sum_ab = msm_bus_div64(100, sum_ab); + max_ib *= 100; + max_ib = msm_bus_div64(fab_dev->fabdev->vrail_comp, max_ib); + } + + /* Account for multiple channels if any */ + if (bus_dev->node_info->num_qports > 1) + sum_ab = msm_bus_div64(bus_dev->node_info->num_qports, + sum_ab); + + if (!bus_dev->node_info->buswidth) { + MSM_BUS_WARN("No bus width found for %d. Using default\n", + bus_dev->node_info->id); + bus_dev->node_info->buswidth = 8; + } + + bw_max_hz = max(max_ib, sum_ab); + bw_max_hz = msm_bus_div64(bus_dev->node_info->buswidth, + bw_max_hz); + + return bw_max_hz; +} + +static void del_inp_list(struct list_head *list) +{ + struct rule_update_path_info *rule_node; + struct rule_update_path_info *rule_node_tmp; + + list_for_each_entry_safe(rule_node, rule_node_tmp, list, link) + list_del(&rule_node->link); +} + +static void del_op_list(struct list_head *list) +{ + struct rule_apply_rcm_info *rule; + struct rule_apply_rcm_info *rule_tmp; + + list_for_each_entry_safe(rule, rule_tmp, list, link) + list_del(&rule->link); +} + +static int msm_bus_apply_rules(struct list_head *list, bool after_clk_commit) +{ + struct rule_apply_rcm_info *rule; + struct device *dev = NULL; + struct msm_bus_node_device_type *dev_info = NULL; + int ret = 0; + bool throttle_en = false; + + list_for_each_entry(rule, list, link) { + if (!rule) + continue; + + if (rule && (rule->after_clk_commit != after_clk_commit)) + continue; + + dev = bus_find_device(&msm_bus_type, NULL, + (void *) &rule->id, + msm_bus_device_match_adhoc); + + if (!dev) { + MSM_BUS_ERR("Can't find dev node for 
%d", rule->id); + continue; + } + dev_info = dev->platform_data; + + throttle_en = ((rule->throttle == THROTTLE_ON) ? true : false); + ret = msm_bus_enable_limiter(dev_info, throttle_en, + rule->lim_bw); + if (ret) + MSM_BUS_ERR("Failed to set limiter for %d", rule->id); + } + + return ret; +} + +static uint64_t get_node_aggab(struct msm_bus_node_device_type *bus_dev) +{ + int i; + int ctx; + uint64_t max_agg_ab = 0; + uint64_t agg_ab = 0; + + for (ctx = 0; ctx < NUM_CTX; ctx++) { + for (i = 0; i < bus_dev->num_lnodes; i++) + agg_ab += bus_dev->lnode_list[i].lnode_ab[ctx]; + + if (bus_dev->node_info->num_qports > 1) + agg_ab = msm_bus_div64(bus_dev->node_info->num_qports, + agg_ab); + + max_agg_ab = max(max_agg_ab, agg_ab); + } + + return max_agg_ab; +} + +static uint64_t get_node_ib(struct msm_bus_node_device_type *bus_dev) +{ + int i; + int ctx; + uint64_t max_ib = 0; + + for (ctx = 0; ctx < NUM_CTX; ctx++) { + for (i = 0; i < bus_dev->num_lnodes; i++) + max_ib = max(max_ib, + bus_dev->lnode_list[i].lnode_ib[ctx]); + } + return max_ib; +} + +static int update_path(int src, int dest, uint64_t req_ib, uint64_t req_bw, + uint64_t cur_ib, uint64_t cur_bw, int src_idx, int ctx) +{ + struct device *src_dev = NULL; + struct device *next_dev = NULL; + struct link_node *lnode = NULL; + struct msm_bus_node_device_type *dev_info = NULL; + int curr_idx; + int ret = 0; + int *dirty_nodes = NULL; + int num_dirty = 0; + struct rule_update_path_info *rule_node; + bool rules_registered = msm_rule_are_rules_registered(); + + src_dev = bus_find_device(&msm_bus_type, NULL, + (void *) &src, + msm_bus_device_match_adhoc); + + if (!src_dev) { + MSM_BUS_ERR("%s: Can't find source device %d", __func__, src); + ret = -ENODEV; + goto exit_update_path; + } + + next_dev = src_dev; + + if (src_idx < 0) { + MSM_BUS_ERR("%s: Invalid lnode idx %d", __func__, src_idx); + ret = -ENXIO; + goto exit_update_path; + } + curr_idx = src_idx; + + INIT_LIST_HEAD(&input_list); + 
INIT_LIST_HEAD(&apply_list); + + while (next_dev) { + dev_info = next_dev->platform_data; + + if (curr_idx >= dev_info->num_lnodes) { + MSM_BUS_ERR("%s: Invalid lnode Idx %d num lnodes %d", + __func__, curr_idx, dev_info->num_lnodes); + ret = -ENXIO; + goto exit_update_path; + } + + lnode = &dev_info->lnode_list[curr_idx]; + lnode->lnode_ib[ctx] = req_ib; + lnode->lnode_ab[ctx] = req_bw; + + dev_info->cur_clk_hz[ctx] = arbitrate_bus_req(dev_info, ctx); + + /* Start updating the clocks at the first hop. + * Its ok to figure out the aggregated + * request at this node. + */ + if (src_dev != next_dev) { + ret = msm_bus_update_clks(dev_info, ctx, &dirty_nodes, + &num_dirty); + if (ret) { + MSM_BUS_ERR("%s: Failed to update clks dev %d", + __func__, dev_info->node_info->id); + goto exit_update_path; + } + } + + ret = msm_bus_update_bw(dev_info, ctx, req_bw, &dirty_nodes, + &num_dirty); + if (ret) { + MSM_BUS_ERR("%s: Failed to update bw dev %d", + __func__, dev_info->node_info->id); + goto exit_update_path; + } + + if (rules_registered) { + rule_node = &dev_info->node_info->rule; + rule_node->id = dev_info->node_info->id; + rule_node->ib = get_node_ib(dev_info); + rule_node->ab = get_node_aggab(dev_info); + rule_node->clk = max(dev_info->cur_clk_hz[ACTIVE_CTX], + dev_info->cur_clk_hz[DUAL_CTX]); + list_add_tail(&rule_node->link, &input_list); + } + + next_dev = lnode->next_dev; + curr_idx = lnode->next; + } + + if (rules_registered) { + msm_rules_update_path(&input_list, &apply_list); + msm_bus_apply_rules(&apply_list, false); + } + + msm_bus_commit_data(dirty_nodes, ctx, num_dirty); + + if (rules_registered) { + msm_bus_apply_rules(&apply_list, true); + del_inp_list(&input_list); + del_op_list(&apply_list); + } +exit_update_path: + return ret; +} + +static int remove_path(int src, int dst, uint64_t cur_ib, uint64_t cur_ab, + int src_idx, int active_only) +{ + struct device *src_dev = NULL; + struct device *next_dev = NULL; + struct link_node *lnode = NULL; + struct 
msm_bus_node_device_type *dev_info = NULL; + int ret = 0; + int cur_idx = src_idx; + int next_idx; + + /* Update the current path to zero out all request from + * this cient on all paths + */ + + ret = update_path(src, dst, 0, 0, cur_ib, cur_ab, src_idx, + active_only); + if (ret) { + MSM_BUS_ERR("%s: Error zeroing out path ctx %d", + __func__, ACTIVE_CTX); + goto exit_remove_path; + } + + src_dev = bus_find_device(&msm_bus_type, NULL, + (void *) &src, + msm_bus_device_match_adhoc); + if (!src_dev) { + MSM_BUS_ERR("%s: Can't find source device %d", __func__, src); + ret = -ENODEV; + goto exit_remove_path; + } + + next_dev = src_dev; + + while (next_dev) { + dev_info = next_dev->platform_data; + lnode = &dev_info->lnode_list[cur_idx]; + next_idx = lnode->next; + next_dev = lnode->next_dev; + remove_lnode(dev_info, cur_idx); + cur_idx = next_idx; + } + +exit_remove_path: + return ret; +} + +static void getpath_debug(int src, int curr, int active_only) +{ + struct device *dev_node; + struct device *dev_it; + unsigned int hop = 1; + int idx; + struct msm_bus_node_device_type *devinfo; + int i; + + dev_node = bus_find_device(&msm_bus_type, NULL, + (void *) &src, + msm_bus_device_match_adhoc); + + if (!dev_node) { + MSM_BUS_ERR("SRC NOT FOUND %d", src); + return; + } + + idx = curr; + devinfo = dev_node->platform_data; + dev_it = dev_node; + + MSM_BUS_ERR("Route list Src %d", src); + while (dev_it) { + struct msm_bus_node_device_type *busdev = + devinfo->node_info->bus_device->platform_data; + + MSM_BUS_ERR("Hop[%d] at Device %d ctx %d", hop, + devinfo->node_info->id, active_only); + + for (i = 0; i < NUM_CTX; i++) { + MSM_BUS_ERR("dev info sel ib %llu", + devinfo->cur_clk_hz[i]); + MSM_BUS_ERR("dev info sel ab %llu", + devinfo->node_ab.ab[i]); + } + + dev_it = devinfo->lnode_list[idx].next_dev; + idx = devinfo->lnode_list[idx].next; + if (dev_it) + devinfo = dev_it->platform_data; + + MSM_BUS_ERR("Bus Device %d", busdev->node_info->id); + MSM_BUS_ERR("Bus Clock %llu", 
busdev->clk[active_only].rate); + + if (idx < 0) + break; + hop++; + } +} + +static void unregister_client_adhoc(uint32_t cl) +{ + int i; + struct msm_bus_scale_pdata *pdata; + int lnode, src, curr, dest; + uint64_t cur_clk, cur_bw; + struct msm_bus_client *client; + + mutex_lock(&msm_bus_adhoc_lock); + if (!cl) { + MSM_BUS_ERR("%s: Null cl handle passed unregister\n", + __func__); + goto exit_unregister_client; + } + client = handle_list.cl_list[cl]; + pdata = client->pdata; + if (!pdata) { + MSM_BUS_ERR("%s: Null pdata passed to unregister\n", + __func__); + goto exit_unregister_client; + } + + curr = client->curr; + if (curr >= pdata->num_usecases) { + MSM_BUS_ERR("Invalid index Defaulting curr to 0"); + curr = 0; + } + + MSM_BUS_DBG("%s: Unregistering client %p", __func__, client); + + for (i = 0; i < pdata->usecase->num_paths; i++) { + src = client->pdata->usecase[curr].vectors[i].src; + dest = client->pdata->usecase[curr].vectors[i].dst; + + lnode = client->src_pnode[i]; + cur_clk = client->pdata->usecase[curr].vectors[i].ib; + cur_bw = client->pdata->usecase[curr].vectors[i].ab; + remove_path(src, dest, cur_clk, cur_bw, lnode, + pdata->active_only); + } + msm_bus_dbg_client_data(client->pdata, MSM_BUS_DBG_UNREGISTER, cl); + kfree(client->src_pnode); + kfree(client); + handle_list.cl_list[cl] = NULL; +exit_unregister_client: + mutex_unlock(&msm_bus_adhoc_lock); + return; +} + +static int alloc_handle_lst(int size) +{ + int ret = 0; + struct msm_bus_client **t_cl_list; + + if (!handle_list.num_entries) { + t_cl_list = kzalloc(sizeof(struct msm_bus_client *) + * NUM_CL_HANDLES, GFP_KERNEL); + if (ZERO_OR_NULL_PTR(t_cl_list)) { + ret = -ENOMEM; + MSM_BUS_ERR("%s: Failed to allocate handles list", + __func__); + goto exit_alloc_handle_lst; + } + handle_list.cl_list = t_cl_list; + handle_list.num_entries += NUM_CL_HANDLES; + } else { + t_cl_list = krealloc(handle_list.cl_list, + sizeof(struct msm_bus_client *) * + handle_list.num_entries + NUM_CL_HANDLES, + 
GFP_KERNEL); + if (ZERO_OR_NULL_PTR(t_cl_list)) { + ret = -ENOMEM; + MSM_BUS_ERR("%s: Failed to allocate handles list", + __func__); + goto exit_alloc_handle_lst; + } + + memset(&handle_list.cl_list[handle_list.num_entries], 0, + NUM_CL_HANDLES * sizeof(struct msm_bus_client *)); + handle_list.num_entries += NUM_CL_HANDLES; + handle_list.cl_list = t_cl_list; + } +exit_alloc_handle_lst: + return ret; +} + +static uint32_t gen_handle(struct msm_bus_client *client) +{ + uint32_t handle = 0; + int i; + int ret = 0; + + for (i = 0; i < handle_list.num_entries; i++) { + if (i && !handle_list.cl_list[i]) { + handle = i; + break; + } + } + + if (!handle) { + ret = alloc_handle_lst(NUM_CL_HANDLES); + + if (ret) { + MSM_BUS_ERR("%s: Failed to allocate handle list", + __func__); + goto exit_gen_handle; + } + handle = i + 1; + } + handle_list.cl_list[handle] = client; +exit_gen_handle: + return handle; +} + +static uint32_t register_client_adhoc(struct msm_bus_scale_pdata *pdata) +{ + int src, dest; + int i; + struct msm_bus_client *client = NULL; + int *lnode; + uint32_t handle = 0; + + mutex_lock(&msm_bus_adhoc_lock); + client = kzalloc(sizeof(struct msm_bus_client), GFP_KERNEL); + if (!client) { + MSM_BUS_ERR("%s: Error allocating client data", __func__); + goto exit_register_client; + } + client->pdata = pdata; + + lnode = kzalloc(pdata->usecase->num_paths * sizeof(int), GFP_KERNEL); + if (ZERO_OR_NULL_PTR(lnode)) { + MSM_BUS_ERR("%s: Error allocating pathnode ptr!", __func__); + goto exit_register_client; + } + client->src_pnode = lnode; + + for (i = 0; i < pdata->usecase->num_paths; i++) { + src = pdata->usecase->vectors[i].src; + dest = pdata->usecase->vectors[i].dst; + + if ((src < 0) || (dest < 0)) { + MSM_BUS_ERR("%s:Invalid src/dst.src %d dest %d", + __func__, src, dest); + goto exit_register_client; + } + + lnode[i] = getpath(src, dest); + if (lnode[i] < 0) { + MSM_BUS_ERR("%s:Failed to find path.src %d dest %d", + __func__, src, dest); + goto exit_register_client; 
+ } + } + + handle = gen_handle(client); + msm_bus_dbg_client_data(client->pdata, MSM_BUS_DBG_REGISTER, + handle); + MSM_BUS_DBG("%s:Client handle %d %s", __func__, handle, + client->pdata->name); +exit_register_client: + mutex_unlock(&msm_bus_adhoc_lock); + return handle; +} + +static int update_request_adhoc(uint32_t cl, unsigned int index) +{ + int i, ret = 0; + struct msm_bus_scale_pdata *pdata; + int lnode, src, curr, dest; + uint64_t req_clk, req_bw, curr_clk, curr_bw; + struct msm_bus_client *client; + const char *test_cl = "Null"; + bool log_transaction = false; + + mutex_lock(&msm_bus_adhoc_lock); + + if (!cl) { + MSM_BUS_ERR("%s: Invalid client handle %d", __func__, cl); + ret = -ENXIO; + goto exit_update_request; + } + + client = handle_list.cl_list[cl]; + pdata = client->pdata; + if (!pdata) { + MSM_BUS_ERR("%s: Client data Null.[client didn't register]", + __func__); + ret = -ENXIO; + goto exit_update_request; + } + + if (index >= pdata->num_usecases) { + MSM_BUS_ERR("Client %u passed invalid index: %d\n", + cl, index); + ret = -ENXIO; + goto exit_update_request; + } + + if (client->curr == index) { + MSM_BUS_DBG("%s: Not updating client request idx %d unchanged", + __func__, index); + goto exit_update_request; + } + + curr = client->curr; + client->curr = index; + + if (!strcmp(test_cl, pdata->name)) + log_transaction = true; + + MSM_BUS_DBG("%s: cl: %u index: %d curr: %d num_paths: %d\n", __func__, + cl, index, client->curr, client->pdata->usecase->num_paths); + + msm_bus_dbg_client_data(client->pdata, index , cl); + for (i = 0; i < pdata->usecase->num_paths; i++) { + src = client->pdata->usecase[index].vectors[i].src; + dest = client->pdata->usecase[index].vectors[i].dst; + + lnode = client->src_pnode[i]; + req_clk = client->pdata->usecase[index].vectors[i].ib; + req_bw = client->pdata->usecase[index].vectors[i].ab; + if (curr < 0) { + curr_clk = 0; + curr_bw = 0; + } else { + curr_clk = client->pdata->usecase[curr].vectors[i].ib; + curr_bw = 
client->pdata->usecase[curr].vectors[i].ab; + MSM_BUS_DBG("%s:ab: %llu ib: %llu\n", __func__, + curr_bw, curr_clk); + } + + ret = update_path(src, dest, req_clk, req_bw, + curr_clk, curr_bw, lnode, pdata->active_only); + + if (ret) { + MSM_BUS_ERR("%s: Update path failed! %d ctx %d\n", + __func__, ret, ACTIVE_CTX); + goto exit_update_request; + } + + if (log_transaction) + getpath_debug(src, lnode, pdata->active_only); + } + trace_bus_update_request_end(pdata->name); +exit_update_request: + mutex_unlock(&msm_bus_adhoc_lock); + return ret; +} + +static void free_cl_mem(struct msm_bus_client_handle *cl) +{ + if (cl) { + kfree(cl->name); + kfree(cl); + cl = NULL; + } +} + +static int update_bw_adhoc(struct msm_bus_client_handle *cl, u64 ab, u64 ib) +{ + int ret = 0; + char *test_cl = "test-client"; + bool log_transaction = false; + + mutex_lock(&msm_bus_adhoc_lock); + + if (!cl) { + MSM_BUS_ERR("%s: Invalid client handle %p", __func__, cl); + ret = -ENXIO; + goto exit_update_request; + } + + if (!strcmp(test_cl, cl->name)) + log_transaction = true; + + msm_bus_dbg_rec_transaction(cl, ab, ib); + + if ((cl->cur_ib == ib) && (cl->cur_ab == ab)) { + MSM_BUS_DBG("%s:no change in request", cl->name); + goto exit_update_request; + } + + ret = update_path(cl->mas, cl->slv, ib, ab, cl->cur_ib, cl->cur_ab, + cl->first_hop, cl->active_only); + + if (ret) { + MSM_BUS_ERR("%s: Update path failed! 
%d active_only %d\n", + __func__, ret, cl->active_only); + goto exit_update_request; + } + + cl->cur_ib = ib; + cl->cur_ab = ab; + + if (log_transaction) + getpath_debug(cl->mas, cl->first_hop, cl->active_only); + trace_bus_update_request_end(cl->name); +exit_update_request: + mutex_unlock(&msm_bus_adhoc_lock); + + return ret; +} + +static void unregister_adhoc(struct msm_bus_client_handle *cl) +{ + mutex_lock(&msm_bus_adhoc_lock); + if (!cl) { + MSM_BUS_ERR("%s: Null cl handle passed unregister\n", + __func__); + goto exit_unregister_client; + } + + MSM_BUS_DBG("%s: Unregistering client %p", __func__, cl); + + remove_path(cl->mas, cl->slv, cl->cur_ib, cl->cur_ab, + cl->first_hop, cl->active_only); + + msm_bus_dbg_remove_client(cl); + kfree(cl); +exit_unregister_client: + mutex_unlock(&msm_bus_adhoc_lock); + return; +} + + +static struct msm_bus_client_handle* +register_adhoc(uint32_t mas, uint32_t slv, char *name, bool active_only) +{ + struct msm_bus_client_handle *client = NULL; + int len = 0; + + mutex_lock(&msm_bus_adhoc_lock); + + if (!(mas && slv && name)) { + pr_err("%s: Error: src dst name num_paths are required", + __func__); + goto exit_register; + } + + client = kzalloc(sizeof(struct msm_bus_client_handle), GFP_KERNEL); + if (!client) { + MSM_BUS_ERR("%s: Error allocating client data", __func__); + goto exit_register; + } + + len = strnlen(name, MAX_STR_CL); + client->name = kzalloc(len, GFP_KERNEL); + if (!client->name) { + MSM_BUS_ERR("%s: Error allocating client name buf", __func__); + free_cl_mem(client); + goto exit_register; + } + strlcpy(client->name, name, MAX_STR_CL); + client->active_only = active_only; + + client->mas = mas; + client->slv = slv; + client->first_hop = getpath(client->mas, client->slv); + if (client->first_hop < 0) { + MSM_BUS_ERR("%s:Failed to find path.src %d dest %d", + __func__, client->mas, client->slv); + free_cl_mem(client); + goto exit_register; + } + + MSM_BUS_DBG("%s:Client handle %p %s", __func__, client, + 
client->name); + msm_bus_dbg_add_client(client); +exit_register: + mutex_unlock(&msm_bus_adhoc_lock); + return client; +} +/** + * msm_bus_arb_setops_adhoc() : Setup the bus arbitration ops + * @ arb_ops: pointer to the arb ops. + */ +void msm_bus_arb_setops_adhoc(struct msm_bus_arb_ops *arb_ops) +{ + arb_ops->register_client = register_client_adhoc; + arb_ops->update_request = update_request_adhoc; + arb_ops->unregister_client = unregister_client_adhoc; + + arb_ops->register_cl = register_adhoc; + arb_ops->unregister = unregister_adhoc; + arb_ops->update_bw = update_bw_adhoc; +} diff --git a/drivers/soc/qcom/msm_bus/msm_bus_bimc.c b/drivers/soc/qcom/msm_bus/msm_bus_bimc.c new file mode 100644 index 000000000000..78bfeb09fddc --- /dev/null +++ b/drivers/soc/qcom/msm_bus/msm_bus_bimc.c @@ -0,0 +1,2112 @@ +/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#define pr_fmt(fmt) "AXI: BIMC: %s(): " fmt, __func__ + +#include <linux/slab.h> +#include <linux/io.h> +#include <linux/msm-bus-board.h> +#include "msm_bus_core.h" +#include "msm_bus_bimc.h" +#include "msm_bus_adhoc.h" +#include <trace/events/trace_msm_bus.h> + +enum msm_bus_bimc_slave_block { + SLAVE_BLOCK_RESERVED = 0, + SLAVE_BLOCK_SLAVE_WAY, + SLAVE_BLOCK_XPU, + SLAVE_BLOCK_ARBITER, + SLAVE_BLOCK_SCMO, +}; + +enum bke_sw { + BKE_OFF = 0, + BKE_ON = 1, +}; + +/* M_Generic */ + +#define M_REG_BASE(b) ((b) + 0x00008000) + +#define M_COMPONENT_INFO_ADDR(b, n) \ + (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000000) +enum bimc_m_component_info { + M_COMPONENT_INFO_RMSK = 0xffffff, + M_COMPONENT_INFO_INSTANCE_BMSK = 0xff0000, + M_COMPONENT_INFO_INSTANCE_SHFT = 0x10, + M_COMPONENT_INFO_SUB_TYPE_BMSK = 0xff00, + M_COMPONENT_INFO_SUB_TYPE_SHFT = 0x8, + M_COMPONENT_INFO_TYPE_BMSK = 0xff, + M_COMPONENT_INFO_TYPE_SHFT = 0x0, +}; + +#define M_CONFIG_INFO_0_ADDR(b, n) \ + (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000020) +enum bimc_m_config_info_0 { + M_CONFIG_INFO_0_RMSK = 0xff00ffff, + M_CONFIG_INFO_0_SYNC_MODE_BMSK = 0xff000000, + M_CONFIG_INFO_0_SYNC_MODE_SHFT = 0x18, + M_CONFIG_INFO_0_CONNECTION_TYPE_BMSK = 0xff00, + M_CONFIG_INFO_0_CONNECTION_TYPE_SHFT = 0x8, + M_CONFIG_INFO_0_FUNC_BMSK = 0xff, + M_CONFIG_INFO_0_FUNC_SHFT = 0x0, +}; + +#define M_CONFIG_INFO_1_ADDR(b, n) \ + (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000030) +enum bimc_m_config_info_1 { + M_CONFIG_INFO_1_RMSK = 0xffffffff, + M_CONFIG_INFO_1_SWAY_CONNECTIVITY_BMSK = 0xffffffff, + M_CONFIG_INFO_1_SWAY_CONNECTIVITY_SHFT = 0x0, +}; + +#define M_CONFIG_INFO_2_ADDR(b, n) \ + (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000040) +enum bimc_m_config_info_2 { + M_CONFIG_INFO_2_RMSK = 0xffffffff, + M_CONFIG_INFO_2_M_DATA_WIDTH_BMSK = 0xffff0000, + M_CONFIG_INFO_2_M_DATA_WIDTH_SHFT = 0x10, + M_CONFIG_INFO_2_M_TID_WIDTH_BMSK = 0xff00, + M_CONFIG_INFO_2_M_TID_WIDTH_SHFT = 0x8, + M_CONFIG_INFO_2_M_MID_WIDTH_BMSK = 0xff, + 
M_CONFIG_INFO_2_M_MID_WIDTH_SHFT = 0x0, +}; + +#define M_CONFIG_INFO_3_ADDR(b, n) \ + (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000050) +enum bimc_m_config_info_3 { + M_CONFIG_INFO_3_RMSK = 0xffffffff, + M_CONFIG_INFO_3_RCH_DEPTH_BMSK = 0xff000000, + M_CONFIG_INFO_3_RCH_DEPTH_SHFT = 0x18, + M_CONFIG_INFO_3_BCH_DEPTH_BMSK = 0xff0000, + M_CONFIG_INFO_3_BCH_DEPTH_SHFT = 0x10, + M_CONFIG_INFO_3_WCH_DEPTH_BMSK = 0xff00, + M_CONFIG_INFO_3_WCH_DEPTH_SHFT = 0x8, + M_CONFIG_INFO_3_ACH_DEPTH_BMSK = 0xff, + M_CONFIG_INFO_3_ACH_DEPTH_SHFT = 0x0, +}; + +#define M_CONFIG_INFO_4_ADDR(b, n) \ + (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000060) +enum bimc_m_config_info_4 { + M_CONFIG_INFO_4_RMSK = 0xffff, + M_CONFIG_INFO_4_REORDER_BUF_DEPTH_BMSK = 0xff00, + M_CONFIG_INFO_4_REORDER_BUF_DEPTH_SHFT = 0x8, + M_CONFIG_INFO_4_REORDER_TABLE_DEPTH_BMSK = 0xff, + M_CONFIG_INFO_4_REORDER_TABLE_DEPTH_SHFT = 0x0, +}; + +#define M_CONFIG_INFO_5_ADDR(b, n) \ + (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000070) +enum bimc_m_config_info_5 { + M_CONFIG_INFO_5_RMSK = 0x111, + M_CONFIG_INFO_5_MP2ARB_PIPELINE_EN_BMSK = 0x100, + M_CONFIG_INFO_5_MP2ARB_PIPELINE_EN_SHFT = 0x8, + M_CONFIG_INFO_5_MPBUF_PIPELINE_EN_BMSK = 0x10, + M_CONFIG_INFO_5_MPBUF_PIPELINE_EN_SHFT = 0x4, + M_CONFIG_INFO_5_M2MP_PIPELINE_EN_BMSK = 0x1, + M_CONFIG_INFO_5_M2MP_PIPELINE_EN_SHFT = 0x0, +}; + +#define M_INT_STATUS_ADDR(b, n) \ + (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000100) +enum bimc_m_int_status { + M_INT_STATUS_RMSK = 0x3, +}; + +#define M_INT_CLR_ADDR(b, n) \ + (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000108) +enum bimc_m_int_clr { + M_INT_CLR_RMSK = 0x3, +}; + +#define M_INT_EN_ADDR(b, n) \ + (M_REG_BASE(b) + (0x4000 * (n)) + 0x0000010c) +enum bimc_m_int_en { + M_INT_EN_RMSK = 0x3, +}; + +#define M_CLK_CTRL_ADDR(b, n) \ + (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000200) +enum bimc_m_clk_ctrl { + M_CLK_CTRL_RMSK = 0x3, + M_CLK_CTRL_MAS_CLK_GATING_EN_BMSK = 0x2, + M_CLK_CTRL_MAS_CLK_GATING_EN_SHFT = 0x1, + M_CLK_CTRL_CORE_CLK_GATING_EN_BMSK = 
0x1, + M_CLK_CTRL_CORE_CLK_GATING_EN_SHFT = 0x0, +}; + +#define M_MODE_ADDR(b, n) \ + (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000210) +enum bimc_m_mode { + M_MODE_RMSK = 0xf0000011, + M_MODE_WR_GATHER_BEATS_BMSK = 0xf0000000, + M_MODE_WR_GATHER_BEATS_SHFT = 0x1c, + M_MODE_NARROW_WR_BMSK = 0x10, + M_MODE_NARROW_WR_SHFT = 0x4, + M_MODE_ORDERING_MODEL_BMSK = 0x1, + M_MODE_ORDERING_MODEL_SHFT = 0x0, +}; + +#define M_PRIOLVL_OVERRIDE_ADDR(b, n) \ + (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000230) +enum bimc_m_priolvl_override { + M_PRIOLVL_OVERRIDE_RMSK = 0x301, + M_PRIOLVL_OVERRIDE_BMSK = 0x300, + M_PRIOLVL_OVERRIDE_SHFT = 0x8, + M_PRIOLVL_OVERRIDE_OVERRIDE_PRIOLVL_BMSK = 0x1, + M_PRIOLVL_OVERRIDE_OVERRIDE_PRIOLVL_SHFT = 0x0, +}; + +#define M_RD_CMD_OVERRIDE_ADDR(b, n) \ + (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000240) +enum bimc_m_read_command_override { + M_RD_CMD_OVERRIDE_RMSK = 0x3071f7f, + M_RD_CMD_OVERRIDE_AREQPRIO_BMSK = 0x3000000, + M_RD_CMD_OVERRIDE_AREQPRIO_SHFT = 0x18, + M_RD_CMD_OVERRIDE_AMEMTYPE_BMSK = 0x70000, + M_RD_CMD_OVERRIDE_AMEMTYPE_SHFT = 0x10, + M_RD_CMD_OVERRIDE_ATRANSIENT_BMSK = 0x1000, + M_RD_CMD_OVERRIDE_ATRANSIENT_SHFT = 0xc, + M_RD_CMD_OVERRIDE_ASHARED_BMSK = 0x800, + M_RD_CMD_OVERRIDE_ASHARED_SHFT = 0xb, + M_RD_CMD_OVERRIDE_AREDIRECT_BMSK = 0x400, + M_RD_CMD_OVERRIDE_AREDIRECT_SHFT = 0xa, + M_RD_CMD_OVERRIDE_AOOO_BMSK = 0x200, + M_RD_CMD_OVERRIDE_AOOO_SHFT = 0x9, + M_RD_CMD_OVERRIDE_AINNERSHARED_BMSK = 0x100, + M_RD_CMD_OVERRIDE_AINNERSHARED_SHFT = 0x8, + M_RD_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK = 0x40, + M_RD_CMD_OVERRIDE_OVERRIDE_AREQPRIO_SHFT = 0x6, + M_RD_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_BMSK = 0x20, + M_RD_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_SHFT = 0x5, + M_RD_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_BMSK = 0x10, + M_RD_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_SHFT = 0x4, + M_RD_CMD_OVERRIDE_OVERRIDE_ASHARED_BMSK = 0x8, + M_RD_CMD_OVERRIDE_OVERRIDE_ASHARED_SHFT = 0x3, + M_RD_CMD_OVERRIDE_OVERRIDE_AREDIRECT_BMSK = 0x4, + M_RD_CMD_OVERRIDE_OVERRIDE_AREDIRECT_SHFT = 
0x2, + M_RD_CMD_OVERRIDE_OVERRIDE_AOOO_BMSK = 0x2, + M_RD_CMD_OVERRIDE_OVERRIDE_AOOO_SHFT = 0x1, + M_RD_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_BMSK = 0x1, + M_RD_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_SHFT = 0x0, +}; + +#define M_WR_CMD_OVERRIDE_ADDR(b, n) \ + (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000250) +enum bimc_m_write_command_override { + M_WR_CMD_OVERRIDE_RMSK = 0x3071f7f, + M_WR_CMD_OVERRIDE_AREQPRIO_BMSK = 0x3000000, + M_WR_CMD_OVERRIDE_AREQPRIO_SHFT = 0x18, + M_WR_CMD_OVERRIDE_AMEMTYPE_BMSK = 0x70000, + M_WR_CMD_OVERRIDE_AMEMTYPE_SHFT = 0x10, + M_WR_CMD_OVERRIDE_ATRANSIENT_BMSK = 0x1000, + M_WR_CMD_OVERRIDE_ATRANSIENT_SHFT = 0xc, + M_WR_CMD_OVERRIDE_ASHARED_BMSK = 0x800, + M_WR_CMD_OVERRIDE_ASHARED_SHFT = 0xb, + M_WR_CMD_OVERRIDE_AREDIRECT_BMSK = 0x400, + M_WR_CMD_OVERRIDE_AREDIRECT_SHFT = 0xa, + M_WR_CMD_OVERRIDE_AOOO_BMSK = 0x200, + M_WR_CMD_OVERRIDE_AOOO_SHFT = 0x9, + M_WR_CMD_OVERRIDE_AINNERSHARED_BMSK = 0x100, + M_WR_CMD_OVERRIDE_AINNERSHARED_SHFT = 0x8, + M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK = 0x40, + M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_SHFT = 0x6, + M_WR_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_BMSK = 0x20, + M_WR_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_SHFT = 0x5, + M_WR_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_BMSK = 0x10, + M_WR_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_SHFT = 0x4, + M_WR_CMD_OVERRIDE_OVERRIDE_ASHARED_BMSK = 0x8, + M_WR_CMD_OVERRIDE_OVERRIDE_ASHARED_SHFT = 0x3, + M_WR_CMD_OVERRIDE_OVERRIDE_AREDIRECT_BMSK = 0x4, + M_WR_CMD_OVERRIDE_OVERRIDE_AREDIRECT_SHFT = 0x2, + M_WR_CMD_OVERRIDE_OVERRIDE_AOOO_BMSK = 0x2, + M_WR_CMD_OVERRIDE_OVERRIDE_AOOO_SHFT = 0x1, + M_WR_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_BMSK = 0x1, + M_WR_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_SHFT = 0x0, +}; + +#define M_BKE_EN_ADDR(b, n) \ + (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000300) +enum bimc_m_bke_en { + M_BKE_EN_RMSK = 0x1, + M_BKE_EN_EN_BMSK = 0x1, + M_BKE_EN_EN_SHFT = 0x0, +}; + +/* Grant Period registers */ +#define M_BKE_GP_ADDR(b, n) \ + (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000304) +enum 
bimc_m_bke_grant_period { + M_BKE_GP_RMSK = 0x3ff, + M_BKE_GP_GP_BMSK = 0x3ff, + M_BKE_GP_GP_SHFT = 0x0, +}; + +/* Grant count register. + * The Grant count register represents a signed 16 bit + * value, range 0-0x7fff + */ +#define M_BKE_GC_ADDR(b, n) \ + (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000308) +enum bimc_m_bke_grant_count { + M_BKE_GC_RMSK = 0xffff, + M_BKE_GC_GC_BMSK = 0xffff, + M_BKE_GC_GC_SHFT = 0x0, +}; + +/* Threshold High Registers */ +#define M_BKE_THH_ADDR(b, n) \ + (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000320) +enum bimc_m_bke_thresh_high { + M_BKE_THH_RMSK = 0xffff, + M_BKE_THH_THRESH_BMSK = 0xffff, + M_BKE_THH_THRESH_SHFT = 0x0, +}; + +/* Threshold Medium Registers */ +#define M_BKE_THM_ADDR(b, n) \ + (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000324) +enum bimc_m_bke_thresh_medium { + M_BKE_THM_RMSK = 0xffff, + M_BKE_THM_THRESH_BMSK = 0xffff, + M_BKE_THM_THRESH_SHFT = 0x0, +}; + +/* Threshold Low Registers */ +#define M_BKE_THL_ADDR(b, n) \ + (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000328) +enum bimc_m_bke_thresh_low { + M_BKE_THL_RMSK = 0xffff, + M_BKE_THL_THRESH_BMSK = 0xffff, + M_BKE_THL_THRESH_SHFT = 0x0, +}; + +#define M_BKE_HEALTH_0_CONFIG_ADDR(b, n) \ + (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000340) +enum bimc_m_bke_health_0 { + M_BKE_HEALTH_0_CONFIG_RMSK = 0x80000303, + M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_BMSK = 0x80000000, + M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_SHFT = 0x1f, + M_BKE_HEALTH_0_CONFIG_AREQPRIO_BMSK = 0x300, + M_BKE_HEALTH_0_CONFIG_AREQPRIO_SHFT = 0x8, + M_BKE_HEALTH_0_CONFIG_PRIOLVL_BMSK = 0x3, + M_BKE_HEALTH_0_CONFIG_PRIOLVL_SHFT = 0x0, +}; + +#define M_BKE_HEALTH_1_CONFIG_ADDR(b, n) \ + (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000344) +enum bimc_m_bke_health_1 { + M_BKE_HEALTH_1_CONFIG_RMSK = 0x80000303, + M_BKE_HEALTH_1_CONFIG_LIMIT_CMDS_BMSK = 0x80000000, + M_BKE_HEALTH_1_CONFIG_LIMIT_CMDS_SHFT = 0x1f, + M_BKE_HEALTH_1_CONFIG_AREQPRIO_BMSK = 0x300, + M_BKE_HEALTH_1_CONFIG_AREQPRIO_SHFT = 0x8, + M_BKE_HEALTH_1_CONFIG_PRIOLVL_BMSK = 0x3, + 
M_BKE_HEALTH_1_CONFIG_PRIOLVL_SHFT = 0x0, +}; + +#define M_BKE_HEALTH_2_CONFIG_ADDR(b, n) \ + (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000348) +enum bimc_m_bke_health_2 { + M_BKE_HEALTH_2_CONFIG_RMSK = 0x80000303, + M_BKE_HEALTH_2_CONFIG_LIMIT_CMDS_BMSK = 0x80000000, + M_BKE_HEALTH_2_CONFIG_LIMIT_CMDS_SHFT = 0x1f, + M_BKE_HEALTH_2_CONFIG_AREQPRIO_BMSK = 0x300, + M_BKE_HEALTH_2_CONFIG_AREQPRIO_SHFT = 0x8, + M_BKE_HEALTH_2_CONFIG_PRIOLVL_BMSK = 0x3, + M_BKE_HEALTH_2_CONFIG_PRIOLVL_SHFT = 0x0, +}; + +#define M_BKE_HEALTH_3_CONFIG_ADDR(b, n) \ + (M_REG_BASE(b) + (0x4000 * (n)) + 0x0000034c) +enum bimc_m_bke_health_3 { + M_BKE_HEALTH_3_CONFIG_RMSK = 0x303, + M_BKE_HEALTH_3_CONFIG_AREQPRIO_BMSK = 0x300, + M_BKE_HEALTH_3_CONFIG_AREQPRIO_SHFT = 0x8, + M_BKE_HEALTH_3_CONFIG_PRIOLVL_BMSK = 0x3, + M_BKE_HEALTH_3_CONFIG_PRIOLVL_SHFT = 0x0, +}; + +#define M_BUF_STATUS_ADDR(b, n) \ + (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000400) +enum bimc_m_buf_status { + M_BUF_STATUS_RMSK = 0xf03f030, + M_BUF_STATUS_RCH_DATA_WR_FULL_BMSK = 0x8000000, + M_BUF_STATUS_RCH_DATA_WR_FULL_SHFT = 0x1b, + M_BUF_STATUS_RCH_DATA_WR_EMPTY_BMSK = 0x4000000, + M_BUF_STATUS_RCH_DATA_WR_EMPTY_SHFT = 0x1a, + M_BUF_STATUS_RCH_CTRL_WR_FULL_BMSK = 0x2000000, + M_BUF_STATUS_RCH_CTRL_WR_FULL_SHFT = 0x19, + M_BUF_STATUS_RCH_CTRL_WR_EMPTY_BMSK = 0x1000000, + M_BUF_STATUS_RCH_CTRL_WR_EMPTY_SHFT = 0x18, + M_BUF_STATUS_BCH_WR_FULL_BMSK = 0x20000, + M_BUF_STATUS_BCH_WR_FULL_SHFT = 0x11, + M_BUF_STATUS_BCH_WR_EMPTY_BMSK = 0x10000, + M_BUF_STATUS_BCH_WR_EMPTY_SHFT = 0x10, + M_BUF_STATUS_WCH_DATA_RD_FULL_BMSK = 0x8000, + M_BUF_STATUS_WCH_DATA_RD_FULL_SHFT = 0xf, + M_BUF_STATUS_WCH_DATA_RD_EMPTY_BMSK = 0x4000, + M_BUF_STATUS_WCH_DATA_RD_EMPTY_SHFT = 0xe, + M_BUF_STATUS_WCH_CTRL_RD_FULL_BMSK = 0x2000, + M_BUF_STATUS_WCH_CTRL_RD_FULL_SHFT = 0xd, + M_BUF_STATUS_WCH_CTRL_RD_EMPTY_BMSK = 0x1000, + M_BUF_STATUS_WCH_CTRL_RD_EMPTY_SHFT = 0xc, + M_BUF_STATUS_ACH_RD_FULL_BMSK = 0x20, + M_BUF_STATUS_ACH_RD_FULL_SHFT = 0x5, + 
M_BUF_STATUS_ACH_RD_EMPTY_BMSK = 0x10, + M_BUF_STATUS_ACH_RD_EMPTY_SHFT = 0x4, +}; +/*BIMC Generic */ + +#define S_REG_BASE(b) ((b) + 0x00048000) + +#define S_COMPONENT_INFO_ADDR(b, n) \ + (S_REG_BASE(b) + (0x8000 * (n)) + 0x00000000) +enum bimc_s_component_info { + S_COMPONENT_INFO_RMSK = 0xffffff, + S_COMPONENT_INFO_INSTANCE_BMSK = 0xff0000, + S_COMPONENT_INFO_INSTANCE_SHFT = 0x10, + S_COMPONENT_INFO_SUB_TYPE_BMSK = 0xff00, + S_COMPONENT_INFO_SUB_TYPE_SHFT = 0x8, + S_COMPONENT_INFO_TYPE_BMSK = 0xff, + S_COMPONENT_INFO_TYPE_SHFT = 0x0, +}; + +#define S_HW_INFO_ADDR(b, n) \ + (S_REG_BASE(b) + (0x80000 * (n)) + 0x00000010) +enum bimc_s_hw_info { + S_HW_INFO_RMSK = 0xffffffff, + S_HW_INFO_MAJOR_BMSK = 0xff000000, + S_HW_INFO_MAJOR_SHFT = 0x18, + S_HW_INFO_BRANCH_BMSK = 0xff0000, + S_HW_INFO_BRANCH_SHFT = 0x10, + S_HW_INFO_MINOR_BMSK = 0xff00, + S_HW_INFO_MINOR_SHFT = 0x8, + S_HW_INFO_ECO_BMSK = 0xff, + S_HW_INFO_ECO_SHFT = 0x0, +}; + + +/* S_SCMO_GENERIC */ + +#define S_SCMO_REG_BASE(b) ((b) + 0x00048000) + +#define S_SCMO_CONFIG_INFO_0_ADDR(b, n) \ + (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000020) +enum bimc_s_scmo_config_info_0 { + S_SCMO_CONFIG_INFO_0_RMSK = 0xffffffff, + S_SCMO_CONFIG_INFO_0_DATA_WIDTH_BMSK = 0xffff0000, + S_SCMO_CONFIG_INFO_0_DATA_WIDTH_SHFT = 0x10, + S_SCMO_CONFIG_INFO_0_TID_WIDTH_BMSK = 0xff00, + S_SCMO_CONFIG_INFO_0_TID_WIDTH_SHFT = 0x8, + S_SCMO_CONFIG_INFO_0_MID_WIDTH_BMSK = 0xff, + S_SCMO_CONFIG_INFO_0_MID_WIDTH_SHFT = 0x0, +}; + +#define S_SCMO_CONFIG_INFO_1_ADDR(b, n) \ + (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000030) +enum bimc_s_scmo_config_info_1 { + S_SCMO_CONFIG_INFO_1_RMSK = 0xffffffff, + S_SCMO_CONFIG_INFO_1_MPORT_CONNECTIVITY_BMSK = 0xffffffff, + S_SCMO_CONFIG_INFO_1_MPORT_CONNECTIVITY_SHFT = 0x0, +}; + +#define S_SCMO_CONFIG_INFO_2_ADDR(b, n) \ + (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000040) +enum bimc_s_scmo_config_info_2 { + S_SCMO_CONFIG_INFO_2_RMSK = 0xff00ff, + S_SCMO_CONFIG_INFO_2_NUM_GLOBAL_MONS_BMSK = 
0xff0000, + S_SCMO_CONFIG_INFO_2_NUM_GLOBAL_MONS_SHFT = 0x10, + S_SCMO_CONFIG_INFO_2_VMID_WIDTH_BMSK = 0xff, + S_SCMO_CONFIG_INFO_2_VMID_WIDTH_SHFT = 0x0, +}; + +#define S_SCMO_CONFIG_INFO_3_ADDR(b, n) \ + (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000050) +enum bimc_s_scmo_config_info_3 { + S_SCMO_CONFIG_INFO_3_RMSK = 0xffffffff, + S_SCMO_CONFIG_INFO_3_RCH0_CTRL_DEPTH_BMSK = 0xff000000, + S_SCMO_CONFIG_INFO_3_RCH0_CTRL_DEPTH_SHFT = 0x18, + S_SCMO_CONFIG_INFO_3_RCH0_DEPTH_BMSK = 0xff0000, + S_SCMO_CONFIG_INFO_3_RCH0_DEPTH_SHFT = 0x10, + S_SCMO_CONFIG_INFO_3_BCH_DEPTH_BMSK = 0xff00, + S_SCMO_CONFIG_INFO_3_BCH_DEPTH_SHFT = 0x8, + S_SCMO_CONFIG_INFO_3_WCH_DEPTH_BMSK = 0xff, + S_SCMO_CONFIG_INFO_3_WCH_DEPTH_SHFT = 0x0, +}; + +#define S_SCMO_CONFIG_INFO_4_ADDR(b, n) \ + (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000060) +enum bimc_s_scmo_config_info_4 { + S_SCMO_CONFIG_INFO_4_RMSK = 0xffff, + S_SCMO_CONFIG_INFO_4_RCH1_CTRL_DEPTH_BMSK = 0xff00, + S_SCMO_CONFIG_INFO_4_RCH1_CTRL_DEPTH_SHFT = 0x8, + S_SCMO_CONFIG_INFO_4_RCH1_DEPTH_BMSK = 0xff, + S_SCMO_CONFIG_INFO_4_RCH1_DEPTH_SHFT = 0x0, +}; + +#define S_SCMO_CONFIG_INFO_5_ADDR(b, n) \ + (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000070) +enum bimc_s_scmo_config_info_5 { + S_SCMO_CONFIG_INFO_5_RMSK = 0xffff, + S_SCMO_CONFIG_INFO_5_DPE_CQ_DEPTH_BMSK = 0xff00, + S_SCMO_CONFIG_INFO_5_DPE_CQ_DEPTH_SHFT = 0x8, + S_SCMO_CONFIG_INFO_5_DDR_BUS_WIDTH_BMSK = 0xff, + S_SCMO_CONFIG_INFO_5_DDR_BUS_WIDTH_SHFT = 0x0, +}; + +#define S_SCMO_CONFIG_INFO_6_ADDR(b, n) \ + (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000080) +enum bimc_s_scmo_config_info_6 { + S_SCMO_CONFIG_INFO_6_RMSK = 0x1111, + S_SCMO_CONFIG_INFO_6_WBUFC_PIPE_BMSK = 0x1000, + S_SCMO_CONFIG_INFO_6_WBUFC_PIPE_SHFT = 0xc, + S_SCMO_CONFIG_INFO_6_RDOPT_PIPE_BMSK = 0x100, + S_SCMO_CONFIG_INFO_6_RDOPT_PIPE_SHFT = 0x8, + S_SCMO_CONFIG_INFO_6_ACHAN_INTF_PIPE_BMSK = 0x10, + S_SCMO_CONFIG_INFO_6_ACHAN_INTF_PIPE_SHFT = 0x4, + S_SCMO_CONFIG_INFO_6_ADDR_DECODE_HT_BMSK = 0x1, + 
S_SCMO_CONFIG_INFO_6_ADDR_DECODE_HT_SHFT = 0x0, +}; + +#define S_SCMO_INT_STATUS_ADDR(b, n) \ + (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000100) +enum bimc_s_scmo_int_status { + S_SCMO_INT_STATUS_RMSK = 0x1, + S_SCMO_INT_STATUS_ERR_OCCURED_BMSK = 0x1, + S_SCMO_INT_STATUS_ERR_OCCURED_SHFT = 0x0, +}; + +#define S_SCMO_INT_CLR_ADDR(b, n) \ + (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000108) +enum bimc_s_scmo_int_clr { + S_SCMO_INT_CLR_RMSK = 0x1, + S_SCMO_INT_CLR_IRQ_CLR_BMSK = 0x1, + S_SCMO_INT_CLR_IRQ_CLR_SHFT = 0x0, +}; + +#define S_SCMO_INT_EN_ADDR(b, n) \ + (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x0000010c) +enum bimc_s_scmo_int_en { + S_SCMO_INT_EN_RMSK = 0x1, + S_SCMO_INT_EN_IRQ_EN_BMSK = 0x1, + S_SCMO_INT_EN_IRQ_EN_SHFT = 0x0, +}; + +#define S_SCMO_ESYN_ADDR_ADDR(b, n) \ + (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000120) +enum bimc_s_scmo_esyn_addr { + S_SCMO_ESYN_ADDR_RMSK = 0xffffffff, + S_SCMO_ESYN_ADDR_ESYN_ADDR_ERR_ADDR_BMSK = 0xffffffff, + S_SCMO_ESYN_ADDR_ESYN_ADDR_ERR_ADDR_SHFT = 0x0, +}; + +#define S_SCMO_ESYN_APACKET_0_ADDR(b, n) \ + (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000128) +enum bimc_s_scmo_esyn_apacket_0 { + S_SCMO_ESYN_APACKET_0_RMSK = 0xff1fffff, + S_SCMO_ESYN_APACKET_0_ERR_ATID_BMSK = 0xff000000, + S_SCMO_ESYN_APACKET_0_ERR_ATID_SHFT = 0x18, + S_SCMO_ESYN_APACKET_0_ERR_AVMID_BMSK = 0x1f0000, + S_SCMO_ESYN_APACKET_0_ERR_AVMID_SHFT = 0x10, + S_SCMO_ESYN_APACKET_0_ERR_AMID_BMSK = 0xffff, + S_SCMO_ESYN_APACKET_0_ERR_AMID_SHFT = 0x0, +}; + +#define S_SCMO_ESYN_APACKET_1_ADDR(b, n) \ + (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x0000012c) +enum bimc_s_scmo_esyn_apacket_1 { + S_SCMO_ESYN_APACKET_1_RMSK = 0x10ff117, + S_SCMO_ESYN_APACKET_1_ERR_CODE_BMSK = 0x1000000, + S_SCMO_ESYN_APACKET_1_ERR_CODE_SHFT = 0x18, + S_SCMO_ESYN_APACKET_1_ERR_ALEN_BMSK = 0xf0000, + S_SCMO_ESYN_APACKET_1_ERR_ALEN_SHFT = 0x10, + S_SCMO_ESYN_APACKET_1_ERR_ASIZE_BMSK = 0xe000, + S_SCMO_ESYN_APACKET_1_ERR_ASIZE_SHFT = 0xd, + S_SCMO_ESYN_APACKET_1_ERR_ABURST_BMSK = 
0x1000, + S_SCMO_ESYN_APACKET_1_ERR_ABURST_SHFT = 0xc, + S_SCMO_ESYN_APACKET_1_ERR_AEXCLUSIVE_BMSK = 0x100, + S_SCMO_ESYN_APACKET_1_ERR_AEXCLUSIVE_SHFT = 0x8, + S_SCMO_ESYN_APACKET_1_ERR_APRONTS_BMSK = 0x10, + S_SCMO_ESYN_APACKET_1_ERR_APRONTS_SHFT = 0x4, + S_SCMO_ESYN_APACKET_1_ERR_AOOORD_BMSK = 0x4, + S_SCMO_ESYN_APACKET_1_ERR_AOOORD_SHFT = 0x2, + S_SCMO_ESYN_APACKET_1_ERR_AOOOWR_BMSK = 0x2, + S_SCMO_ESYN_APACKET_1_ERR_AOOOWR_SHFT = 0x1, + S_SCMO_ESYN_APACKET_1_ERR_AWRITE_BMSK = 0x1, + S_SCMO_ESYN_APACKET_1_ERR_AWRITE_SHFT = 0x0, +}; + +#define S_SCMO_CLK_CTRL_ADDR(b, n) \ + (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000200) +enum bimc_s_scmo_clk_ctrl { + S_SCMO_CLK_CTRL_RMSK = 0xffff1111, + S_SCMO_CLK_CTRL_PEN_CMD_CG_EN_BMSK = 0x10000, + S_SCMO_CLK_CTRL_PEN_CMD_CG_EN_SHFT = 0x10, + S_SCMO_CLK_CTRL_RCH_CG_EN_BMSK = 0x1000, + S_SCMO_CLK_CTRL_RCH_CG_EN_SHFT = 0xc, + S_SCMO_CLK_CTRL_FLUSH_CG_EN_BMSK = 0x100, + S_SCMO_CLK_CTRL_FLUSH_CG_EN_SHFT = 0x8, + S_SCMO_CLK_CTRL_WCH_CG_EN_BMSK = 0x10, + S_SCMO_CLK_CTRL_WCH_CG_EN_SHFT = 0x4, + S_SCMO_CLK_CTRL_ACH_CG_EN_BMSK = 0x1, + S_SCMO_CLK_CTRL_ACH_CG_EN_SHFT = 0x0, +}; + +#define S_SCMO_SLV_INTERLEAVE_CFG_ADDR(b, n) \ + (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000400) +enum bimc_s_scmo_slv_interleave_cfg { + S_SCMO_SLV_INTERLEAVE_CFG_RMSK = 0xff, + S_SCMO_SLV_INTERLEAVE_CFG_INTERLEAVE_CS1_BMSK = 0x10, + S_SCMO_SLV_INTERLEAVE_CFG_INTERLEAVE_CS1_SHFT = 0x4, + S_SCMO_SLV_INTERLEAVE_CFG_INTERLEAVE_CS0_BMSK = 0x1, + S_SCMO_SLV_INTERLEAVE_CFG_INTERLEAVE_CS0_SHFT = 0x0, +}; + +#define S_SCMO_ADDR_BASE_CSn_ADDR(b, n, o) \ + (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000410 + 0x4 * (o)) +enum bimc_s_scmo_addr_base_csn { + S_SCMO_ADDR_BASE_CSn_RMSK = 0xffff, + S_SCMO_ADDR_BASE_CSn_MAXn = 1, + S_SCMO_ADDR_BASE_CSn_ADDR_BASE_BMSK = 0xfc, + S_SCMO_ADDR_BASE_CSn_ADDR_BASE_SHFT = 0x2, +}; + +#define S_SCMO_ADDR_MAP_CSn_ADDR(b, n, o) \ + (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000420 + 0x4 * (o)) +enum bimc_s_scmo_addr_map_csn { + 
S_SCMO_ADDR_MAP_CSn_RMSK = 0xffff, + S_SCMO_ADDR_MAP_CSn_MAXn = 1, + S_SCMO_ADDR_MAP_CSn_RANK_EN_BMSK = 0x8000, + S_SCMO_ADDR_MAP_CSn_RANK_EN_SHFT = 0xf, + S_SCMO_ADDR_MAP_CSn_ADDR_MODE_BMSK = 0x1000, + S_SCMO_ADDR_MAP_CSn_ADDR_MODE_SHFT = 0xc, + S_SCMO_ADDR_MAP_CSn_BANK_SIZE_BMSK = 0x100, + S_SCMO_ADDR_MAP_CSn_BANK_SIZE_SHFT = 0x8, + S_SCMO_ADDR_MAP_CSn_ROW_SIZE_BMSK = 0x30, + S_SCMO_ADDR_MAP_CSn_ROW_SIZE_SHFT = 0x4, + S_SCMO_ADDR_MAP_CSn_COL_SIZE_BMSK = 0x3, + S_SCMO_ADDR_MAP_CSn_COL_SIZE_SHFT = 0x0, +}; + +#define S_SCMO_ADDR_MASK_CSn_ADDR(b, n, o) \ + (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000430 + 0x4 * (0)) +enum bimc_s_scmo_addr_mask_csn { + S_SCMO_ADDR_MASK_CSn_RMSK = 0xffff, + S_SCMO_ADDR_MASK_CSn_MAXn = 1, + S_SCMO_ADDR_MASK_CSn_ADDR_MASK_BMSK = 0xfc, + S_SCMO_ADDR_MASK_CSn_ADDR_MASK_SHFT = 0x2, +}; + +#define S_SCMO_SLV_STATUS_ADDR(b, n) \ + (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000450) +enum bimc_s_scmo_slv_status { + S_SCMO_SLV_STATUS_RMSK = 0xff3, + S_SCMO_SLV_STATUS_GLOBAL_MONS_IN_USE_BMSK = 0xff0, + S_SCMO_SLV_STATUS_GLOBAL_MONS_IN_USE_SHFT = 0x4, + S_SCMO_SLV_STATUS_SLAVE_IDLE_BMSK = 0x3, + S_SCMO_SLV_STATUS_SLAVE_IDLE_SHFT = 0x0, +}; + +#define S_SCMO_CMD_BUF_CFG_ADDR(b, n) \ + (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000500) +enum bimc_s_scmo_cmd_buf_cfg { + S_SCMO_CMD_BUF_CFG_RMSK = 0xf1f, + S_SCMO_CMD_BUF_CFG_CMD_ORDERING_BMSK = 0x300, + S_SCMO_CMD_BUF_CFG_CMD_ORDERING_SHFT = 0x8, + S_SCMO_CMD_BUF_CFG_HP_CMD_AREQPRIO_MAP_BMSK = 0x10, + S_SCMO_CMD_BUF_CFG_HP_CMD_AREQPRIO_MAP_SHFT = 0x4, + S_SCMO_CMD_BUF_CFG_HP_CMD_Q_DEPTH_BMSK = 0x7, + S_SCMO_CMD_BUF_CFG_HP_CMD_Q_DEPTH_SHFT = 0x0, +}; + +#define S_SCM_CMD_BUF_STATUS_ADDR(b, n) \ + (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000520) +enum bimc_s_scm_cmd_buf_status { + S_SCMO_CMD_BUF_STATUS_RMSK = 0x77, + S_SCMO_CMD_BUF_STATUS_HP_CMD_BUF_ENTRIES_IN_USE_BMSK = 0x70, + S_SCMO_CMD_BUF_STATUS_HP_CMD_BUF_ENTRIES_IN_USE_SHFT = 0x4, + S_SCMO_CMD_BUF_STATUS_LP_CMD_BUF_ENTRIES_IN_USE_BMSK = 0x7, 
+ S_SCMO_CMD_BUF_STATUS_LP_CMD_BUF_ENTRIES_IN_USE_SHFT = 0x0, +}; + +#define S_SCMO_RCH_SEL_ADDR(b, n) \ + (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000540) +enum bimc_s_scmo_rch_sel { + S_SCMO_RCH_SEL_RMSK = 0xffffffff, + S_SCMO_CMD_BUF_STATUS_RCH_PORTS_BMSK = 0xffffffff, + S_SCMO_CMD_BUF_STATUS_RCH_PORTS_SHFT = 0x0, +}; + +#define S_SCMO_RCH_BKPR_CFG_ADDR(b, n) \ + (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000544) +enum bimc_s_scmo_rch_bkpr_cfg { + S_SCMO_RCH_BKPR_CFG_RMSK = 0xffffffff, + S_SCMO_RCH_BKPR_CFG_RCH1_FIFO_BKPR_HI_TH_BMSK = 0x3f000000, + S_SCMO_RCH_BKPR_CFG_RCH1_FIFO_BKPR_HI_TH_SHFT = 0x18, + S_SCMO_RCH_BKPR_CFG_RCH1_FIFO_BKPR_LO_TH_BMSK = 0x3f0000, + S_SCMO_RCH_BKPR_CFG_RCH1_FIFO_BKPR_LO_TH_SHFT = 0x10, + S_SCMO_RCH_BKPR_CFG_RCH0_FIFO_BKPR_HI_TH_BMSK = 0x3f00, + S_SCMO_RCH_BKPR_CFG_RCH0_FIFO_BKPR_HI_TH_SHFT = 0x8, + S_SCMO_RCH_BKPR_CFG_RCH0_FIFO_BKPR_LO_TH_BMSK = 0x3f, + S_SCMO_RCH_BKPR_CFG_RCH0_FIFO_BKPR_LO_TH_SHFT = 0x0, +}; + +#define S_SCMO_RCH_STATUS_ADDR(b, n) \ + (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000560) +enum bimc_s_scmo_rch_status { + S_SCMO_RCH_STATUS_RMSK = 0x33333, + S_SCMO_RCH_STATUS_PRQ_FIFO_FULL_BMSK = 0x20000, + S_SCMO_RCH_STATUS_PRQ_FIFO_FULL_SHFT = 0x11, + S_SCMO_RCH_STATUS_PRQ_FIFO_EMPTY_BMSK = 0x10000, + S_SCMO_RCH_STATUS_PRQ_FIFO_EMPTY_SHFT = 0x10, + S_SCMO_RCH_STATUS_RCH1_QUAL_FIFO_FULL_BMSK = 0x2000, + S_SCMO_RCH_STATUS_RCH1_QUAL_FIFO_FULL_SHFT = 0xd, + S_SCMO_RCH_STATUS_RCH1_QUAL_FIFO_EMPTY_BMSK = 0x1000, + S_SCMO_RCH_STATUS_RCH1_QUAL_FIFO_EMPTY_SHFT = 0xc, + S_SCMO_RCH_STATUS_RCH1_DATA_FIFO_FULL_BMSK = 0x200, + S_SCMO_RCH_STATUS_RCH1_DATA_FIFO_FULL_SHFT = 0x9, + S_SCMO_RCH_STATUS_RCH1_DATA_FIFO_EMPTY_BMSK = 0x100, + S_SCMO_RCH_STATUS_RCH1_DATA_FIFO_EMPTY_SHFT = 0x8, + S_SCMO_RCH_STATUS_RCH0_QUAL_FIFO_FULL_BMSK = 0x20, + S_SCMO_RCH_STATUS_RCH0_QUAL_FIFO_FULL_SHFT = 0x5, + S_SCMO_RCH_STATUS_RCH0_QUAL_FIFO_EMPTY_BMSK = 0x10, + S_SCMO_RCH_STATUS_RCH0_QUAL_FIFO_EMPTY_SHFT = 0x4, + 
S_SCMO_RCH_STATUS_RCH0_DATA_FIFO_FULL_BMSK = 0x2, + S_SCMO_RCH_STATUS_RCH0_DATA_FIFO_FULL_SHFT = 0x1, + S_SCMO_RCH_STATUS_RCH0_DATA_FIFO_EMPTY_BMSK = 0x1, + S_SCMO_RCH_STATUS_RCH0_DATA_FIFO_EMPTY_SHFT = 0x0, +}; + +#define S_SCMO_WCH_BUF_CFG_ADDR(b, n) \ + (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000580) +enum bimc_s_scmo_wch_buf_cfg { + S_SCMO_WCH_BUF_CFG_RMSK = 0xff, + S_SCMO_WCH_BUF_CFG_WRITE_BLOCK_READ_BMSK = 0x10, + S_SCMO_WCH_BUF_CFG_WRITE_BLOCK_READ_SHFT = 0x4, + S_SCMO_WCH_BUF_CFG_COALESCE_EN_BMSK = 0x1, + S_SCMO_WCH_BUF_CFG_COALESCE_EN_SHFT = 0x0, +}; + +#define S_SCMO_WCH_STATUS_ADDR(b, n) \ + (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x000005a0) +enum bimc_s_scmo_wch_status { + S_SCMO_WCH_STATUS_RMSK = 0x333, + S_SCMO_WCH_STATUS_BRESP_FIFO_FULL_BMSK = 0x200, + S_SCMO_WCH_STATUS_BRESP_FIFO_FULL_SHFT = 0x9, + S_SCMO_WCH_STATUS_BRESP_FIFO_EMPTY_BMSK = 0x100, + S_SCMO_WCH_STATUS_BRESP_FIFO_EMPTY_SHFT = 0x8, + S_SCMO_WCH_STATUS_WDATA_FIFO_FULL_BMSK = 0x20, + S_SCMO_WCH_STATUS_WDATA_FIFO_FULL_SHFT = 0x5, + S_SCMO_WCH_STATUS_WDATA_FIFO_EMPTY_BMSK = 0x10, + S_SCMO_WCH_STATUS_WDATA_FIFO_EMPTY_SHFT = 0x4, + S_SCMO_WCH_STATUS_WBUF_FULL_BMSK = 0x2, + S_SCMO_WCH_STATUS_WBUF_FULL_SHFT = 0x1, + S_SCMO_WCH_STATUS_WBUF_EMPTY_BMSK = 0x1, + S_SCMO_WCH_STATUS_WBUF_EMPTY_SHFT = 0x0, +}; + +#define S_SCMO_FLUSH_CFG_ADDR(b, n) \ + (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x000005c0) +enum bimc_s_scmo_flush_cfg { + S_SCMO_FLUSH_CFG_RMSK = 0xffffffff, + S_SCMO_FLUSH_CFG_FLUSH_IN_ORDER_BMSK = 0x10000000, + S_SCMO_FLUSH_CFG_FLUSH_IN_ORDER_SHFT = 0x1c, + S_SCMO_FLUSH_CFG_FLUSH_IDLE_DELAY_BMSK = 0x3ff0000, + S_SCMO_FLUSH_CFG_FLUSH_IDLE_DELAY_SHFT = 0x10, + S_SCMO_FLUSH_CFG_FLUSH_UPPER_LIMIT_BMSK = 0xf00, + S_SCMO_FLUSH_CFG_FLUSH_UPPER_LIMIT_SHFT = 0x8, + S_SCMO_FLUSH_CFG_FLUSH_LOWER_LIMIT_BMSK = 0xf, + S_SCMO_FLUSH_CFG_FLUSH_LOWER_LIMIT_SHFT = 0x0, +}; + +#define S_SCMO_FLUSH_CMD_ADDR(b, n) \ + (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x000005c4) +enum bimc_s_scmo_flush_cmd { + 
S_SCMO_FLUSH_CMD_RMSK = 0xf, + S_SCMO_FLUSH_CMD_FLUSH_ALL_BUF_BMSK = 0x3, + S_SCMO_FLUSH_CMD_FLUSH_ALL_BUF_SHFT = 0x0, +}; + +#define S_SCMO_CMD_OPT_CFG0_ADDR(b, n) \ + (S_SCM0_REG_BASE(b) + (0x8000 * (n)) + 0x00000700) +enum bimc_s_scmo_cmd_opt_cfg0 { + S_SCMO_CMD_OPT_CFG0_RMSK = 0xffffff, + S_SCMO_CMD_OPT_CFG0_IGNORE_BANK_UNAVL_BMSK = 0x100000, + S_SCMO_CMD_OPT_CFG0_IGNORE_BANK_UNAVL_SHFT = 0x14, + S_SCMO_CMD_OPT_CFG0_MASK_CMDOUT_PRI_BMSK = 0x10000, + S_SCMO_CMD_OPT_CFG0_MASK_CMDOUT_PRI_SHFT = 0x10, + S_SCMO_CMD_OPT_CFG0_DPE_CMD_REORDERING_BMSK = 0x1000, + S_SCMO_CMD_OPT_CFG0_DPE_CMD_REORDERING_SHFT = 0xc, + S_SCMO_CMD_OPT_CFG0_WR_OPT_EN_BMSK = 0x100, + S_SCMO_CMD_OPT_CFG0_WR_OPT_EN_SHFT = 0x8, + S_SCMO_CMD_OPT_CFG0_RD_OPT_EN_BMSK = 0x10, + S_SCMO_CMD_OPT_CFG0_RD_OPT_EN_SHFT = 0x4, + S_SCMO_CMD_OPT_CFG0_PAGE_MGMT_POLICY_BMSK = 0x1, + S_SCMO_CMD_OPT_CFG0_PAGE_MGMT_POLICY_SHFT = 0x0, +}; + +#define S_SCMO_CMD_OPT_CFG1_ADDR(b, n) \ + (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000704) +enum bimc_s_scmo_cmd_opt_cfg1 { + S_SCMO_CMD_OPT_CFG1_RMSK = 0xffffffff, + S_SCMO_CMD_OPT_CFG1_HSTP_CMD_TIMEOUT_BMSK = 0x1f000000, + S_SCMO_CMD_OPT_CFG1_HSTP_CMD_TIMEOUT_SHFT = 0x18, + S_SCMO_CMD_OPT_CFG1_HP_CMD_TIMEOUT_BMSK = 0x1f0000, + S_SCMO_CMD_OPT_CFG1_HP_CMD_TIMEOUT_SHFT = 0x10, + S_SCMO_CMD_OPT_CFG1_MP_CMD_TIMEOUT_BMSK = 0x1f00, + S_SCMO_CMD_OPT_CFG1_MP_CMD_TIMEOUT_SHFT = 0x8, + S_SCMO_CMD_OPT_CFG1_LP_CMD_TIMEOUT_BMSK = 0x1f, + S_SCMO_CMD_OPT_CFG1_LP_CMD_TIMEOUT_SHFT = 0x0, +}; + +#define S_SCMO_CMD_OPT_CFG2_ADDR(b, n) \ + (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000708) +enum bimc_s_scmo_cmd_opt_cfg2 { + S_SCMO_CMD_OPT_CFG2_RMSK = 0xff, + S_SCMO_CMD_OPT_CFG2_RWOPT_CMD_TIMEOUT_BMSK = 0xf, + S_SCMO_CMD_OPT_CFG2_RWOPT_CMD_TIMEOUT_SHFT = 0x0, +}; + +#define S_SCMO_CMD_OPT_CFG3_ADDR(b, n) \ + (S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x0000070c) +enum bimc_s_scmo_cmd_opt_cfg3 { + S_SCMO_CMD_OPT_CFG3_RMSK = 0xff, + S_SCMO_CMD_OPT_CFG3_FLUSH_CMD_TIMEOUT_BMSK = 0xf, + 
S_SCMO_CMD_OPT_CFG3_FLUSH_CMD_TIMEOUT_SHFT = 0x0, +}; + +/* S_SWAY_GENERIC */ +#define S_SWAY_REG_BASE(b) ((b) + 0x00048000) + +#define S_SWAY_CONFIG_INFO_0_ADDR(b, n) \ + (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000020) +enum bimc_s_sway_config_info_0 { + S_SWAY_CONFIG_INFO_0_RMSK = 0xff0000ff, + S_SWAY_CONFIG_INFO_0_SYNC_MODE_BMSK = 0xff000000, + S_SWAY_CONFIG_INFO_0_SYNC_MODE_SHFT = 0x18, + S_SWAY_CONFIG_INFO_0_FUNC_BMSK = 0xff, + S_SWAY_CONFIG_INFO_0_FUNC_SHFT = 0x0, +}; + +#define S_SWAY_CONFIG_INFO_1_ADDR(b, n) \ + (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000030) +enum bimc_s_sway_config_info_1 { + S_SWAY_CONFIG_INFO_1_RMSK = 0xffffffff, + S_SWAY_CONFIG_INFO_1_MPORT_CONNECTIVITY_BMSK = 0xffffffff, + S_SWAY_CONFIG_INFO_1_MPORT_CONNECTIVITY_SHFT = 0x0, +}; + +#define S_SWAY_CONFIG_INFO_2_ADDR(b, n) \ + (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000040) +enum bimc_s_sway_config_info_2 { + S_SWAY_CONFIG_INFO_2_RMSK = 0xffff0000, + S_SWAY_CONFIG_INFO_2_MPORT_CONNECTIVITY_BMSK = 0xffff0000, + S_SWAY_CONFIG_INFO_2_MPORT_CONNECTIVITY_SHFT = 0x10, +}; + +#define S_SWAY_CONFIG_INFO_3_ADDR(b, n) \ + (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000050) +enum bimc_s_sway_config_info_3 { + S_SWAY_CONFIG_INFO_3_RMSK = 0xffffffff, + S_SWAY_CONFIG_INFO_3_RCH0_DEPTH_BMSK = 0xff000000, + S_SWAY_CONFIG_INFO_3_RCH0_DEPTH_SHFT = 0x18, + S_SWAY_CONFIG_INFO_3_BCH_DEPTH_BMSK = 0xff0000, + S_SWAY_CONFIG_INFO_3_BCH_DEPTH_SHFT = 0x10, + S_SWAY_CONFIG_INFO_3_WCH_DEPTH_BMSK = 0xff, + S_SWAY_CONFIG_INFO_3_WCH_DEPTH_SHFT = 0x0, +}; + +#define S_SWAY_CONFIG_INFO_4_ADDR(b, n) \ + (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000060) +enum bimc_s_sway_config_info_4 { + S_SWAY_CONFIG_INFO_4_RMSK = 0x800000ff, + S_SWAY_CONFIG_INFO_4_DUAL_RCH_EN_BMSK = 0x80000000, + S_SWAY_CONFIG_INFO_4_DUAL_RCH_EN_SHFT = 0x1f, + S_SWAY_CONFIG_INFO_4_RCH1_DEPTH_BMSK = 0xff, + S_SWAY_CONFIG_INFO_4_RCH1_DEPTH_SHFT = 0x0, +}; + +#define S_SWAY_CONFIG_INFO_5_ADDR(b, n) \ + (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 
0x00000070) +enum bimc_s_sway_config_info_5 { + S_SWAY_CONFIG_INFO_5_RMSK = 0x800000ff, + S_SWAY_CONFIG_INFO_5_QCH_EN_BMSK = 0x80000000, + S_SWAY_CONFIG_INFO_5_QCH_EN_SHFT = 0x1f, + S_SWAY_CONFIG_INFO_5_QCH_DEPTH_BMSK = 0xff, + S_SWAY_CONFIG_INFO_5_QCH_DEPTH_SHFT = 0x0, +}; + +#define S_SWAY_CONFIG_INFO_6_ADDR(b, n) \ + (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000080) +enum bimc_s_sway_config_info_6 { + S_SWAY_CONFIG_INFO_6_RMSK = 0x1, + S_SWAY_CONFIG_INFO_6_S2SW_PIPELINE_EN_BMSK = 0x1, + S_SWAY_CONFIG_INFO_6_S2SW_PIPELINE_EN_SHFT = 0x0, +}; + +#define S_SWAY_INT_STATUS_ADDR(b, n) \ + (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000100) +enum bimc_s_sway_int_status { + S_SWAY_INT_STATUS_RMSK = 0x3, + S_SWAY_INT_STATUS_RFU_BMSK = 0x3, + S_SWAY_INT_STATUS_RFU_SHFT = 0x0, +}; + +#define S_SWAY_INT_CLR_ADDR(b, n) \ + (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000108) +enum bimc_s_sway_int_clr { + S_SWAY_INT_CLR_RMSK = 0x3, + S_SWAY_INT_CLR_RFU_BMSK = 0x3, + S_SWAY_INT_CLR_RFU_SHFT = 0x0, +}; + + +#define S_SWAY_INT_EN_ADDR(b, n) \ + (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x0000010c) +enum bimc_s_sway_int_en { + S_SWAY_INT_EN_RMSK = 0x3, + S_SWAY_INT_EN_RFU_BMSK = 0x3, + S_SWAY_INT_EN_RFU_SHFT = 0x0, +}; + +#define S_SWAY_CLK_CTRL_ADDR(b, n) \ + (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000200) +enum bimc_s_sway_clk_ctrl { + S_SWAY_CLK_CTRL_RMSK = 0x3, + S_SWAY_CLK_CTRL_SLAVE_CLK_GATING_EN_BMSK = 0x2, + S_SWAY_CLK_CTRL_SLAVE_CLK_GATING_EN_SHFT = 0x1, + S_SWAY_CLK_CTRL_CORE_CLK_GATING_EN_BMSK = 0x1, + S_SWAY_CLK_CTRL_CORE_CLK_GATING_EN_SHFT = 0x0, +}; + +#define S_SWAY_RCH_SEL_ADDR(b, n) \ + (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000210) +enum bimc_s_sway_rch_sel { + S_SWAY_RCH_SEL_RMSK = 0x7f, + S_SWAY_RCH_SEL_UNUSED_BMSK = 0x7f, + S_SWAY_RCH_SEL_UNUSED_SHFT = 0x0, +}; + + +#define S_SWAY_MAX_OUTSTANDING_REQS_ADDR(b, n) \ + (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000220) +enum bimc_s_sway_max_outstanding_reqs { + S_SWAY_MAX_OUTSTANDING_REQS_RMSK = 0xffff, + 
S_SWAY_MAX_OUTSTANDING_REQS_WRITE_BMSK = 0xff00, + S_SWAY_MAX_OUTSTANDING_REQS_WRITE_SHFT = 0x8, + S_SWAY_MAX_OUTSTANDING_REQS_READ_BMSK = 0xff, + S_SWAY_MAX_OUTSTANDING_REQS_READ_SHFT = 0x0, +}; + + +#define S_SWAY_BUF_STATUS_0_ADDR(b, n) \ + (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000400) +enum bimc_s_sway_buf_status_0 { + S_SWAY_BUF_STATUS_0_RMSK = 0xf0300f03, + S_SWAY_BUF_STATUS_0_RCH0_DATA_RD_FULL_BMSK = 0x80000000, + S_SWAY_BUF_STATUS_0_RCH0_DATA_RD_FULL_SHFT = 0x1f, + S_SWAY_BUF_STATUS_0_RCH0_DATA_RD_EMPTY_BMSK = 0x40000000, + S_SWAY_BUF_STATUS_0_RCH0_DATA_RD_EMPTY_SHFT = 0x1e, + S_SWAY_BUF_STATUS_0_RCH0_CTRL_RD_FULL_BMSK = 0x20000000, + S_SWAY_BUF_STATUS_0_RCH0_CTRL_RD_FULL_SHFT = 0x1d, + S_SWAY_BUF_STATUS_0_RCH0_CTRL_RD_EMPTY_BMSK = 0x10000000, + S_SWAY_BUF_STATUS_0_RCH0_CTRL_RD_EMPTY_SHFT = 0x1c, + S_SWAY_BUF_STATUS_0_BCH_RD_FULL_BMSK = 0x200000, + S_SWAY_BUF_STATUS_0_BCH_RD_FULL_SHFT = 0x15, + S_SWAY_BUF_STATUS_0_BCH_RD_EMPTY_BMSK = 0x100000, + S_SWAY_BUF_STATUS_0_BCH_RD_EMPTY_SHFT = 0x14, + S_SWAY_BUF_STATUS_0_WCH_DATA_WR_FULL_BMSK = 0x800, + S_SWAY_BUF_STATUS_0_WCH_DATA_WR_FULL_SHFT = 0xb, + S_SWAY_BUF_STATUS_0_WCH_DATA_WR_EMPTY_BMSK = 0x400, + S_SWAY_BUF_STATUS_0_WCH_DATA_WR_EMPTY_SHFT = 0xa, + S_SWAY_BUF_STATUS_0_WCH_CTRL_WR_FULL_BMSK = 0x200, + S_SWAY_BUF_STATUS_0_WCH_CTRL_WR_FULL_SHFT = 0x9, + S_SWAY_BUF_STATUS_0_WCH_CTRL_WR_EMPTY_BMSK = 0x100, + S_SWAY_BUF_STATUS_0_WCH_CTRL_WR_EMPTY_SHFT = 0x8, + S_SWAY_BUF_STATUS_0_ACH_WR_FULL_BMSK = 0x2, + S_SWAY_BUF_STATUS_0_ACH_WR_FULL_SHFT = 0x1, + S_SWAY_BUF_STATUS_0_ACH_WR_EMPTY_BMSK = 0x1, + S_SWAY_BUF_STATUS_0_ACH_WR_EMPTY_SHFT = 0x0, +}; + +#define S_SWAY_BUF_STATUS_1_ADDR(b, n) \ + (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000410) +enum bimc_s_sway_buf_status_1 { + S_SWAY_BUF_STATUS_1_RMSK = 0xf0, + S_SWAY_BUF_STATUS_1_RCH1_DATA_RD_FULL_BMSK = 0x80, + S_SWAY_BUF_STATUS_1_RCH1_DATA_RD_FULL_SHFT = 0x7, + S_SWAY_BUF_STATUS_1_RCH1_DATA_RD_EMPTY_BMSK = 0x40, + S_SWAY_BUF_STATUS_1_RCH1_DATA_RD_EMPTY_SHFT 
= 0x6, + S_SWAY_BUF_STATUS_1_RCH1_CTRL_RD_FULL_BMSK = 0x20, + S_SWAY_BUF_STATUS_1_RCH1_CTRL_RD_FULL_SHFT = 0x5, + S_SWAY_BUF_STATUS_1_RCH1_CTRL_RD_EMPTY_BMSK = 0x10, + S_SWAY_BUF_STATUS_1_RCH1_CTRL_RD_EMPTY_SHFT = 0x4, +}; + +#define S_SWAY_BUF_STATUS_2_ADDR(b, n) \ + (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000420) +enum bimc_s_sway_buf_status_2 { + S_SWAY_BUF_STATUS_2_RMSK = 0x30, + S_SWAY_BUF_STATUS_2_QCH_RD_FULL_BMSK = 0x20, + S_SWAY_BUF_STATUS_2_QCH_RD_FULL_SHFT = 0x5, + S_SWAY_BUF_STATUS_2_QCH_RD_EMPTY_BMSK = 0x10, + S_SWAY_BUF_STATUS_2_QCH_RD_EMPTY_SHFT = 0x4, +}; + +/* S_ARB_GENERIC */ + +#define S_ARB_REG_BASE(b) ((b) + 0x00049000) + +#define S_ARB_COMPONENT_INFO_ADDR(b, n) \ + (S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000000) +enum bimc_s_arb_component_info { + S_ARB_COMPONENT_INFO_RMSK = 0xffffff, + S_ARB_COMPONENT_INFO_INSTANCE_BMSK = 0xff0000, + S_ARB_COMPONENT_INFO_INSTANCE_SHFT = 0x10, + S_ARB_COMPONENT_INFO_SUB_TYPE_BMSK = 0xff00, + S_ARB_COMPONENT_INFO_SUB_TYPE_SHFT = 0x8, + S_ARB_COMPONENT_INFO_TYPE_BMSK = 0xff, + S_ARB_COMPONENT_INFO_TYPE_SHFT = 0x0, +}; + +#define S_ARB_CONFIG_INFO_0_ADDR(b, n) \ + (S_ARB_REG_BASE(b) + (0x8000 * (n)) + 0x00000020) +enum bimc_s_arb_config_info_0 { + S_ARB_CONFIG_INFO_0_RMSK = 0x800000ff, + S_ARB_CONFIG_INFO_0_ARB2SW_PIPELINE_EN_BMSK = 0x80000000, + S_ARB_CONFIG_INFO_0_ARB2SW_PIPELINE_EN_SHFT = 0x1f, + S_ARB_CONFIG_INFO_0_FUNC_BMSK = 0xff, + S_ARB_CONFIG_INFO_0_FUNC_SHFT = 0x0, +}; + +#define S_ARB_CONFIG_INFO_1_ADDR(b, n) \ + (S_ARB_REG_BASE(b) + (0x8000 * (n)) + 0x00000030) +enum bimc_s_arb_config_info_1 { + S_ARB_CONFIG_INFO_1_RMSK = 0xffffffff, + S_ARB_CONFIG_INFO_1_MPORT_CONNECTIVITY_BMSK = 0xffffffff, + S_ARB_CONFIG_INFO_1_MPORT_CONNECTIVITY_SHFT = 0x0, +}; + +#define S_ARB_CLK_CTRL_ADDR(b) \ + (S_ARB_REG_BASE(b) + (0x8000 * (n)) + 0x00000200) +enum bimc_s_arb_clk_ctrl { + S_ARB_CLK_CTRL_RMSK = 0x1, + S_ARB_CLK_CTRL_SLAVE_CLK_GATING_EN_BMSK = 0x2, + S_ARB_CLK_CTRL_SLAVE_CLK_GATING_EN_SHFT = 0x1, + 
S_ARB_CLK_CTRL_CORE_CLK_GATING_EN_BMSK = 0x1, + S_ARB_CLK_CTRL_CORE_CLK_GATING_EN_SHFT = 0x0, + S_ARB_CLK_CTRL_CLK_GATING_EN_BMSK = 0x1, + S_ARB_CLK_CTRL_CLK_GATING_EN_SHFT = 0x0, +}; + +#define S_ARB_MODE_ADDR(b, n) \ + (S_ARB_REG_BASE(b) + (0x8000 * (n)) + 0x00000210) +enum bimc_s_arb_mode { + S_ARB_MODE_RMSK = 0xf0000001, + S_ARB_MODE_WR_GRANTS_AHEAD_BMSK = 0xf0000000, + S_ARB_MODE_WR_GRANTS_AHEAD_SHFT = 0x1c, + S_ARB_MODE_PRIO_RR_EN_BMSK = 0x1, + S_ARB_MODE_PRIO_RR_EN_SHFT = 0x0, +}; + +#define BKE_HEALTH_MASK \ + (M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_BMSK |\ + M_BKE_HEALTH_0_CONFIG_AREQPRIO_BMSK |\ + M_BKE_HEALTH_0_CONFIG_PRIOLVL_BMSK) + +#define BKE_HEALTH_VAL(limit, areq, plvl) \ + ((((limit) << M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_SHFT) & \ + M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_BMSK) | \ + (((areq) << M_BKE_HEALTH_0_CONFIG_AREQPRIO_SHFT) & \ + M_BKE_HEALTH_0_CONFIG_AREQPRIO_BMSK) | \ + (((plvl) << M_BKE_HEALTH_0_CONFIG_PRIOLVL_SHFT) & \ + M_BKE_HEALTH_0_CONFIG_PRIOLVL_BMSK)) + +#define MAX_GRANT_PERIOD \ + (M_BKE_GP_GP_BMSK >> \ + M_BKE_GP_GP_SHFT) + +#define MAX_GC \ + (M_BKE_GC_GC_BMSK >> \ + (M_BKE_GC_GC_SHFT + 1)) + +static int bimc_div(int64_t *a, uint32_t b) +{ + if ((*a > 0) && (*a < b)) { + *a = 0; + return 1; + } else { + return do_div(*a, b); + } +} + +#define ENABLE(val) ((val) == 1 ? 
1 : 0) +void msm_bus_bimc_set_mas_clk_gate(struct msm_bus_bimc_info *binfo, + uint32_t mas_index, struct msm_bus_bimc_clk_gate *bgate) +{ + uint32_t val, mask, reg_val; + void __iomem *addr; + + reg_val = readl_relaxed(M_CLK_CTRL_ADDR(binfo->base, + mas_index)) & M_CLK_CTRL_RMSK; + addr = M_CLK_CTRL_ADDR(binfo->base, mas_index); + mask = (M_CLK_CTRL_MAS_CLK_GATING_EN_BMSK | + M_CLK_CTRL_CORE_CLK_GATING_EN_BMSK); + val = (bgate->core_clk_gate_en << + M_CLK_CTRL_MAS_CLK_GATING_EN_SHFT) | + bgate->port_clk_gate_en; + writel_relaxed(((reg_val & (~mask)) | (val & mask)), addr); + /* Ensure clock gating enable mask is set before exiting */ + wmb(); +} + +void msm_bus_bimc_arb_en(struct msm_bus_bimc_info *binfo, + uint32_t slv_index, bool en) +{ + uint32_t reg_val, reg_mask_val, enable, val; + + reg_mask_val = (readl_relaxed(S_ARB_CONFIG_INFO_0_ADDR(binfo-> + base, slv_index)) & S_ARB_CONFIG_INFO_0_FUNC_BMSK) + >> S_ARB_CONFIG_INFO_0_FUNC_SHFT; + enable = ENABLE(en); + val = enable << S_ARB_MODE_PRIO_RR_EN_SHFT; + if (reg_mask_val == BIMC_ARB_MODE_PRIORITY_RR) { + reg_val = readl_relaxed(S_ARB_CONFIG_INFO_0_ADDR(binfo-> + base, slv_index)) & S_ARB_MODE_RMSK; + writel_relaxed(((reg_val & (~(S_ARB_MODE_PRIO_RR_EN_BMSK))) | + (val & S_ARB_MODE_PRIO_RR_EN_BMSK)), + S_ARB_MODE_ADDR(binfo->base, slv_index)); + /* Ensure arbitration mode is set before returning */ + wmb(); + } +} + +static void set_qos_mode(void __iomem *baddr, uint32_t index, uint32_t val0, + uint32_t val1, uint32_t val2) +{ + uint32_t reg_val, val; + + reg_val = readl_relaxed(M_PRIOLVL_OVERRIDE_ADDR(baddr, + index)) & M_PRIOLVL_OVERRIDE_RMSK; + val = val0 << M_PRIOLVL_OVERRIDE_OVERRIDE_PRIOLVL_SHFT; + writel_relaxed(((reg_val & ~(M_PRIOLVL_OVERRIDE_OVERRIDE_PRIOLVL_BMSK)) + | (val & M_PRIOLVL_OVERRIDE_OVERRIDE_PRIOLVL_BMSK)), + M_PRIOLVL_OVERRIDE_ADDR(baddr, index)); + reg_val = readl_relaxed(M_RD_CMD_OVERRIDE_ADDR(baddr, index)) & + M_RD_CMD_OVERRIDE_RMSK; + val = val1 << 
M_RD_CMD_OVERRIDE_OVERRIDE_AREQPRIO_SHFT; + writel_relaxed(((reg_val & ~(M_RD_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK + )) | (val & M_RD_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK)), + M_RD_CMD_OVERRIDE_ADDR(baddr, index)); + reg_val = readl_relaxed(M_WR_CMD_OVERRIDE_ADDR(baddr, index)) & + M_WR_CMD_OVERRIDE_RMSK; + val = val2 << M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_SHFT; + writel_relaxed(((reg_val & ~(M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK + )) | (val & M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK)), + M_WR_CMD_OVERRIDE_ADDR(baddr, index)); + /* Ensure the priority register writes go through */ + wmb(); +} + +static void msm_bus_bimc_set_qos_mode(void __iomem *base, + uint32_t mas_index, uint8_t qmode_sel) +{ + uint32_t reg_val, val; + + switch (qmode_sel) { + case BIMC_QOS_MODE_FIXED: + reg_val = readl_relaxed(M_BKE_EN_ADDR(base, + mas_index)); + writel_relaxed((reg_val & (~M_BKE_EN_EN_BMSK)), + M_BKE_EN_ADDR(base, mas_index)); + /* Ensure that the book-keeping register writes + * go through before setting QoS mode. + * QoS mode registers might write beyond 1K + * boundary in future + */ + wmb(); + set_qos_mode(base, mas_index, 1, 1, 1); + break; + + case BIMC_QOS_MODE_BYPASS: + reg_val = readl_relaxed(M_BKE_EN_ADDR(base, + mas_index)); + writel_relaxed((reg_val & (~M_BKE_EN_EN_BMSK)), + M_BKE_EN_ADDR(base, mas_index)); + /* Ensure that the book-keeping register writes + * go through before setting QoS mode. + * QoS mode registers might write beyond 1K + * boundary in future + */ + wmb(); + set_qos_mode(base, mas_index, 0, 0, 0); + break; + + case BIMC_QOS_MODE_REGULATOR: + case BIMC_QOS_MODE_LIMITER: + set_qos_mode(base, mas_index, 0, 0, 0); + reg_val = readl_relaxed(M_BKE_EN_ADDR(base, + mas_index)); + val = 1 << M_BKE_EN_EN_SHFT; + /* Ensure that the book-keeping register writes + * go through before setting QoS mode. 
+ * QoS mode registers might write beyond 1K + * boundary in future + */ + wmb(); + writel_relaxed(((reg_val & (~M_BKE_EN_EN_BMSK)) | (val & + M_BKE_EN_EN_BMSK)), M_BKE_EN_ADDR(base, + mas_index)); + break; + default: + break; + } +} + +static void set_qos_prio_rl(void __iomem *addr, uint32_t rmsk, + uint8_t index, struct msm_bus_bimc_qos_mode *qmode) +{ + uint32_t reg_val, val0, val; + + /* Note, addr is already passed with right mas_index */ + reg_val = readl_relaxed(addr) & rmsk; + val0 = BKE_HEALTH_VAL(qmode->rl.qhealth[index].limit_commands, + qmode->rl.qhealth[index].areq_prio, + qmode->rl.qhealth[index].prio_level); + val = ((reg_val & (~(BKE_HEALTH_MASK))) | (val0 & BKE_HEALTH_MASK)); + writel_relaxed(val, addr); + /* Ensure that priority for regulator/limiter modes are + * set before returning + */ + wmb(); + +} + +static void msm_bus_bimc_set_qos_prio(void __iomem *base, + uint32_t mas_index, uint8_t qmode_sel, + struct msm_bus_bimc_qos_mode *qmode) +{ + uint32_t reg_val, val; + + switch (qmode_sel) { + case BIMC_QOS_MODE_FIXED: + reg_val = readl_relaxed(M_PRIOLVL_OVERRIDE_ADDR( + base, mas_index)) & M_PRIOLVL_OVERRIDE_RMSK; + val = qmode->fixed.prio_level << + M_PRIOLVL_OVERRIDE_SHFT; + writel_relaxed(((reg_val & + ~(M_PRIOLVL_OVERRIDE_BMSK)) | (val + & M_PRIOLVL_OVERRIDE_BMSK)), + M_PRIOLVL_OVERRIDE_ADDR(base, mas_index)); + + reg_val = readl_relaxed(M_RD_CMD_OVERRIDE_ADDR( + base, mas_index)) & M_RD_CMD_OVERRIDE_RMSK; + val = qmode->fixed.areq_prio_rd << + M_RD_CMD_OVERRIDE_AREQPRIO_SHFT; + writel_relaxed(((reg_val & ~(M_RD_CMD_OVERRIDE_AREQPRIO_BMSK)) + | (val & M_RD_CMD_OVERRIDE_AREQPRIO_BMSK)), + M_RD_CMD_OVERRIDE_ADDR(base, mas_index)); + + reg_val = readl_relaxed(M_WR_CMD_OVERRIDE_ADDR( + base, mas_index)) & M_WR_CMD_OVERRIDE_RMSK; + val = qmode->fixed.areq_prio_wr << + M_WR_CMD_OVERRIDE_AREQPRIO_SHFT; + writel_relaxed(((reg_val & ~(M_WR_CMD_OVERRIDE_AREQPRIO_BMSK)) + | (val & M_WR_CMD_OVERRIDE_AREQPRIO_BMSK)), + M_WR_CMD_OVERRIDE_ADDR(base, 
mas_index)); + /* Ensure that fixed mode register writes go through + * before returning + */ + wmb(); + break; + + case BIMC_QOS_MODE_REGULATOR: + case BIMC_QOS_MODE_LIMITER: + set_qos_prio_rl(M_BKE_HEALTH_3_CONFIG_ADDR(base, + mas_index), M_BKE_HEALTH_3_CONFIG_RMSK, 3, qmode); + set_qos_prio_rl(M_BKE_HEALTH_2_CONFIG_ADDR(base, + mas_index), M_BKE_HEALTH_2_CONFIG_RMSK, 2, qmode); + set_qos_prio_rl(M_BKE_HEALTH_1_CONFIG_ADDR(base, + mas_index), M_BKE_HEALTH_1_CONFIG_RMSK, 1, qmode); + set_qos_prio_rl(M_BKE_HEALTH_0_CONFIG_ADDR(base, + mas_index), M_BKE_HEALTH_0_CONFIG_RMSK, 0 , qmode); + break; + case BIMC_QOS_MODE_BYPASS: + default: + break; + } +} + +static void set_qos_bw_regs(void __iomem *baddr, uint32_t mas_index, + int32_t th, int32_t tm, int32_t tl, uint32_t gp, + uint32_t gc) +{ + int32_t reg_val, val; + int32_t bke_reg_val; + int16_t val2; + + /* Disable BKE before writing to registers as per spec */ + bke_reg_val = readl_relaxed(M_BKE_EN_ADDR(baddr, mas_index)); + writel_relaxed((bke_reg_val & ~(M_BKE_EN_EN_BMSK)), + M_BKE_EN_ADDR(baddr, mas_index)); + + /* Write values of registers calculated */ + reg_val = readl_relaxed(M_BKE_GP_ADDR(baddr, mas_index)) + & M_BKE_GP_RMSK; + val = gp << M_BKE_GP_GP_SHFT; + writel_relaxed(((reg_val & ~(M_BKE_GP_GP_BMSK)) | (val & + M_BKE_GP_GP_BMSK)), M_BKE_GP_ADDR(baddr, mas_index)); + + reg_val = readl_relaxed(M_BKE_GC_ADDR(baddr, mas_index)) & + M_BKE_GC_RMSK; + val = gc << M_BKE_GC_GC_SHFT; + writel_relaxed(((reg_val & ~(M_BKE_GC_GC_BMSK)) | (val & + M_BKE_GC_GC_BMSK)), M_BKE_GC_ADDR(baddr, mas_index)); + + reg_val = readl_relaxed(M_BKE_THH_ADDR(baddr, mas_index)) & + M_BKE_THH_RMSK; + val = th << M_BKE_THH_THRESH_SHFT; + writel_relaxed(((reg_val & ~(M_BKE_THH_THRESH_BMSK)) | (val & + M_BKE_THH_THRESH_BMSK)), M_BKE_THH_ADDR(baddr, mas_index)); + + reg_val = readl_relaxed(M_BKE_THM_ADDR(baddr, mas_index)) & + M_BKE_THM_RMSK; + val2 = tm << M_BKE_THM_THRESH_SHFT; + writel_relaxed(((reg_val & ~(M_BKE_THM_THRESH_BMSK)) | 
(val2 & + M_BKE_THM_THRESH_BMSK)), M_BKE_THM_ADDR(baddr, mas_index)); + + reg_val = readl_relaxed(M_BKE_THL_ADDR(baddr, mas_index)) & + M_BKE_THL_RMSK; + val2 = tl << M_BKE_THL_THRESH_SHFT; + writel_relaxed(((reg_val & ~(M_BKE_THL_THRESH_BMSK)) | + (val2 & M_BKE_THL_THRESH_BMSK)), M_BKE_THL_ADDR(baddr, + mas_index)); + + /* Ensure that all bandwidth register writes have completed + * before returning + */ + wmb(); +} + +static void msm_bus_bimc_set_qos_bw(void __iomem *base, uint32_t qos_freq, + uint32_t mas_index, struct msm_bus_bimc_qos_bw *qbw) +{ + uint32_t bke_en; + + /* Validate QOS Frequency */ + if (qos_freq == 0) { + MSM_BUS_DBG("Zero frequency\n"); + return; + } + + /* Get enable bit for BKE before programming the period */ + bke_en = (readl_relaxed(M_BKE_EN_ADDR(base, mas_index)) & + M_BKE_EN_EN_BMSK) >> M_BKE_EN_EN_SHFT; + + /* Only calculate if there's a requested bandwidth and window */ + if (qbw->bw && qbw->ws) { + int64_t th, tm, tl; + uint32_t gp, gc; + int64_t gp_nominal, gp_required, gp_calc, data, temp; + int64_t win = qbw->ws * qos_freq; + temp = win; + /* + * Calculate nominal grant period defined by requested + * window size. + * Ceil this value to max grant period. + */ + bimc_div(&temp, 1000000); + gp_nominal = min_t(uint64_t, MAX_GRANT_PERIOD, temp); + /* + * Calculate max window size, defined by bw request. + * Units: (KHz, MB/s) + */ + gp_calc = MAX_GC * qos_freq * 1000; + gp_required = gp_calc; + bimc_div(&gp_required, qbw->bw); + + /* User min of two grant periods */ + gp = min_t(int64_t, gp_nominal, gp_required); + + /* Calculate bandwith in grants and ceil. 
*/ + temp = qbw->bw * gp; + data = qos_freq * 1000; + bimc_div(&temp, data); + gc = min_t(int64_t, MAX_GC, temp); + + /* Calculate thresholds */ + th = qbw->bw - qbw->thh; + tm = qbw->bw - qbw->thm; + tl = qbw->bw - qbw->thl; + + th = th * gp; + bimc_div(&th, data); + tm = tm * gp; + bimc_div(&tm, data); + tl = tl * gp; + bimc_div(&tl, data); + + MSM_BUS_DBG("BIMC: BW: mas_index: %d, th: %llu tm: %llu\n", + mas_index, th, tm); + MSM_BUS_DBG("BIMC: tl: %llu gp:%u gc: %u bke_en: %u\n", + tl, gp, gc, bke_en); + set_qos_bw_regs(base, mas_index, th, tm, tl, gp, gc); + } else + /* Clear bandwidth registers */ + set_qos_bw_regs(base, mas_index, 0, 0, 0, 0, 0); +} + +static int msm_bus_bimc_allocate_commit_data(struct msm_bus_fabric_registration + *fab_pdata, void **cdata, int ctx) +{ + struct msm_bus_bimc_commit **cd = (struct msm_bus_bimc_commit **)cdata; + struct msm_bus_bimc_info *binfo = + (struct msm_bus_bimc_info *)fab_pdata->hw_data; + + MSM_BUS_DBG("Allocating BIMC commit data\n"); + *cd = kzalloc(sizeof(struct msm_bus_bimc_commit), GFP_KERNEL); + if (!*cd) { + MSM_BUS_DBG("Couldn't alloc mem for cdata\n"); + return -ENOMEM; + } + + (*cd)->mas = binfo->cdata[ctx].mas; + (*cd)->slv = binfo->cdata[ctx].slv; + + return 0; +} + +static void *msm_bus_bimc_allocate_bimc_data(struct platform_device *pdev, + struct msm_bus_fabric_registration *fab_pdata) +{ + struct resource *bimc_mem; + struct resource *bimc_io; + struct msm_bus_bimc_info *binfo; + int i; + + MSM_BUS_DBG("Allocating BIMC data\n"); + binfo = kzalloc(sizeof(struct msm_bus_bimc_info), GFP_KERNEL); + if (!binfo) { + WARN(!binfo, "Couldn't alloc mem for bimc_info\n"); + return NULL; + } + + binfo->qos_freq = fab_pdata->qos_freq; + + binfo->params.nmasters = fab_pdata->nmasters; + binfo->params.nslaves = fab_pdata->nslaves; + binfo->params.bus_id = fab_pdata->id; + + for (i = 0; i < NUM_CTX; i++) { + binfo->cdata[i].mas = kzalloc(sizeof(struct + msm_bus_node_hw_info) * fab_pdata->nmasters * 2, + GFP_KERNEL); + 
if (!binfo->cdata[i].mas) { + MSM_BUS_ERR("Couldn't alloc mem for bimc master hw\n"); + kfree(binfo); + return NULL; + } + + binfo->cdata[i].slv = kzalloc(sizeof(struct + msm_bus_node_hw_info) * fab_pdata->nslaves * 2, + GFP_KERNEL); + if (!binfo->cdata[i].slv) { + MSM_BUS_DBG("Couldn't alloc mem for bimc slave hw\n"); + kfree(binfo->cdata[i].mas); + kfree(binfo); + return NULL; + } + } + + if (fab_pdata->virt) { + MSM_BUS_DBG("Don't get memory regions for virtual fabric\n"); + goto skip_mem; + } + + bimc_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!bimc_mem) { + MSM_BUS_ERR("Cannot get BIMC Base address\n"); + kfree(binfo); + return NULL; + } + + bimc_io = request_mem_region(bimc_mem->start, + resource_size(bimc_mem), pdev->name); + if (!bimc_io) { + MSM_BUS_ERR("BIMC memory unavailable\n"); + kfree(binfo); + return NULL; + } + + binfo->base = ioremap(bimc_mem->start, resource_size(bimc_mem)); + if (!binfo->base) { + MSM_BUS_ERR("IOremap failed for BIMC!\n"); + release_mem_region(bimc_mem->start, resource_size(bimc_mem)); + kfree(binfo); + return NULL; + } + +skip_mem: + fab_pdata->hw_data = (void *)binfo; + return (void *)binfo; +} + +static void free_commit_data(void *cdata) +{ + struct msm_bus_bimc_commit *cd = (struct msm_bus_bimc_commit *)cdata; + + kfree(cd->mas); + kfree(cd->slv); + kfree(cd); +} + +static void bke_switch( + void __iomem *baddr, uint32_t mas_index, bool req, int mode) +{ + uint32_t reg_val, val, cur_val; + + val = req << M_BKE_EN_EN_SHFT; + reg_val = readl_relaxed(M_BKE_EN_ADDR(baddr, mas_index)); + cur_val = reg_val & M_BKE_EN_RMSK; + if (val == cur_val) + return; + + if (!req && mode == BIMC_QOS_MODE_FIXED) + set_qos_mode(baddr, mas_index, 1, 1, 1); + + writel_relaxed(((reg_val & ~(M_BKE_EN_EN_BMSK)) | (val & + M_BKE_EN_EN_BMSK)), M_BKE_EN_ADDR(baddr, mas_index)); + /* Make sure BKE on/off goes through before changing priorities */ + wmb(); + + if (req) + set_qos_mode(baddr, mas_index, 0, 0, 0); +} + +static void 
bimc_set_static_qos_bw(void __iomem *base, unsigned int qos_freq, + int mport, struct msm_bus_bimc_qos_bw *qbw) +{ + int32_t bw_mbps, thh = 0, thm, thl, gc; + int32_t gp; + u64 temp; + + if (qos_freq == 0) { + MSM_BUS_DBG("No QoS Frequency.\n"); + return; + } + + if (!(qbw->bw && qbw->gp)) { + MSM_BUS_DBG("No QoS Bandwidth or Window size\n"); + return; + } + + /* Convert bandwidth to MBPS */ + temp = qbw->bw; + bimc_div(&temp, 1000000); + bw_mbps = temp; + + /* Grant period in clock cycles + * Grant period from bandwidth structure + * is in nano seconds, QoS freq is in KHz. + * Divide by 1000 to get clock cycles. + */ + gp = (qos_freq * qbw->gp) / (1000 * NSEC_PER_USEC); + + /* Grant count = BW in MBps * Grant period + * in micro seconds + */ + gc = bw_mbps * (qbw->gp / NSEC_PER_USEC); + gc = min(gc, MAX_GC); + + /* Medium threshold = -((Medium Threshold percentage * + * Grant count) / 100) + */ + thm = -((qbw->thmp * gc) / 100); + qbw->thm = thm; + + /* Low threshold = -(Grant count) */ + thl = -gc; + qbw->thl = thl; + + MSM_BUS_DBG("%s: BKE parameters: gp %d, gc %d, thm %d thl %d thh %d", + __func__, gp, gc, thm, thl, thh); + + trace_bus_bke_params(gc, gp, thl, thm, thl); + set_qos_bw_regs(base, mport, thh, thm, thl, gp, gc); +} + +static void msm_bus_bimc_config_master( + struct msm_bus_fabric_registration *fab_pdata, + struct msm_bus_inode_info *info, + uint64_t req_clk, uint64_t req_bw) +{ + int mode, i, ports; + struct msm_bus_bimc_info *binfo; + uint64_t bw = 0; + + binfo = (struct msm_bus_bimc_info *)fab_pdata->hw_data; + ports = info->node_info->num_mports; + + /** + * Here check the details of dual configuration. + * Take actions based on different modes. + * Check for threshold if limiter mode, etc. 
+ */ + + if (req_clk <= info->node_info->th[0]) { + mode = info->node_info->mode; + bw = info->node_info->bimc_bw[0]; + } else if ((info->node_info->num_thresh > 1) && + (req_clk <= info->node_info->th[1])) { + mode = info->node_info->mode; + bw = info->node_info->bimc_bw[1]; + } else + mode = info->node_info->mode_thresh; + + switch (mode) { + case BIMC_QOS_MODE_BYPASS: + case BIMC_QOS_MODE_FIXED: + for (i = 0; i < ports; i++) + bke_switch(binfo->base, info->node_info->qport[i], + BKE_OFF, mode); + break; + case BIMC_QOS_MODE_REGULATOR: + case BIMC_QOS_MODE_LIMITER: + for (i = 0; i < ports; i++) { + /* If not in fixed mode, update bandwidth */ + if ((info->node_info->cur_lim_bw != bw) + && (mode != BIMC_QOS_MODE_FIXED)) { + struct msm_bus_bimc_qos_bw qbw; + qbw.ws = info->node_info->ws; + qbw.bw = bw; + qbw.gp = info->node_info->bimc_gp; + qbw.thmp = info->node_info->bimc_thmp; + bimc_set_static_qos_bw(binfo->base, + binfo->qos_freq, + info->node_info->qport[i], &qbw); + info->node_info->cur_lim_bw = bw; + MSM_BUS_DBG("%s: Qos is %d reqclk %llu bw %llu", + __func__, mode, req_clk, bw); + } + bke_switch(binfo->base, info->node_info->qport[i], + BKE_ON, mode); + } + break; + default: + break; + } +} + +static void msm_bus_bimc_update_bw(struct msm_bus_inode_info *hop, + struct msm_bus_inode_info *info, + struct msm_bus_fabric_registration *fab_pdata, + void *sel_cdata, int *master_tiers, + int64_t add_bw) +{ + struct msm_bus_bimc_info *binfo; + struct msm_bus_bimc_qos_bw qbw; + int i; + int64_t bw; + int ports = info->node_info->num_mports; + struct msm_bus_bimc_commit *sel_cd = + (struct msm_bus_bimc_commit *)sel_cdata; + + MSM_BUS_DBG("BIMC: Update bw for ID %d, with IID: %d: %lld\n", + info->node_info->id, info->node_info->priv_id, add_bw); + + binfo = (struct msm_bus_bimc_info *)fab_pdata->hw_data; + + if (info->node_info->num_mports == 0) { + MSM_BUS_DBG("BIMC: Skip Master BW\n"); + goto skip_mas_bw; + } + + ports = info->node_info->num_mports; + bw = 
INTERLEAVED_BW(fab_pdata, add_bw, ports); + + for (i = 0; i < ports; i++) { + sel_cd->mas[info->node_info->masterp[i]].bw += bw; + sel_cd->mas[info->node_info->masterp[i]].hw_id = + info->node_info->mas_hw_id; + MSM_BUS_DBG("BIMC: Update mas_bw for ID: %d -> %llu\n", + info->node_info->priv_id, + sel_cd->mas[info->node_info->masterp[i]].bw); + if (info->node_info->hw_sel == MSM_BUS_RPM) + sel_cd->mas[info->node_info->masterp[i]].dirty = 1; + else { + if (!info->node_info->qport) { + MSM_BUS_DBG("No qos ports to update!\n"); + break; + } + if (!(info->node_info->mode == BIMC_QOS_MODE_REGULATOR) + || (info->node_info->mode == + BIMC_QOS_MODE_LIMITER)) { + MSM_BUS_DBG("Skip QoS reg programming\n"); + break; + } + + MSM_BUS_DBG("qport: %d\n", info->node_info->qport[i]); + qbw.bw = sel_cd->mas[info->node_info->masterp[i]].bw; + qbw.ws = info->node_info->ws; + /* Threshold low = 90% of bw */ + qbw.thl = div_s64((90 * bw), 100); + /* Threshold medium = bw */ + qbw.thm = bw; + /* Threshold high = 10% more than bw */ + qbw.thh = div_s64((110 * bw), 100); + /* Check if info is a shared master. + * If it is, mark it dirty + * If it isn't, then set QOS Bandwidth. + * Also if dual-conf is set, don't program bw regs. 
+ **/ + if (!info->node_info->dual_conf && + ((info->node_info->mode == BIMC_QOS_MODE_LIMITER) || + (info->node_info->mode == BIMC_QOS_MODE_REGULATOR))) + msm_bus_bimc_set_qos_bw(binfo->base, + binfo->qos_freq, + info->node_info->qport[i], &qbw); + } + } + +skip_mas_bw: + ports = hop->node_info->num_sports; + MSM_BUS_DBG("BIMC: ID: %d, Sports: %d\n", hop->node_info->priv_id, + ports); + + for (i = 0; i < ports; i++) { + sel_cd->slv[hop->node_info->slavep[i]].bw += add_bw; + sel_cd->slv[hop->node_info->slavep[i]].hw_id = + hop->node_info->slv_hw_id; + MSM_BUS_DBG("BIMC: Update slave_bw: ID: %d -> %llu\n", + hop->node_info->priv_id, + sel_cd->slv[hop->node_info->slavep[i]].bw); + MSM_BUS_DBG("BIMC: Update slave_bw: index: %d\n", + hop->node_info->slavep[i]); + /* Check if hop is a shared slave. + * If it is, mark it dirty + * If it isn't, then nothing to be done as the + * slaves are in bypass mode. + **/ + if (hop->node_info->hw_sel == MSM_BUS_RPM) { + MSM_BUS_DBG("Slave dirty: %d, slavep: %d\n", + hop->node_info->priv_id, + hop->node_info->slavep[i]); + sel_cd->slv[hop->node_info->slavep[i]].dirty = 1; + } + } +} + +static int msm_bus_bimc_commit(struct msm_bus_fabric_registration + *fab_pdata, void *hw_data, void **cdata) +{ + MSM_BUS_DBG("\nReached BIMC Commit\n"); + msm_bus_remote_hw_commit(fab_pdata, hw_data, cdata); + return 0; +} + +static void msm_bus_bimc_config_limiter( + struct msm_bus_fabric_registration *fab_pdata, + struct msm_bus_inode_info *info) +{ + struct msm_bus_bimc_info *binfo; + int mode, i, ports; + + binfo = (struct msm_bus_bimc_info *)fab_pdata->hw_data; + ports = info->node_info->num_mports; + + if (!info->node_info->qport) { + MSM_BUS_DBG("No QoS Ports to init\n"); + return; + } + + if (info->cur_lim_bw) + mode = BIMC_QOS_MODE_LIMITER; + else + mode = info->node_info->mode; + + switch (mode) { + case BIMC_QOS_MODE_BYPASS: + case BIMC_QOS_MODE_FIXED: + for (i = 0; i < ports; i++) + bke_switch(binfo->base, info->node_info->qport[i], + 
BKE_OFF, mode); + break; + case BIMC_QOS_MODE_REGULATOR: + case BIMC_QOS_MODE_LIMITER: + if (info->cur_lim_bw != info->cur_prg_bw) { + MSM_BUS_DBG("Enabled BKE throttling node %d to %llu\n", + info->node_info->id, info->cur_lim_bw); + trace_bus_bimc_config_limiter(info->node_info->id, + info->cur_lim_bw); + for (i = 0; i < ports; i++) { + /* If not in fixed mode, update bandwidth */ + struct msm_bus_bimc_qos_bw qbw; + + qbw.ws = info->node_info->ws; + qbw.bw = info->cur_lim_bw; + qbw.gp = info->node_info->bimc_gp; + qbw.thmp = info->node_info->bimc_thmp; + bimc_set_static_qos_bw(binfo->base, + binfo->qos_freq, + info->node_info->qport[i], &qbw); + bke_switch(binfo->base, + info->node_info->qport[i], + BKE_ON, mode); + info->cur_prg_bw = qbw.bw; + } + } + break; + default: + break; + } +} + +static void bimc_init_mas_reg(struct msm_bus_bimc_info *binfo, + struct msm_bus_inode_info *info, + struct msm_bus_bimc_qos_mode *qmode, int mode) +{ + int i; + + switch (mode) { + case BIMC_QOS_MODE_FIXED: + qmode->fixed.prio_level = info->node_info->prio_lvl; + qmode->fixed.areq_prio_rd = info->node_info->prio_rd; + qmode->fixed.areq_prio_wr = info->node_info->prio_wr; + break; + case BIMC_QOS_MODE_LIMITER: + qmode->rl.qhealth[0].limit_commands = 1; + qmode->rl.qhealth[1].limit_commands = 0; + qmode->rl.qhealth[2].limit_commands = 0; + qmode->rl.qhealth[3].limit_commands = 0; + break; + default: + break; + } + + if (!info->node_info->qport) { + MSM_BUS_DBG("No QoS Ports to init\n"); + return; + } + + for (i = 0; i < info->node_info->num_mports; i++) { + /* If not in bypass mode, update priority */ + if (mode != BIMC_QOS_MODE_BYPASS) { + msm_bus_bimc_set_qos_prio(binfo->base, + info->node_info-> + qport[i], mode, qmode); + + /* If not in fixed mode, update bandwidth */ + if (mode != BIMC_QOS_MODE_FIXED) { + struct msm_bus_bimc_qos_bw qbw; + qbw.ws = info->node_info->ws; + qbw.bw = info->node_info->bimc_bw[0]; + qbw.gp = info->node_info->bimc_gp; + qbw.thmp = 
info->node_info->bimc_thmp; + bimc_set_static_qos_bw(binfo->base, + binfo->qos_freq, + info->node_info->qport[i], &qbw); + } + } + + /* set mode */ + msm_bus_bimc_set_qos_mode(binfo->base, + info->node_info->qport[i], + mode); + } +} + +static void init_health_regs(struct msm_bus_bimc_info *binfo, + struct msm_bus_inode_info *info, + struct msm_bus_bimc_qos_mode *qmode, + int mode) +{ + int i; + + if (mode == BIMC_QOS_MODE_LIMITER) { + qmode->rl.qhealth[0].limit_commands = 1; + qmode->rl.qhealth[1].limit_commands = 0; + qmode->rl.qhealth[2].limit_commands = 0; + qmode->rl.qhealth[3].limit_commands = 0; + + if (!info->node_info->qport) { + MSM_BUS_DBG("No QoS Ports to init\n"); + return; + } + + for (i = 0; i < info->node_info->num_mports; i++) { + /* If not in bypass mode, update priority */ + if (mode != BIMC_QOS_MODE_BYPASS) + msm_bus_bimc_set_qos_prio(binfo->base, + info->node_info->qport[i], mode, qmode); + } + } +} + + +static int msm_bus_bimc_mas_init(struct msm_bus_bimc_info *binfo, + struct msm_bus_inode_info *info) +{ + struct msm_bus_bimc_qos_mode *qmode; + qmode = kzalloc(sizeof(struct msm_bus_bimc_qos_mode), + GFP_KERNEL); + if (!qmode) { + MSM_BUS_WARN("Couldn't alloc prio data for node: %d\n", + info->node_info->id); + return -ENOMEM; + } + + info->hw_data = (void *)qmode; + + /** + * If the master supports dual configuration, + * configure registers for both modes + */ + if (info->node_info->dual_conf) + bimc_init_mas_reg(binfo, info, qmode, + info->node_info->mode_thresh); + else if (info->node_info->nr_lim) + init_health_regs(binfo, info, qmode, BIMC_QOS_MODE_LIMITER); + + bimc_init_mas_reg(binfo, info, qmode, info->node_info->mode); + return 0; +} + +static void msm_bus_bimc_node_init(void *hw_data, + struct msm_bus_inode_info *info) +{ + struct msm_bus_bimc_info *binfo = + (struct msm_bus_bimc_info *)hw_data; + + if (!IS_SLAVE(info->node_info->priv_id) && + (info->node_info->hw_sel != MSM_BUS_RPM)) + msm_bus_bimc_mas_init(binfo, info); +} + 
+static int msm_bus_bimc_port_halt(uint32_t haltid, uint8_t mport) +{ + return 0; +} + +static int msm_bus_bimc_port_unhalt(uint32_t haltid, uint8_t mport) +{ + return 0; +} + +static int msm_bus_bimc_limit_mport(struct msm_bus_node_device_type *info, + void __iomem *qos_base, uint32_t qos_off, + uint32_t qos_delta, uint32_t qos_freq, + bool enable_lim, u64 lim_bw) +{ + int mode; + int i; + + if (ZERO_OR_NULL_PTR(info->node_info->qport)) { + MSM_BUS_DBG("No QoS Ports to limit\n"); + return 0; + } + + if (enable_lim && lim_bw) { + mode = BIMC_QOS_MODE_LIMITER; + + if (!info->node_info->lim_bw) { + struct msm_bus_bimc_qos_mode qmode; + qmode.rl.qhealth[0].limit_commands = 1; + qmode.rl.qhealth[1].limit_commands = 0; + qmode.rl.qhealth[2].limit_commands = 0; + qmode.rl.qhealth[3].limit_commands = 0; + + for (i = 0; i < info->node_info->num_qports; i++) { + /* If not in bypass mode, update priority */ + if (mode != BIMC_QOS_MODE_BYPASS) + msm_bus_bimc_set_qos_prio(qos_base, + info->node_info->qport[i], mode, + &qmode); + } + } + + for (i = 0; i < info->node_info->num_qports; i++) { + struct msm_bus_bimc_qos_bw qbw; + /* If not in fixed mode, update bandwidth */ + if ((info->node_info->lim_bw != lim_bw)) { + qbw.ws = info->node_info->qos_params.ws; + qbw.bw = lim_bw; + qbw.gp = info->node_info->qos_params.gp; + qbw.thmp = info->node_info->qos_params.thmp; + bimc_set_static_qos_bw(qos_base, qos_freq, + info->node_info->qport[i], &qbw); + } + bke_switch(qos_base, info->node_info->qport[i], + BKE_ON, mode); + } + info->node_info->lim_bw = lim_bw; + } else { + mode = info->node_info->qos_params.mode; + for (i = 0; i < info->node_info->num_qports; i++) { + bke_switch(qos_base, info->node_info->qport[i], + BKE_OFF, mode); + } + } + info->node_info->qos_params.cur_mode = mode; + return 0; +} + +static bool msm_bus_bimc_update_bw_reg(int mode) +{ + bool ret = false; + + if ((mode == BIMC_QOS_MODE_LIMITER) + || (mode == BIMC_QOS_MODE_REGULATOR)) + ret = true; + + return ret; +} 
+ +static int msm_bus_bimc_qos_init(struct msm_bus_node_device_type *info, + void __iomem *qos_base, + uint32_t qos_off, uint32_t qos_delta, + uint32_t qos_freq) +{ + int i; + struct msm_bus_bimc_qos_mode qmode; + + switch (info->node_info->qos_params.mode) { + case BIMC_QOS_MODE_FIXED: + qmode.fixed.prio_level = info->node_info->qos_params.prio_lvl; + qmode.fixed.areq_prio_rd = info->node_info->qos_params.prio_rd; + qmode.fixed.areq_prio_wr = info->node_info->qos_params.prio_wr; + break; + case BIMC_QOS_MODE_LIMITER: + qmode.rl.qhealth[0].limit_commands = 1; + qmode.rl.qhealth[1].limit_commands = 0; + qmode.rl.qhealth[2].limit_commands = 0; + qmode.rl.qhealth[3].limit_commands = 0; + break; + default: + break; + } + + if (ZERO_OR_NULL_PTR(info->node_info->qport)) { + MSM_BUS_DBG("No QoS Ports to init\n"); + return 0; + } + + for (i = 0; i < info->node_info->num_qports; i++) { + /* If not in bypass mode, update priority */ + if (info->node_info->qos_params.mode != BIMC_QOS_MODE_BYPASS) + msm_bus_bimc_set_qos_prio(qos_base, info->node_info-> + qport[i], info->node_info->qos_params.mode, + &qmode); + + /* set mode */ + if (info->node_info->qos_params.mode == BIMC_QOS_MODE_LIMITER) + bke_switch(qos_base, info->node_info->qport[i], + BKE_OFF, BIMC_QOS_MODE_FIXED); + else + msm_bus_bimc_set_qos_mode(qos_base, + info->node_info->qport[i], + info->node_info->qos_params.mode); + } + + return 0; +} + +static int msm_bus_bimc_set_bw(struct msm_bus_node_device_type *dev, + void __iomem *qos_base, uint32_t qos_off, + uint32_t qos_delta, uint32_t qos_freq) +{ + struct msm_bus_bimc_qos_bw qbw; + int i; + int64_t bw = 0; + int ret = 0; + struct msm_bus_node_info_type *info = dev->node_info; + + if (info && info->num_qports && + ((info->qos_params.mode == BIMC_QOS_MODE_LIMITER) || + (info->qos_params.mode == BIMC_QOS_MODE_REGULATOR))) { + bw = msm_bus_div64(info->num_qports, + dev->node_ab.ab[DUAL_CTX]); + + for (i = 0; i < info->num_qports; i++) { + MSM_BUS_DBG("BIMC: Update 
mas_bw for ID: %d -> %llu\n", + info->id, bw); + + if (!info->qport) { + MSM_BUS_DBG("No qos ports to update!\n"); + break; + } + + qbw.bw = bw + info->qos_params.bw_buffer; + trace_bus_bimc_config_limiter(info->id, bw); + + /* Default to gp of 5us */ + qbw.gp = (info->qos_params.gp ? + info->qos_params.gp : 5000); + /* Default to thmp of 50% */ + qbw.thmp = (info->qos_params.thmp ? + info->qos_params.thmp : 50); + /* + * If the BW vote is 0 then set the QoS mode to + * Fixed. + */ + if (bw) { + bimc_set_static_qos_bw(qos_base, qos_freq, + info->qport[i], &qbw); + bke_switch(qos_base, info->qport[i], + BKE_ON, info->qos_params.mode); + } else { + bke_switch(qos_base, info->qport[i], + BKE_OFF, BIMC_QOS_MODE_FIXED); + } + } + } + return ret; +} + +int msm_bus_bimc_hw_init(struct msm_bus_fabric_registration *pdata, + struct msm_bus_hw_algorithm *hw_algo) +{ + /* Set interleaving to true by default */ + MSM_BUS_DBG("\nInitializing BIMC...\n"); + pdata->il_flag = true; + hw_algo->allocate_commit_data = msm_bus_bimc_allocate_commit_data; + hw_algo->allocate_hw_data = msm_bus_bimc_allocate_bimc_data; + hw_algo->node_init = msm_bus_bimc_node_init; + hw_algo->free_commit_data = free_commit_data; + hw_algo->update_bw = msm_bus_bimc_update_bw; + hw_algo->commit = msm_bus_bimc_commit; + hw_algo->port_halt = msm_bus_bimc_port_halt; + hw_algo->port_unhalt = msm_bus_bimc_port_unhalt; + hw_algo->config_master = msm_bus_bimc_config_master; + hw_algo->config_limiter = msm_bus_bimc_config_limiter; + hw_algo->update_bw_reg = msm_bus_bimc_update_bw_reg; + /* BIMC slaves are shared. 
Slave registers are set through RPM */ + if (!pdata->ahb) + pdata->rpm_enabled = 1; + return 0; +} + +int msm_bus_bimc_set_ops(struct msm_bus_node_device_type *bus_dev) +{ + if (!bus_dev) + return -ENODEV; + else { + bus_dev->fabdev->noc_ops.qos_init = msm_bus_bimc_qos_init; + bus_dev->fabdev->noc_ops.set_bw = msm_bus_bimc_set_bw; + bus_dev->fabdev->noc_ops.limit_mport = msm_bus_bimc_limit_mport; + bus_dev->fabdev->noc_ops.update_bw_reg = + msm_bus_bimc_update_bw_reg; + } + return 0; +} +EXPORT_SYMBOL(msm_bus_bimc_set_ops); diff --git a/drivers/soc/qcom/msm_bus/msm_bus_bimc.h b/drivers/soc/qcom/msm_bus/msm_bus_bimc.h new file mode 100644 index 000000000000..12c8325e9863 --- /dev/null +++ b/drivers/soc/qcom/msm_bus/msm_bus_bimc.h @@ -0,0 +1,127 @@ +/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _ARCH_ARM_MACH_MSM_BUS_BIMC_H +#define _ARCH_ARM_MACH_MSM_BUS_BIMC_H + +struct msm_bus_bimc_params { + uint32_t bus_id; + uint32_t addr_width; + uint32_t data_width; + uint32_t nmasters; + uint32_t nslaves; +}; + +struct msm_bus_bimc_commit { + struct msm_bus_node_hw_info *mas; + struct msm_bus_node_hw_info *slv; +}; + +struct msm_bus_bimc_info { + void __iomem *base; + uint32_t base_addr; + uint32_t qos_freq; + struct msm_bus_bimc_params params; + struct msm_bus_bimc_commit cdata[NUM_CTX]; +}; + +struct msm_bus_bimc_node { + uint32_t conn_mask; + uint32_t data_width; + uint8_t slv_arb_mode; +}; + +enum msm_bus_bimc_arb_mode { + BIMC_ARB_MODE_RR = 0, + BIMC_ARB_MODE_PRIORITY_RR, + BIMC_ARB_MODE_TIERED_RR, +}; + + +enum msm_bus_bimc_interleave { + BIMC_INTERLEAVE_NONE = 0, + BIMC_INTERLEAVE_ODD, + BIMC_INTERLEAVE_EVEN, +}; + +struct msm_bus_bimc_slave_seg { + bool enable; + uint64_t start_addr; + uint64_t seg_size; + uint8_t interleave; +}; + +enum msm_bus_bimc_qos_mode_type { + BIMC_QOS_MODE_FIXED = 0, + BIMC_QOS_MODE_LIMITER, + BIMC_QOS_MODE_BYPASS, + BIMC_QOS_MODE_REGULATOR, +}; + +struct msm_bus_bimc_qos_health { + bool limit_commands; + uint32_t areq_prio; + uint32_t prio_level; +}; + +struct msm_bus_bimc_mode_fixed { + uint32_t prio_level; + uint32_t areq_prio_rd; + uint32_t areq_prio_wr; +}; + +struct msm_bus_bimc_mode_rl { + uint8_t qhealthnum; + struct msm_bus_bimc_qos_health qhealth[4]; +}; + +struct msm_bus_bimc_qos_mode { + uint8_t mode; + struct msm_bus_bimc_mode_fixed fixed; + struct msm_bus_bimc_mode_rl rl; +}; + +struct msm_bus_bimc_qos_bw { + uint64_t bw; /* bw is in Bytes/sec */ + uint32_t ws; /* Window size in nano seconds*/ + int64_t thh; /* Threshold high, bytes per second */ + int64_t thm; /* Threshold medium, bytes per second */ + int64_t thl; /* Threshold low, bytes per second */ + u32 gp; /* Grant Period in micro seconds */ + u32 thmp; /* Threshold medium in percentage */ +}; + +struct msm_bus_bimc_clk_gate { + bool 
core_clk_gate_en; + bool arb_clk_gate_en; /* For arbiter */ + bool port_clk_gate_en; /* For regs on BIMC core clock */ +}; + +void msm_bus_bimc_set_slave_seg(struct msm_bus_bimc_info *binfo, + uint32_t slv_index, uint32_t seg_index, + struct msm_bus_bimc_slave_seg *bsseg); +void msm_bus_bimc_set_slave_clk_gate(struct msm_bus_bimc_info *binfo, + uint32_t slv_index, struct msm_bus_bimc_clk_gate *bgate); +void msm_bus_bimc_set_mas_clk_gate(struct msm_bus_bimc_info *binfo, + uint32_t mas_index, struct msm_bus_bimc_clk_gate *bgate); +void msm_bus_bimc_arb_en(struct msm_bus_bimc_info *binfo, + uint32_t slv_index, bool en); +void msm_bus_bimc_get_params(struct msm_bus_bimc_info *binfo, + struct msm_bus_bimc_params *params); +void msm_bus_bimc_get_mas_params(struct msm_bus_bimc_info *binfo, + uint32_t mas_index, struct msm_bus_bimc_node *mparams); +void msm_bus_bimc_get_slv_params(struct msm_bus_bimc_info *binfo, + uint32_t slv_index, struct msm_bus_bimc_node *sparams); +bool msm_bus_bimc_get_arb_en(struct msm_bus_bimc_info *binfo, + uint32_t slv_index); + +#endif /*_ARCH_ARM_MACH_MSM_BUS_BIMC_H*/ diff --git a/drivers/soc/qcom/msm_bus/msm_bus_board_8974.c b/drivers/soc/qcom/msm_bus/msm_bus_board_8974.c new file mode 100644 index 000000000000..1fec726de87e --- /dev/null +++ b/drivers/soc/qcom/msm_bus/msm_bus_board_8974.c @@ -0,0 +1,2021 @@ +/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/msm-bus.h> +#include <linux/msm-bus-board.h> +#include "msm_bus_core.h" +#include "msm_bus_noc.h" +#include "msm_bus_bimc.h" + +#define NMASTERS 120 +#define NSLAVES 150 +#define NFAB_8974 7 + +enum msm_bus_8974_master_ports_type { + /* System NOC Masters */ + MASTER_PORT_LPASS_AHB = 0, + MASTER_PORT_QDSS_BAM, + MASTER_PORT_SNOC_CFG, + MASTER_PORT_GW_BIMC_SNOC, + MASTER_PORT_GW_CNOC_SNOC, + MASTER_PORT_CRYPTO_CORE0, + MASTER_PORT_CRYPTO_CORE1, + MASTER_PORT_LPASS_PROC, + MASTER_PORT_MSS, + MASTER_PORT_MSS_NAV, + MASTER_PORT_OCMEM_DMA, + MASTER_PORT_GW_PNOC_SNOC, + MASTER_PORT_WCSS, + MASTER_PORT_QDSS_ETR, + MASTER_PORT_USB3, + + /* MMSS NOC Masters */ + MASTER_PORT_GW_CNOC_MNOC_MMSS_CFG = 0, + MASTER_PORT_GW_CNOC_MNOC_CFG, + MASTER_PORT_GFX3D_PORT0, + MASTER_PORT_GFX3D_PORT1, + MASTER_PORT_JPEG, + MASTER_PORT_MDP, + /* Venus video core */ + MASTER_PORT_VIDEO_PORT0, + MASTER_PORT_VIDEO_PORT1, + MASTER_PORT_VFE = 16, + + /* BIMC Masters */ + MASTER_PORT_KMPSS_M0 = 0, + MASTER_PORT_KMPSS_M1, + MASTER_PORT_MSS_PROC, + MASTER_PORT_GW_MNOC_BIMC_0, + MASTER_PORT_GW_MNOC_BIMC_1, + MASTER_PORT_GW_SNOC_BIMC_0, + MASTER_PORT_GW_SNOC_BIMC_1, + + /* OCMEM NOC Masters */ + MASTER_PORT_CNOC_ONOC_CFG = 0, + MASTER_PORT_JPEG_OCMEM, + MASTER_PORT_MDP_OCMEM, + MASTER_PORT_VIDEO_P0_OCMEM, + MASTER_PORT_VIDEO_P1_OCMEM, + MASTER_PORT_VFE_OCMEM, + + /* Peripheral NOC Masters */ + MASTER_PORT_SDCC_1 = 0, + MASTER_PORT_SDCC_3, + MASTER_PORT_SDCC_2, + MASTER_PORT_SDCC_4, + MASTER_PORT_TSIF, + MASTER_PORT_BAM_DMA, + MASTER_PORT_BLSP_2, + MASTER_PORT_USB_HSIC, + MASTER_PORT_BLSP_1, + MASTER_PORT_USB_HS, + MASTER_PORT_PNOC_CFG, + MASTER_PORT_GW_SNOC_PNOC, + + /* Config NOC Masters */ + MASTER_PORT_RPM_INST = 0, + MASTER_PORT_RPM_DATA, + MASTER_PORT_RPM_SYS, + MASTER_PORT_DEHR, + MASTER_PORT_QDSS_DAP, + MASTER_PORT_SPDM, + MASTER_PORT_TIC, + 
MASTER_PORT_GW_SNOC_CNOC, +}; + +enum msm_bus_8974_slave_ports_type { + /* System NOC Slaves */ + SLAVE_PORT_KMPSS = 1, + SLAVE_PORT_LPASS, + SLAVE_PORT_USB3 = 4, + SLAVE_PORT_WCSS = 6, + SLAVE_PORT_GW_SNOC_BIMC_P0, + SLAVE_PORT_GW_SNOC_BIMC_P1, + SLAVE_PORT_GW_SNOC_CNOC, + SLAVE_PORT_OCIMEM, + SLAVE_PORT_SNOC_OCMEM, + SLAVE_PORT_GW_SNOC_PNOC, + SLAVE_PORT_SERVICE_SNOC, + SLAVE_PORT_QDSS_STM, + + /* MMSS NOC Slaves */ + SLAVE_PORT_CAMERA_CFG = 0, + SLAVE_PORT_DISPLAY_CFG, + SLAVE_PORT_OCMEM_CFG, + SLAVE_PORT_CPR_CFG, + SLAVE_PORT_CPR_XPU_CFG, + SLAVE_PORT_MISC_CFG = 6, + SLAVE_PORT_MISC_XPU_CFG, + SLAVE_PORT_VENUS_CFG, + SLAVE_PORT_GFX3D_CFG, + SLAVE_PORT_MMSS_CLK_CFG = 11, + SLAVE_PORT_MMSS_CLK_XPU_CFG, + SLAVE_PORT_MNOC_MPU_CFG, + SLAVE_PORT_ONOC_MPU_CFG, + SLAVE_PORT_GW_MMSS_BIMC_P0 = 16, + SLAVE_PORT_GW_MMSS_BIMC_P1, + SLAVE_PORT_SERVICE_MNOC, + + /* BIMC Slaves */ + SLAVE_PORT_EBI1_CH0 = 0, + SLAVE_PORT_EBI1_CH1, + SLAVE_PORT_KMPSS_L2, + SLAVE_PORT_GW_BIMC_SNOC, + + /* OCMEM NOC Slaves */ + SLAVE_PORT_OCMEM_P0 = 0, + SLAVE_PORT_OCMEM_P1, + SLAVE_PORT_SERVICE_ONOC, + + /*Peripheral NOC Slaves */ + SLAVE_PORT_SDCC_1 = 0, + SLAVE_PORT_SDCC_3, + SLAVE_PORT_SDCC_2, + SLAVE_PORT_SDCC_4, + SLAVE_PORT_TSIF, + SLAVE_PORT_BAM_DMA, + SLAVE_PORT_BLSP_2, + SLAVE_PORT_USB_HSIC, + SLAVE_PORT_BLSP_1, + SLAVE_PORT_USB_HS, + SLAVE_PORT_PDM, + SLAVE_PORT_PERIPH_APU_CFG, + SLAVE_PORT_PNOC_MPU_CFG, + SLAVE_PORT_PRNG, + SLAVE_PORT_GW_PNOC_SNOC, + SLAVE_PORT_SERVICE_PNOC, + + /* Config NOC slaves */ + SLAVE_PORT_CLK_CTL = 1, + SLAVE_PORT_CNOC_MSS, + SLAVE_PORT_SECURITY, + SLAVE_PORT_TCSR, + SLAVE_PORT_TLMM, + SLAVE_PORT_CRYPTO_0_CFG, + SLAVE_PORT_CRYPTO_1_CFG, + SLAVE_PORT_IMEM_CFG, + SLAVE_PORT_MESSAGE_RAM, + SLAVE_PORT_BIMC_CFG, + SLAVE_PORT_BOOT_ROM, + SLAVE_PORT_CNOC_MNOC_MMSS_CFG, + SLAVE_PORT_PMIC_ARB, + SLAVE_PORT_SPDM_WRAPPER, + SLAVE_PORT_DEHR_CFG, + SLAVE_PORT_MPM, + SLAVE_PORT_QDSS_CFG, + SLAVE_PORT_RBCPR_CFG, + SLAVE_PORT_RBCPR_QDSS_APU_CFG, + SLAVE_PORT_CNOC_MNOC_CFG, + 
SLAVE_PORT_SNOC_MPU_CFG, + SLAVE_PORT_CNOC_ONOC_CFG, + SLAVE_PORT_PNOC_CFG, + SLAVE_PORT_SNOC_CFG, + SLAVE_PORT_EBI1_DLL_CFG, + SLAVE_PORT_PHY_APU_CFG, + SLAVE_PORT_EBI1_PHY_CFG, + SLAVE_PORT_RPM, + SLAVE_PORT_GW_CNOC_SNOC, + SLAVE_PORT_SERVICE_CNOC, +}; + +/* Hardware IDs for RPM */ +enum msm_bus_8974_mas_hw_id { + MAS_APPSS_PROC = 0, + MAS_AMSS_PROC, + MAS_MNOC_BIMC, + MAS_SNOC_BIMC, + MAS_CNOC_MNOC_MMSS_CFG, + MAS_CNOC_MNOC_CFG, + MAS_GFX3D, + MAS_JPEG, + MAS_MDP, + MAS_VIDEO_P0, + MAS_VIDEO_P1, + MAS_VFE, + MAS_CNOC_ONOC_CFG, + MAS_JPEG_OCMEM, + MAS_MDP_OCMEM, + MAS_VIDEO_P0_OCMEM, + MAS_VIDEO_P1_OCMEM, + MAS_VFE_OCMEM, + MAS_LPASS_AHB, + MAS_QDSS_BAM, + MAS_SNOC_CFG, + MAS_BIMC_SNOC, + MAS_CNOC_SNOC, + MAS_CRYPTO_CORE0, + MAS_CRYPTO_CORE1, + MAS_LPASS_PROC, + MAS_MSS, + MAS_MSS_NAV, + MAS_OCMEM_DMA, + MAS_PNOC_SNOC, + MAS_WCSS, + MAS_QDSS_ETR, + MAS_USB3, + MAS_SDCC_1, + MAS_SDCC_3, + MAS_SDCC_2, + MAS_SDCC_4, + MAS_TSIF, + MAS_BAM_DMA, + MAS_BLSP_2, + MAS_USB_HSIC, + MAS_BLSP_1, + MAS_USB_HS, + MAS_PNOC_CFG, + MAS_SNOC_PNOC, + MAS_RPM_INST, + MAS_RPM_DATA, + MAS_RPM_SYS, + MAS_DEHR, + MAS_QDSS_DAP, + MAS_SPDM, + MAS_TIC, + MAS_SNOC_CNOC, + MAS_OVNOC_SNOC, + MAS_OVNOC_ONOC, + MAS_V_OCMEM_GFX3D, + MAS_ONOC_OVNOC, + MAS_SNOC_OVNOC, +}; + +enum msm_bus_8974_slv_hw_id { + SLV_EBI = 0, + SLV_APSS_L2, + SLV_BIMC_SNOC, + SLV_CAMERA_CFG, + SLV_DISPLAY_CFG, + SLV_OCMEM_CFG, + SLV_CPR_CFG, + SLV_CPR_XPU_CFG, + SLV_MISC_CFG, + SLV_MISC_XPU_CFG, + SLV_VENUS_CFG, + SLV_GFX3D_CFG, + SLV_MMSS_CLK_CFG, + SLV_MMSS_CLK_XPU_CFG, + SLV_MNOC_MPU_CFG, + SLV_ONOC_MPU_CFG, + SLV_MMSS_BIMC, + SLV_SERVICE_MNOC, + SLV_OCMEM, + SLV_SERVICE_ONOC, + SLV_APPSS, + SLV_LPASS, + SLV_USB3, + SLV_WCSS, + SLV_SNOC_BIMC, + SLV_SNOC_CNOC, + SLV_OCIMEM, + SLV_SNOC_OCMEM, + SLV_SNOC_PNOC, + SLV_SERVICE_SNOC, + SLV_QDSS_STM, + SLV_SDCC_1, + SLV_SDCC_3, + SLV_SDCC_2, + SLV_SDCC_4, + SLV_TSIF, + SLV_BAM_DMA, + SLV_BLSP_2, + SLV_USB_HSIC, + SLV_BLSP_1, + SLV_USB_HS, + SLV_PDM, + SLV_PERIPH_APU_CFG, + 
SLV_MPU_CFG, + SLV_PRNG, + SLV_PNOC_SNOC, + SLV_SERVICE_PNOC, + SLV_CLK_CTL, + SLV_CNOC_MSS, + SLV_SECURITY, + SLV_TCSR, + SLV_TLMM, + SLV_CRYPTO_0_CFG, + SLV_CRYPTO_1_CFG, + SLV_IMEM_CFG, + SLV_MESSAGE_RAM, + SLV_BIMC_CFG, + SLV_BOOT_ROM, + SLV_CNOC_MNOC_MMSS_CFG, + SLV_PMIC_ARB, + SLV_SPDM_WRAPPER, + SLV_DEHR_CFG, + SLV_MPM, + SLV_QDSS_CFG, + SLV_RBCPR_CFG, + SLV_RBCPR_QDSS_APU_CFG, + SLV_CNOC_MNOC_CFG, + SLV_SNOC_MPU_CFG, + SLV_CNOC_ONOC_CFG, + SLV_PNOC_CFG, + SLV_SNOC_CFG, + SLV_EBI1_DLL_CFG, + SLV_PHY_APU_CFG, + SLV_EBI1_PHY_CFG, + SLV_RPM, + SLV_CNOC_SNOC, + SLV_SERVICE_CNOC, + SLV_SNOC_OVNOC, + SLV_ONOC_OVNOC, + SLV_OVNOC_ONOC, + SLV_OVNOC_SNOC, +}; + +static uint32_t master_iids[NMASTERS]; +static uint32_t slave_iids[NSLAVES]; + +/* System NOC nodes */ +static int mport_lpass_ahb[] = {MASTER_PORT_LPASS_AHB,}; +static int mport_qdss_bam[] = {MASTER_PORT_QDSS_BAM,}; +static int mport_snoc_cfg[] = {MASTER_PORT_SNOC_CFG,}; +static int mport_gw_bimc_snoc[] = {MASTER_PORT_GW_BIMC_SNOC,}; +static int mport_gw_cnoc_snoc[] = {MASTER_PORT_GW_CNOC_SNOC,}; +static int mport_crypto_core0[] = {MASTER_PORT_CRYPTO_CORE0,}; +static int mport_crypto_core1[] = {MASTER_PORT_CRYPTO_CORE1}; +static int mport_lpass_proc[] = {MASTER_PORT_LPASS_PROC}; +static int mport_mss[] = {MASTER_PORT_MSS}; +static int mport_mss_nav[] = {MASTER_PORT_MSS_NAV}; +static int mport_ocmem_dma[] = {MASTER_PORT_OCMEM_DMA}; +static int mport_gw_pnoc_snoc[] = {MASTER_PORT_GW_PNOC_SNOC}; +static int mport_wcss[] = {MASTER_PORT_WCSS}; +static int mport_qdss_etr[] = {MASTER_PORT_QDSS_ETR}; +static int mport_usb3[] = {MASTER_PORT_USB3}; + +static int sport_kmpss[] = {SLAVE_PORT_KMPSS}; +static int sport_lpass[] = {SLAVE_PORT_LPASS}; +static int sport_usb3[] = {SLAVE_PORT_USB3}; +static int sport_wcss[] = {SLAVE_PORT_WCSS}; +static int sport_gw_snoc_bimc[] = { + SLAVE_PORT_GW_SNOC_BIMC_P0, + SLAVE_PORT_GW_SNOC_BIMC_P1, + }; +static int sport_gw_snoc_cnoc[] = {SLAVE_PORT_GW_SNOC_CNOC}; +static int 
sport_ocimem[] = {SLAVE_PORT_OCIMEM}; +static int sport_snoc_ocmem[] = {SLAVE_PORT_SNOC_OCMEM}; +static int sport_gw_snoc_pnoc[] = {SLAVE_PORT_GW_SNOC_PNOC}; +static int sport_service_snoc[] = {SLAVE_PORT_SERVICE_SNOC}; +static int sport_qdss_stm[] = {SLAVE_PORT_QDSS_STM}; + + +/* MMSS NOC nodes */ +static int mport_gw_cnoc_mnoc_cfg[] = { + MASTER_PORT_GW_CNOC_MNOC_MMSS_CFG, + MASTER_PORT_GW_CNOC_MNOC_CFG, +}; +static int mport_gfx3d[] = { + MASTER_PORT_GFX3D_PORT0, + MASTER_PORT_GFX3D_PORT1, +}; +static int mport_jpeg[] = {MASTER_PORT_JPEG}; +static int mport_mdp[] = {MASTER_PORT_MDP}; +static int mport_video_port0[] = {MASTER_PORT_VIDEO_PORT0}; +static int mport_video_port1[] = {MASTER_PORT_VIDEO_PORT1}; +static int mport_vfe[] = {MASTER_PORT_VFE}; + +static int sport_camera_cfg[] = {SLAVE_PORT_CAMERA_CFG}; +static int sport_display_cfg[] = {SLAVE_PORT_DISPLAY_CFG}; +static int sport_ocmem_cfg[] = {SLAVE_PORT_OCMEM_CFG}; +static int sport_cpr_cfg[] = {SLAVE_PORT_CPR_CFG}; +static int sport_cpr_xpu_cfg[] = {SLAVE_PORT_CPR_XPU_CFG,}; +static int sport_misc_cfg[] = {SLAVE_PORT_MISC_CFG}; +static int sport_misc_xpu_cfg[] = {SLAVE_PORT_MISC_XPU_CFG}; +static int sport_venus_cfg[] = {SLAVE_PORT_VENUS_CFG}; +static int sport_gfx3d_cfg[] = {SLAVE_PORT_GFX3D_CFG}; +static int sport_mmss_clk_cfg[] = {SLAVE_PORT_MMSS_CLK_CFG}; +static int sport_mmss_clk_xpu_cfg[] = { + SLAVE_PORT_MMSS_CLK_XPU_CFG +}; +static int sport_mnoc_mpu_cfg[] = {SLAVE_PORT_MNOC_MPU_CFG}; +static int sport_onoc_mpu_cfg[] = {SLAVE_PORT_ONOC_MPU_CFG}; +static int sport_gw_mmss_bimc[] = { + SLAVE_PORT_GW_MMSS_BIMC_P0, + SLAVE_PORT_GW_MMSS_BIMC_P1, +}; +static int sport_service_mnoc[] = {SLAVE_PORT_SERVICE_MNOC}; + +/* BIMC Nodes */ + +static int mport_kmpss_m0[] = {MASTER_PORT_KMPSS_M0,}; +static int mport_kmpss_m1[] = {MASTER_PORT_KMPSS_M1}; +static int mport_mss_proc[] = {MASTER_PORT_MSS_PROC}; +static int mport_gw_mnoc_bimc[] = { + MASTER_PORT_GW_MNOC_BIMC_0, + MASTER_PORT_GW_MNOC_BIMC_1, +}; +static 
int mport_gw_snoc_bimc[] = { + MASTER_PORT_GW_SNOC_BIMC_0, + MASTER_PORT_GW_SNOC_BIMC_1, +}; + +static int sport_ebi1[] = { + SLAVE_PORT_EBI1_CH0, + SLAVE_PORT_EBI1_CH1, +}; +static int sport_kmpss_l2[] = {SLAVE_PORT_KMPSS_L2,}; +static int sport_gw_bimc_snoc[] = {SLAVE_PORT_GW_BIMC_SNOC,}; + +/* OCMEM NOC Nodes */ +static int mport_cnoc_onoc_cfg[] = { + MASTER_PORT_CNOC_ONOC_CFG, +}; +static int mport_jpeg_ocmem[] = {MASTER_PORT_JPEG_OCMEM,}; +static int mport_mdp_ocmem[] = {MASTER_PORT_MDP_OCMEM,}; +static int mport_video_p0_ocmem[] = { + MASTER_PORT_VIDEO_P0_OCMEM, +}; +static int mport_video_p1_ocmem[] = { + MASTER_PORT_VIDEO_P1_OCMEM, +}; +static int mport_vfe_ocmem[] = {MASTER_PORT_VFE_OCMEM,}; +static int sport_ocmem[] = { + SLAVE_PORT_OCMEM_P0, + SLAVE_PORT_OCMEM_P1, +}; + +static int sport_service_onoc[] = {SLAVE_PORT_SERVICE_ONOC,}; + +/* Peripheral NOC Nodes */ +static int mport_sdcc_1[] = {MASTER_PORT_SDCC_1,}; +static int mport_sdcc_3[] = {MASTER_PORT_SDCC_3,}; +static int mport_sdcc_2[] = {MASTER_PORT_SDCC_2,}; +static int mport_sdcc_4[] = {MASTER_PORT_SDCC_4,}; +static int mport_tsif[] = {MASTER_PORT_TSIF,}; +static int mport_bam_dma[] = {MASTER_PORT_BAM_DMA,}; +static int mport_blsp_2[] = {MASTER_PORT_BLSP_2,}; +static int mport_usb_hsic[] = {MASTER_PORT_USB_HSIC,}; +static int mport_blsp_1[] = {MASTER_PORT_BLSP_1,}; +static int mport_usb_hs[] = {MASTER_PORT_USB_HS,}; +static int mport_pnoc_cfg[] = {MASTER_PORT_PNOC_CFG,}; +static int mport_gw_snoc_pnoc[] = {MASTER_PORT_GW_SNOC_PNOC,}; + +static int sport_sdcc_1[] = {SLAVE_PORT_SDCC_1,}; +static int sport_sdcc_3[] = {SLAVE_PORT_SDCC_3,}; +static int sport_sdcc_2[] = {SLAVE_PORT_SDCC_2,}; +static int sport_sdcc_4[] = {SLAVE_PORT_SDCC_4,}; +static int sport_tsif[] = {SLAVE_PORT_TSIF,}; +static int sport_bam_dma[] = {SLAVE_PORT_BAM_DMA,}; +static int sport_blsp_2[] = {SLAVE_PORT_BLSP_2,}; +static int sport_usb_hsic[] = {SLAVE_PORT_USB_HSIC,}; +static int sport_blsp_1[] = {SLAVE_PORT_BLSP_1,}; +static 
int sport_usb_hs[] = {SLAVE_PORT_USB_HS,}; +static int sport_pdm[] = {SLAVE_PORT_PDM,}; +static int sport_periph_apu_cfg[] = { + SLAVE_PORT_PERIPH_APU_CFG, +}; +static int sport_pnoc_mpu_cfg[] = {SLAVE_PORT_PNOC_MPU_CFG,}; +static int sport_prng[] = {SLAVE_PORT_PRNG,}; +static int sport_gw_pnoc_snoc[] = {SLAVE_PORT_GW_PNOC_SNOC,}; +static int sport_service_pnoc[] = {SLAVE_PORT_SERVICE_PNOC,}; + +/* Config NOC Nodes */ +static int mport_rpm_inst[] = {MASTER_PORT_RPM_INST,}; +static int mport_rpm_data[] = {MASTER_PORT_RPM_DATA,}; +static int mport_rpm_sys[] = {MASTER_PORT_RPM_SYS,}; +static int mport_dehr[] = {MASTER_PORT_DEHR,}; +static int mport_qdss_dap[] = {MASTER_PORT_QDSS_DAP,}; +static int mport_spdm[] = {MASTER_PORT_SPDM,}; +static int mport_tic[] = {MASTER_PORT_TIC,}; +static int mport_gw_snoc_cnoc[] = {MASTER_PORT_GW_SNOC_CNOC,}; + +static int sport_clk_ctl[] = {SLAVE_PORT_CLK_CTL,}; +static int sport_cnoc_mss[] = {SLAVE_PORT_CNOC_MSS,}; +static int sport_security[] = {SLAVE_PORT_SECURITY,}; +static int sport_tcsr[] = {SLAVE_PORT_TCSR,}; +static int sport_tlmm[] = {SLAVE_PORT_TLMM,}; +static int sport_crypto_0_cfg[] = {SLAVE_PORT_CRYPTO_0_CFG,}; +static int sport_crypto_1_cfg[] = {SLAVE_PORT_CRYPTO_1_CFG,}; +static int sport_imem_cfg[] = {SLAVE_PORT_IMEM_CFG,}; +static int sport_message_ram[] = {SLAVE_PORT_MESSAGE_RAM,}; +static int sport_bimc_cfg[] = {SLAVE_PORT_BIMC_CFG,}; +static int sport_boot_rom[] = {SLAVE_PORT_BOOT_ROM,}; +static int sport_cnoc_mnoc_mmss_cfg[] = {SLAVE_PORT_CNOC_MNOC_MMSS_CFG,}; +static int sport_cnoc_mnoc_cfg[] = {SLAVE_PORT_CNOC_MNOC_CFG,}; +static int sport_pmic_arb[] = {SLAVE_PORT_PMIC_ARB,}; +static int sport_spdm_wrapper[] = {SLAVE_PORT_SPDM_WRAPPER,}; +static int sport_dehr_cfg[] = {SLAVE_PORT_DEHR_CFG,}; +static int sport_mpm[] = {SLAVE_PORT_MPM,}; +static int sport_qdss_cfg[] = {SLAVE_PORT_QDSS_CFG,}; +static int sport_rbcpr_cfg[] = {SLAVE_PORT_RBCPR_CFG,}; +static int sport_rbcpr_qdss_apu_cfg[] = 
{SLAVE_PORT_RBCPR_QDSS_APU_CFG,}; +static int sport_snoc_mpu_cfg[] = {SLAVE_PORT_SNOC_MPU_CFG,}; +static int sport_cnoc_onoc_cfg[] = {SLAVE_PORT_CNOC_ONOC_CFG,}; +static int sport_pnoc_cfg[] = {SLAVE_PORT_PNOC_CFG,}; +static int sport_snoc_cfg[] = {SLAVE_PORT_SNOC_CFG,}; +static int sport_ebi1_dll_cfg[] = {SLAVE_PORT_EBI1_DLL_CFG,}; +static int sport_phy_apu_cfg[] = {SLAVE_PORT_PHY_APU_CFG,}; +static int sport_ebi1_phy_cfg[] = {SLAVE_PORT_EBI1_PHY_CFG,}; +static int sport_rpm[] = {SLAVE_PORT_RPM,}; +static int sport_gw_cnoc_snoc[] = {SLAVE_PORT_GW_CNOC_SNOC,}; +static int sport_service_cnoc[] = {SLAVE_PORT_SERVICE_CNOC,}; + +static int tier2[] = {MSM_BUS_BW_TIER2,}; + +/* + * QOS Ports defined only when qos ports are different than + * master ports + **/ +static int qports_gemini[] = {0}; +static int qports_mdp[] = {1}; +static int qports_venus_p0[] = {4}; +static int qports_venus_p1[] = {5}; +static int qports_vfe[] = {6}; +static int qports_gemini_ocmem[] = {0}; +static int qports_venus_p0_ocmem[] = {2}; +static int qports_venus_p1_ocmem[] = {3}; +static int qports_vfe_ocmem[] = {4}; +static int qports_crypto_c0[] = {2}; +static int qports_crypto_c1[] = {3}; +static int qports_lpass_proc[] = {4}; +static int qports_ocmem_dma[] = {7}; +static int qports_gw_snoc_bimc[] = {5, 6}; +static int qports_kmpss[] = {0, 1}; +static int qports_lpass_ahb[] = {0}; +static int qports_qdss_bam[] = {1}; +static int qports_gw_pnoc_snoc[] = {8}; +static int qports_qdss_etr[] = {10}; +static int qports_usb3[] = {11}; +static int qports_oxili[] = {2, 3}; +static int qports_gw_mnoc_bimc[] = {3, 4}; + +static struct msm_bus_node_info sys_noc_info[] = { + { + .id = MSM_BUS_MASTER_LPASS_AHB, + .masterp = mport_lpass_ahb, + .num_mports = ARRAY_SIZE(mport_lpass_ahb), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .qport = qports_lpass_ahb, + .mas_hw_id = MAS_LPASS_AHB, + .mode = NOC_QOS_MODE_FIXED, + .prio_rd = 2, + .prio_wr = 2, + }, + { + .id = MSM_BUS_MASTER_QDSS_BAM, + .masterp = 
mport_qdss_bam, + .num_mports = ARRAY_SIZE(mport_qdss_bam), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .mode = NOC_QOS_MODE_FIXED, + .qport = qports_qdss_bam, + .mas_hw_id = MAS_QDSS_BAM, + }, + { + .id = MSM_BUS_MASTER_SNOC_CFG, + .masterp = mport_snoc_cfg, + .num_mports = ARRAY_SIZE(mport_snoc_cfg), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .mas_hw_id = MAS_SNOC_CFG, + }, + { + .id = MSM_BUS_FAB_BIMC, + .gateway = 1, + .slavep = sport_gw_snoc_bimc, + .num_sports = ARRAY_SIZE(sport_gw_snoc_bimc), + .masterp = mport_gw_bimc_snoc, + .num_mports = ARRAY_SIZE(mport_gw_bimc_snoc), + .buswidth = 8, + .mas_hw_id = MAS_BIMC_SNOC, + .slv_hw_id = SLV_SNOC_BIMC, + }, + { + .id = MSM_BUS_FAB_CONFIG_NOC, + .gateway = 1, + .slavep = sport_gw_snoc_cnoc, + .num_sports = ARRAY_SIZE(sport_gw_snoc_cnoc), + .masterp = mport_gw_cnoc_snoc, + .num_mports = ARRAY_SIZE(mport_gw_cnoc_snoc), + .buswidth = 8, + .mas_hw_id = MAS_CNOC_SNOC, + .slv_hw_id = SLV_SNOC_CNOC, + }, + { + .id = MSM_BUS_FAB_PERIPH_NOC, + .gateway = 1, + .slavep = sport_gw_snoc_pnoc, + .num_sports = ARRAY_SIZE(sport_gw_snoc_pnoc), + .masterp = mport_gw_pnoc_snoc, + .num_mports = ARRAY_SIZE(mport_gw_pnoc_snoc), + .buswidth = 8, + .qport = qports_gw_pnoc_snoc, + .mas_hw_id = MAS_PNOC_SNOC, + .slv_hw_id = SLV_SNOC_PNOC, + .mode = NOC_QOS_MODE_FIXED, + .prio_rd = 2, + .prio_wr = 2, + }, + { + .id = MSM_BUS_FAB_OCMEM_VNOC, + .gateway = 1, + .buswidth = 8, + .mas_hw_id = MAS_OVNOC_SNOC, + .slv_hw_id = SLV_SNOC_OVNOC, + }, + { + .id = MSM_BUS_MASTER_CRYPTO_CORE0, + .masterp = mport_crypto_core0, + .num_mports = ARRAY_SIZE(mport_crypto_core0), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .mode = NOC_QOS_MODE_FIXED, + .qport = qports_crypto_c0, + .mas_hw_id = MAS_CRYPTO_CORE0, + .hw_sel = MSM_BUS_NOC, + .prio_rd = 1, + .prio_wr = 1, + }, + { + .id = MSM_BUS_MASTER_CRYPTO_CORE1, + .masterp = mport_crypto_core1, + .num_mports = ARRAY_SIZE(mport_crypto_core1), + .tier = tier2, + .num_tiers = 
ARRAY_SIZE(tier2), + .mode = NOC_QOS_MODE_FIXED, + .qport = qports_crypto_c1, + .mas_hw_id = MAS_CRYPTO_CORE1, + .hw_sel = MSM_BUS_NOC, + .prio_rd = 1, + .prio_wr = 1, + }, + { + .id = MSM_BUS_MASTER_LPASS_PROC, + .masterp = mport_lpass_proc, + .num_mports = ARRAY_SIZE(mport_lpass_proc), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .qport = qports_lpass_proc, + .mas_hw_id = MAS_LPASS_PROC, + .mode = NOC_QOS_MODE_FIXED, + .prio_rd = 2, + .prio_wr = 2, + }, + { + .id = MSM_BUS_MASTER_MSS, + .masterp = mport_mss, + .num_mports = ARRAY_SIZE(mport_mss), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .mas_hw_id = MAS_MSS, + }, + { + .id = MSM_BUS_MASTER_MSS_NAV, + .masterp = mport_mss_nav, + .num_mports = ARRAY_SIZE(mport_mss_nav), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .mas_hw_id = MAS_MSS_NAV, + }, + { + .id = MSM_BUS_MASTER_OCMEM_DMA, + .masterp = mport_ocmem_dma, + .num_mports = ARRAY_SIZE(mport_ocmem_dma), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .mode = NOC_QOS_MODE_FIXED, + .qport = qports_ocmem_dma, + .mas_hw_id = MAS_OCMEM_DMA, + }, + { + .id = MSM_BUS_MASTER_WCSS, + .masterp = mport_wcss, + .num_mports = ARRAY_SIZE(mport_wcss), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .mas_hw_id = MAS_WCSS, + }, + { + .id = MSM_BUS_MASTER_QDSS_ETR, + .masterp = mport_qdss_etr, + .num_mports = ARRAY_SIZE(mport_qdss_etr), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .qport = qports_qdss_etr, + .mode = NOC_QOS_MODE_FIXED, + .mas_hw_id = MAS_QDSS_ETR, + }, + { + .id = MSM_BUS_MASTER_USB3, + .masterp = mport_usb3, + .num_mports = ARRAY_SIZE(mport_usb3), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .mode = NOC_QOS_MODE_FIXED, + .qport = qports_usb3, + .mas_hw_id = MAS_USB3, + .prio_rd = 2, + .prio_wr = 2, + .hw_sel = MSM_BUS_NOC, + .iface_clk_node = "msm_usb3", + }, + { + .id = MSM_BUS_SLAVE_AMPSS, + .slavep = sport_kmpss, + .num_sports = ARRAY_SIZE(sport_kmpss), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + 
.buswidth = 8, + .slv_hw_id = SLV_APPSS, + }, + { + .id = MSM_BUS_SLAVE_LPASS, + .slavep = sport_lpass, + .num_sports = ARRAY_SIZE(sport_lpass), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_LPASS, + }, + { + .id = MSM_BUS_SLAVE_USB3, + .slavep = sport_usb3, + .num_sports = ARRAY_SIZE(sport_usb3), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_USB3, + }, + { + .id = MSM_BUS_SLAVE_WCSS, + .slavep = sport_wcss, + .num_sports = ARRAY_SIZE(sport_wcss), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_WCSS, + }, + { + .id = MSM_BUS_SLAVE_OCIMEM, + .slavep = sport_ocimem, + .num_sports = ARRAY_SIZE(sport_ocimem), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_OCIMEM, + }, + { + .id = MSM_BUS_SLAVE_SNOC_OCMEM, + .slavep = sport_snoc_ocmem, + .num_sports = ARRAY_SIZE(sport_snoc_ocmem), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_SNOC_OCMEM, + }, + { + .id = MSM_BUS_SLAVE_SERVICE_SNOC, + .slavep = sport_service_snoc, + .num_sports = ARRAY_SIZE(sport_service_snoc), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_SERVICE_SNOC, + }, + { + .id = MSM_BUS_SLAVE_QDSS_STM, + .slavep = sport_qdss_stm, + .num_sports = ARRAY_SIZE(sport_qdss_stm), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_QDSS_STM, + }, +}; + + +static struct msm_bus_node_info mmss_noc_info[] = { + { + .id = MSM_BUS_MASTER_GRAPHICS_3D, + .masterp = mport_gfx3d, + .num_mports = ARRAY_SIZE(mport_gfx3d), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .hw_sel = MSM_BUS_NOC, + .perm_mode = NOC_QOS_PERM_MODE_BYPASS, + .mode = NOC_QOS_MODE_BYPASS, + .ws = 10000, + .qport = qports_oxili, + .mas_hw_id = MAS_GFX3D, + }, + { + .id = MSM_BUS_MASTER_JPEG, + .masterp = mport_jpeg, + .num_mports = ARRAY_SIZE(mport_jpeg), + .tier = tier2, + 
.num_tiers = ARRAY_SIZE(tier2), + .hw_sel = MSM_BUS_NOC, + .perm_mode = NOC_QOS_PERM_MODE_BYPASS, + .mode = NOC_QOS_MODE_BYPASS, + .qport = qports_gemini, + .ws = 10000, + .mas_hw_id = MAS_JPEG, + }, + { + .id = MSM_BUS_MASTER_MDP_PORT0, + .masterp = mport_mdp, + .num_mports = ARRAY_SIZE(mport_mdp), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .hw_sel = MSM_BUS_NOC, + .perm_mode = NOC_QOS_PERM_MODE_BYPASS, + .mode = NOC_QOS_MODE_BYPASS, + .qport = qports_mdp, + .ws = 10000, + .mas_hw_id = MAS_MDP, + }, + { + .id = MSM_BUS_MASTER_VIDEO_P0, + .masterp = mport_video_port0, + .num_mports = ARRAY_SIZE(mport_video_port0), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .hw_sel = MSM_BUS_NOC, + .perm_mode = NOC_QOS_PERM_MODE_BYPASS, + .mode = NOC_QOS_MODE_BYPASS, + .ws = 10000, + .qport = qports_venus_p0, + .mas_hw_id = MAS_VIDEO_P0, + }, + { + .id = MSM_BUS_MASTER_VIDEO_P1, + .masterp = mport_video_port1, + .num_mports = ARRAY_SIZE(mport_video_port1), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .hw_sel = MSM_BUS_NOC, + .perm_mode = NOC_QOS_PERM_MODE_BYPASS, + .mode = NOC_QOS_MODE_BYPASS, + .ws = 10000, + .qport = qports_venus_p1, + .mas_hw_id = MAS_VIDEO_P1, + }, + { + .id = MSM_BUS_MASTER_VFE, + .masterp = mport_vfe, + .num_mports = ARRAY_SIZE(mport_vfe), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .hw_sel = MSM_BUS_NOC, + .perm_mode = NOC_QOS_PERM_MODE_BYPASS, + .mode = NOC_QOS_MODE_BYPASS, + .ws = 10000, + .qport = qports_vfe, + .mas_hw_id = MAS_VFE, + }, + { + .id = MSM_BUS_FAB_CONFIG_NOC, + .gateway = 1, + .masterp = mport_gw_cnoc_mnoc_cfg, + .num_mports = ARRAY_SIZE(mport_gw_cnoc_mnoc_cfg), + .buswidth = 16, + .hw_sel = MSM_BUS_RPM, + .mas_hw_id = MAS_CNOC_MNOC_MMSS_CFG, + }, + { + .id = MSM_BUS_FAB_BIMC, + .gateway = 1, + .slavep = sport_gw_mmss_bimc, + .num_sports = ARRAY_SIZE(sport_gw_mmss_bimc), + .buswidth = 16, + .hw_sel = MSM_BUS_NOC, + .slv_hw_id = SLV_MMSS_BIMC, + }, + { + .id = MSM_BUS_SLAVE_CAMERA_CFG, + .slavep = 
sport_camera_cfg, + .num_sports = ARRAY_SIZE(sport_camera_cfg), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 16, + .hw_sel = MSM_BUS_NOC, + .slv_hw_id = SLV_CAMERA_CFG, + }, + { + .id = MSM_BUS_SLAVE_DISPLAY_CFG, + .slavep = sport_display_cfg, + .num_sports = ARRAY_SIZE(sport_display_cfg), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 16, + .hw_sel = MSM_BUS_NOC, + .slv_hw_id = SLV_DISPLAY_CFG, + }, + { + .id = MSM_BUS_SLAVE_OCMEM_CFG, + .slavep = sport_ocmem_cfg, + .num_sports = ARRAY_SIZE(sport_ocmem_cfg), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 16, + .hw_sel = MSM_BUS_NOC, + .slv_hw_id = SLV_OCMEM_CFG, + }, + { + .id = MSM_BUS_SLAVE_CPR_CFG, + .slavep = sport_cpr_cfg, + .num_sports = ARRAY_SIZE(sport_cpr_cfg), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 16, + .hw_sel = MSM_BUS_NOC, + .slv_hw_id = SLV_CPR_CFG, + }, + { + .id = MSM_BUS_SLAVE_CPR_XPU_CFG, + .slavep = sport_cpr_xpu_cfg, + .num_sports = ARRAY_SIZE(sport_cpr_xpu_cfg), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 16, + .hw_sel = MSM_BUS_NOC, + .slv_hw_id = SLV_CPR_XPU_CFG, + }, + { + .id = MSM_BUS_SLAVE_MISC_CFG, + .slavep = sport_misc_cfg, + .num_sports = ARRAY_SIZE(sport_misc_cfg), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 16, + .hw_sel = MSM_BUS_NOC, + .slv_hw_id = SLV_MISC_CFG, + }, + { + .id = MSM_BUS_SLAVE_MISC_XPU_CFG, + .slavep = sport_misc_xpu_cfg, + .num_sports = ARRAY_SIZE(sport_misc_xpu_cfg), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 16, + .hw_sel = MSM_BUS_NOC, + .slv_hw_id = SLV_MISC_XPU_CFG, + }, + { + .id = MSM_BUS_SLAVE_VENUS_CFG, + .slavep = sport_venus_cfg, + .num_sports = ARRAY_SIZE(sport_venus_cfg), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 16, + .hw_sel = MSM_BUS_NOC, + .slv_hw_id = SLV_VENUS_CFG, + }, + { + .id = MSM_BUS_SLAVE_GRAPHICS_3D_CFG, + .slavep = sport_gfx3d_cfg, + .num_sports = 
ARRAY_SIZE(sport_gfx3d_cfg), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 16, + .hw_sel = MSM_BUS_NOC, + .slv_hw_id = SLV_GFX3D_CFG, + }, + { + .id = MSM_BUS_SLAVE_MMSS_CLK_CFG, + .slavep = sport_mmss_clk_cfg, + .num_sports = ARRAY_SIZE(sport_mmss_clk_cfg), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 16, + .hw_sel = MSM_BUS_NOC, + .slv_hw_id = SLV_MMSS_CLK_CFG, + }, + { + .id = MSM_BUS_SLAVE_MMSS_CLK_XPU_CFG, + .slavep = sport_mmss_clk_xpu_cfg, + .num_sports = ARRAY_SIZE(sport_mmss_clk_xpu_cfg), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 16, + .hw_sel = MSM_BUS_NOC, + .slv_hw_id = SLV_MMSS_CLK_XPU_CFG, + }, + { + .id = MSM_BUS_SLAVE_MNOC_MPU_CFG, + .slavep = sport_mnoc_mpu_cfg, + .num_sports = ARRAY_SIZE(sport_mnoc_mpu_cfg), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 16, + .hw_sel = MSM_BUS_NOC, + .slv_hw_id = SLV_MNOC_MPU_CFG, + }, + { + .id = MSM_BUS_SLAVE_ONOC_MPU_CFG, + .slavep = sport_onoc_mpu_cfg, + .num_sports = ARRAY_SIZE(sport_onoc_mpu_cfg), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 16, + .hw_sel = MSM_BUS_NOC, + .slv_hw_id = SLV_ONOC_MPU_CFG, + }, + { + .id = MSM_BUS_SLAVE_SERVICE_MNOC, + .slavep = sport_service_mnoc, + .num_sports = ARRAY_SIZE(sport_service_mnoc), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 16, + .hw_sel = MSM_BUS_NOC, + .slv_hw_id = SLV_SERVICE_MNOC, + }, +}; + +static struct msm_bus_node_info bimc_info[] = { + { + .id = MSM_BUS_MASTER_AMPSS_M0, + .masterp = mport_kmpss_m0, + .num_mports = ARRAY_SIZE(mport_kmpss_m0), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .hw_sel = MSM_BUS_BIMC, + .mode = NOC_QOS_MODE_FIXED, + .qport = qports_kmpss, + .ws = 10000, + .mas_hw_id = MAS_APPSS_PROC, + .prio_rd = 1, + .prio_wr = 1, + }, + { + .id = MSM_BUS_MASTER_AMPSS_M1, + .masterp = mport_kmpss_m1, + .num_mports = ARRAY_SIZE(mport_kmpss_m1), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .hw_sel = 
MSM_BUS_BIMC, + .mode = NOC_QOS_MODE_FIXED, + .qport = qports_kmpss, + .ws = 10000, + .mas_hw_id = MAS_APPSS_PROC, + .prio_rd = 1, + .prio_wr = 1, + }, + { + .id = MSM_BUS_MASTER_MSS_PROC, + .masterp = mport_mss_proc, + .num_mports = ARRAY_SIZE(mport_mss_proc), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .hw_sel = MSM_BUS_RPM, + .mas_hw_id = MAS_AMSS_PROC, + }, + { + .id = MSM_BUS_FAB_MMSS_NOC, + .gateway = 1, + .masterp = mport_gw_mnoc_bimc, + .num_mports = ARRAY_SIZE(mport_gw_mnoc_bimc), + .qport = qports_gw_mnoc_bimc, + .buswidth = 8, + .ws = 10000, + .mas_hw_id = MAS_MNOC_BIMC, + .hw_sel = MSM_BUS_BIMC, + .mode = NOC_QOS_MODE_BYPASS, + }, + { + .id = MSM_BUS_FAB_SYS_NOC, + .gateway = 1, + .slavep = sport_gw_bimc_snoc, + .num_sports = ARRAY_SIZE(sport_gw_bimc_snoc), + .masterp = mport_gw_snoc_bimc, + .num_mports = ARRAY_SIZE(mport_gw_snoc_bimc), + .qport = qports_gw_snoc_bimc, + .buswidth = 8, + .ws = 10000, + .mas_hw_id = MAS_SNOC_BIMC, + .slv_hw_id = SLV_BIMC_SNOC, + }, + { + .id = MSM_BUS_SLAVE_EBI_CH0, + .slavep = sport_ebi1, + .num_sports = ARRAY_SIZE(sport_ebi1), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_EBI, + .mode = NOC_QOS_MODE_BYPASS, + }, + { + .id = MSM_BUS_SLAVE_AMPSS_L2, + .slavep = sport_kmpss_l2, + .num_sports = ARRAY_SIZE(sport_kmpss_l2), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_APSS_L2, + }, +}; + +static struct msm_bus_node_info ocmem_noc_info[] = { + { + .id = MSM_BUS_FAB_OCMEM_VNOC, + .gateway = 1, + .buswidth = 16, + .mas_hw_id = MAS_OVNOC_ONOC, + .slv_hw_id = SLV_ONOC_OVNOC, + }, + { + .id = MSM_BUS_MASTER_JPEG_OCMEM, + .masterp = mport_jpeg_ocmem, + .num_mports = ARRAY_SIZE(mport_jpeg_ocmem), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .perm_mode = NOC_QOS_PERM_MODE_FIXED, + .mode = NOC_QOS_MODE_FIXED, + .qport = qports_gemini_ocmem, + .mas_hw_id = MAS_JPEG_OCMEM, + .hw_sel = MSM_BUS_NOC, + }, + { + .id = MSM_BUS_MASTER_MDP_OCMEM, 
+ .masterp = mport_mdp_ocmem, + .num_mports = ARRAY_SIZE(mport_mdp_ocmem), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .perm_mode = NOC_QOS_PERM_MODE_FIXED, + .mode = NOC_QOS_MODE_FIXED, + .mas_hw_id = MAS_MDP_OCMEM, + .hw_sel = MSM_BUS_NOC, + }, + { + .id = MSM_BUS_MASTER_VIDEO_P0_OCMEM, + .masterp = mport_video_p0_ocmem, + .num_mports = ARRAY_SIZE(mport_video_p0_ocmem), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .perm_mode = NOC_QOS_PERM_MODE_FIXED, + .mode = NOC_QOS_MODE_FIXED, + .qport = qports_venus_p0_ocmem, + .mas_hw_id = MAS_VIDEO_P0_OCMEM, + .hw_sel = MSM_BUS_NOC, + }, + { + .id = MSM_BUS_MASTER_VIDEO_P1_OCMEM, + .masterp = mport_video_p1_ocmem, + .num_mports = ARRAY_SIZE(mport_video_p1_ocmem), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .perm_mode = NOC_QOS_PERM_MODE_FIXED, + .mode = NOC_QOS_MODE_FIXED, + .qport = qports_venus_p1_ocmem, + .mas_hw_id = MAS_VIDEO_P1_OCMEM, + .hw_sel = MSM_BUS_NOC, + }, + { + .id = MSM_BUS_MASTER_VFE_OCMEM, + .masterp = mport_vfe_ocmem, + .num_mports = ARRAY_SIZE(mport_vfe_ocmem), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .perm_mode = NOC_QOS_PERM_MODE_FIXED, + .mode = NOC_QOS_MODE_FIXED, + .qport = qports_vfe_ocmem, + .mas_hw_id = MAS_VFE_OCMEM, + .hw_sel = MSM_BUS_NOC, + .prio_rd = 1, + .prio_wr = 1, + }, + { + .id = MSM_BUS_MASTER_CNOC_ONOC_CFG, + .masterp = mport_cnoc_onoc_cfg, + .num_mports = ARRAY_SIZE(mport_cnoc_onoc_cfg), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 16, + .mas_hw_id = MAS_CNOC_ONOC_CFG, + .hw_sel = MSM_BUS_NOC, + }, + { + .id = MSM_BUS_SLAVE_SERVICE_ONOC, + .slavep = sport_service_onoc, + .num_sports = ARRAY_SIZE(sport_service_onoc), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 16, + .slv_hw_id = SLV_SERVICE_ONOC, + }, +}; + +static struct msm_bus_node_info periph_noc_info[] = { + { + .id = MSM_BUS_MASTER_PNOC_CFG, + .masterp = mport_pnoc_cfg, + .num_mports = ARRAY_SIZE(mport_pnoc_cfg), + .tier = tier2, + .num_tiers = 
ARRAY_SIZE(tier2), + .buswidth = 8, + .mas_hw_id = MAS_PNOC_CFG, + }, + { + .id = MSM_BUS_MASTER_SDCC_1, + .masterp = mport_sdcc_1, + .num_mports = ARRAY_SIZE(mport_sdcc_1), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .mas_hw_id = MAS_SDCC_1, + }, + { + .id = MSM_BUS_MASTER_SDCC_3, + .masterp = mport_sdcc_3, + .num_mports = ARRAY_SIZE(mport_sdcc_3), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .mas_hw_id = MAS_SDCC_3, + }, + { + .id = MSM_BUS_MASTER_SDCC_4, + .masterp = mport_sdcc_4, + .num_mports = ARRAY_SIZE(mport_sdcc_4), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .mas_hw_id = MAS_SDCC_4, + }, + { + .id = MSM_BUS_MASTER_SDCC_2, + .masterp = mport_sdcc_2, + .num_mports = ARRAY_SIZE(mport_sdcc_2), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .mas_hw_id = MAS_SDCC_2, + }, + { + .id = MSM_BUS_MASTER_TSIF, + .masterp = mport_tsif, + .num_mports = ARRAY_SIZE(mport_tsif), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .mas_hw_id = MAS_TSIF, + }, + { + .id = MSM_BUS_MASTER_BAM_DMA, + .masterp = mport_bam_dma, + .num_mports = ARRAY_SIZE(mport_bam_dma), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .mas_hw_id = MAS_BAM_DMA, + }, + { + .id = MSM_BUS_MASTER_BLSP_2, + .masterp = mport_blsp_2, + .num_mports = ARRAY_SIZE(mport_blsp_2), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .mas_hw_id = MAS_BLSP_2, + }, + { + .id = MSM_BUS_MASTER_USB_HSIC, + .masterp = mport_usb_hsic, + .num_mports = ARRAY_SIZE(mport_usb_hsic), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .mas_hw_id = MAS_USB_HSIC, + }, + { + .id = MSM_BUS_MASTER_BLSP_1, + .masterp = mport_blsp_1, + .num_mports = ARRAY_SIZE(mport_blsp_1), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .mas_hw_id = MAS_BLSP_1, + }, + { + .id = MSM_BUS_MASTER_USB_HS, + .masterp = mport_usb_hs, + .num_mports = 
ARRAY_SIZE(mport_usb_hs), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .mas_hw_id = MAS_USB_HS, + }, + { + .id = MSM_BUS_FAB_SYS_NOC, + .gateway = 1, + .slavep = sport_gw_pnoc_snoc, + .num_sports = ARRAY_SIZE(sport_gw_pnoc_snoc), + .masterp = mport_gw_snoc_pnoc, + .num_mports = ARRAY_SIZE(mport_gw_snoc_pnoc), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_PNOC_SNOC, + .mas_hw_id = MAS_SNOC_PNOC, + }, + { + .id = MSM_BUS_SLAVE_SDCC_1, + .slavep = sport_sdcc_1, + .num_sports = ARRAY_SIZE(sport_sdcc_1), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_SDCC_1, + }, + { + .id = MSM_BUS_SLAVE_SDCC_3, + .slavep = sport_sdcc_3, + .num_sports = ARRAY_SIZE(sport_sdcc_3), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_SDCC_3, + }, + { + .id = MSM_BUS_SLAVE_SDCC_2, + .slavep = sport_sdcc_2, + .num_sports = ARRAY_SIZE(sport_sdcc_2), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_SDCC_2, + }, + { + .id = MSM_BUS_SLAVE_SDCC_4, + .slavep = sport_sdcc_4, + .num_sports = ARRAY_SIZE(sport_sdcc_4), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_SDCC_4, + }, + { + .id = MSM_BUS_SLAVE_TSIF, + .slavep = sport_tsif, + .num_sports = ARRAY_SIZE(sport_tsif), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_TSIF, + }, + { + .id = MSM_BUS_SLAVE_BAM_DMA, + .slavep = sport_bam_dma, + .num_sports = ARRAY_SIZE(sport_bam_dma), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_BAM_DMA, + }, + { + .id = MSM_BUS_SLAVE_BLSP_2, + .slavep = sport_blsp_2, + .num_sports = ARRAY_SIZE(sport_blsp_2), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_BLSP_2, + }, + { + .id = MSM_BUS_SLAVE_USB_HSIC, + .slavep = sport_usb_hsic, + .num_sports = ARRAY_SIZE(sport_usb_hsic), + .tier = 
tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_USB_HSIC, + }, + { + .id = MSM_BUS_SLAVE_BLSP_1, + .slavep = sport_blsp_1, + .num_sports = ARRAY_SIZE(sport_blsp_1), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_BLSP_1, + }, + { + .id = MSM_BUS_SLAVE_USB_HS, + .slavep = sport_usb_hs, + .num_sports = ARRAY_SIZE(sport_usb_hs), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_USB_HS, + }, + { + .id = MSM_BUS_SLAVE_PDM, + .slavep = sport_pdm, + .num_sports = ARRAY_SIZE(sport_pdm), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_PDM, + }, + { + .id = MSM_BUS_SLAVE_PERIPH_APU_CFG, + .slavep = sport_periph_apu_cfg, + .num_sports = ARRAY_SIZE(sport_periph_apu_cfg), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_PERIPH_APU_CFG, + }, + { + .id = MSM_BUS_SLAVE_PNOC_MPU_CFG, + .slavep = sport_pnoc_mpu_cfg, + .num_sports = ARRAY_SIZE(sport_pnoc_mpu_cfg), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_MPU_CFG, + }, + { + .id = MSM_BUS_SLAVE_PRNG, + .slavep = sport_prng, + .num_sports = ARRAY_SIZE(sport_prng), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_PRNG, + }, + { + .id = MSM_BUS_SLAVE_SERVICE_PNOC, + .slavep = sport_service_pnoc, + .num_sports = ARRAY_SIZE(sport_service_pnoc), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_SERVICE_PNOC, + }, +}; + +static struct msm_bus_node_info config_noc_info[] = { + { + .id = MSM_BUS_MASTER_RPM_INST, + .masterp = mport_rpm_inst, + .num_mports = ARRAY_SIZE(mport_rpm_inst), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .mas_hw_id = MAS_RPM_INST, + }, + { + .id = MSM_BUS_MASTER_RPM_DATA, + .masterp = mport_rpm_data, + .num_mports = ARRAY_SIZE(mport_rpm_data), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + 
.buswidth = 8, + .mas_hw_id = MAS_RPM_DATA, + }, + { + .id = MSM_BUS_MASTER_RPM_SYS, + .masterp = mport_rpm_sys, + .num_mports = ARRAY_SIZE(mport_rpm_sys), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .mas_hw_id = MAS_RPM_SYS, + }, + { + .id = MSM_BUS_MASTER_DEHR, + .masterp = mport_dehr, + .num_mports = ARRAY_SIZE(mport_dehr), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .mas_hw_id = MAS_DEHR, + }, + { + .id = MSM_BUS_MASTER_QDSS_DAP, + .masterp = mport_qdss_dap, + .num_mports = ARRAY_SIZE(mport_qdss_dap), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .mas_hw_id = MAS_QDSS_DAP, + }, + { + .id = MSM_BUS_MASTER_SPDM, + .masterp = mport_spdm, + .num_mports = ARRAY_SIZE(mport_spdm), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .mas_hw_id = MAS_SPDM, + }, + { + .id = MSM_BUS_MASTER_TIC, + .masterp = mport_tic, + .num_mports = ARRAY_SIZE(mport_tic), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .mas_hw_id = MAS_TIC, + }, + { + .id = MSM_BUS_SLAVE_CLK_CTL, + .slavep = sport_clk_ctl, + .num_sports = ARRAY_SIZE(sport_clk_ctl), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_CLK_CTL, + }, + { + .id = MSM_BUS_SLAVE_CNOC_MSS, + .slavep = sport_cnoc_mss, + .num_sports = ARRAY_SIZE(sport_cnoc_mss), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_CNOC_MSS, + }, + { + .id = MSM_BUS_SLAVE_SECURITY, + .slavep = sport_security, + .num_sports = ARRAY_SIZE(sport_security), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_SECURITY, + }, + { + .id = MSM_BUS_SLAVE_TCSR, + .slavep = sport_tcsr, + .num_sports = ARRAY_SIZE(sport_tcsr), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_TCSR, + }, + { + .id = MSM_BUS_SLAVE_TLMM, + .slavep = sport_tlmm, + .num_sports = ARRAY_SIZE(sport_tlmm), + .tier = tier2, + .num_tiers 
= ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_TLMM, + }, + { + .id = MSM_BUS_SLAVE_CRYPTO_0_CFG, + .slavep = sport_crypto_0_cfg, + .num_sports = ARRAY_SIZE(sport_crypto_0_cfg), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_CRYPTO_0_CFG, + }, + { + .id = MSM_BUS_SLAVE_CRYPTO_1_CFG, + .slavep = sport_crypto_1_cfg, + .num_sports = ARRAY_SIZE(sport_crypto_1_cfg), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_CRYPTO_1_CFG, + }, + { + .id = MSM_BUS_SLAVE_IMEM_CFG, + .slavep = sport_imem_cfg, + .num_sports = ARRAY_SIZE(sport_imem_cfg), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_IMEM_CFG, + }, + { + .id = MSM_BUS_SLAVE_MESSAGE_RAM, + .slavep = sport_message_ram, + .num_sports = ARRAY_SIZE(sport_message_ram), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_MESSAGE_RAM, + }, + { + .id = MSM_BUS_SLAVE_BIMC_CFG, + .slavep = sport_bimc_cfg, + .num_sports = ARRAY_SIZE(sport_bimc_cfg), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_BIMC_CFG, + }, + { + .id = MSM_BUS_SLAVE_BOOT_ROM, + .slavep = sport_boot_rom, + .num_sports = ARRAY_SIZE(sport_boot_rom), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_BOOT_ROM, + }, + { + .id = MSM_BUS_SLAVE_PMIC_ARB, + .slavep = sport_pmic_arb, + .num_sports = ARRAY_SIZE(sport_pmic_arb), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_PMIC_ARB, + }, + { + .id = MSM_BUS_SLAVE_SPDM_WRAPPER, + .slavep = sport_spdm_wrapper, + .num_sports = ARRAY_SIZE(sport_spdm_wrapper), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_SPDM_WRAPPER, + }, + { + .id = MSM_BUS_SLAVE_DEHR_CFG, + .slavep = sport_dehr_cfg, + .num_sports = ARRAY_SIZE(sport_dehr_cfg), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = 
SLV_DEHR_CFG, + }, + { + .id = MSM_BUS_SLAVE_MPM, + .slavep = sport_mpm, + .num_sports = ARRAY_SIZE(sport_mpm), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_MPM, + }, + { + .id = MSM_BUS_SLAVE_QDSS_CFG, + .slavep = sport_qdss_cfg, + .num_sports = ARRAY_SIZE(sport_qdss_cfg), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_QDSS_CFG, + }, + { + .id = MSM_BUS_SLAVE_RBCPR_CFG, + .slavep = sport_rbcpr_cfg, + .num_sports = ARRAY_SIZE(sport_rbcpr_cfg), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_RBCPR_CFG, + }, + { + .id = MSM_BUS_SLAVE_RBCPR_QDSS_APU_CFG, + .slavep = sport_rbcpr_qdss_apu_cfg, + .num_sports = ARRAY_SIZE(sport_rbcpr_qdss_apu_cfg), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_RBCPR_QDSS_APU_CFG, + }, + { + .id = MSM_BUS_FAB_SYS_NOC, + .gateway = 1, + .slavep = sport_gw_cnoc_snoc, + .num_sports = ARRAY_SIZE(sport_gw_cnoc_snoc), + .masterp = mport_gw_snoc_cnoc, + .num_mports = ARRAY_SIZE(mport_gw_snoc_cnoc), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .mas_hw_id = MAS_SNOC_CNOC, + .slv_hw_id = SLV_CNOC_SNOC, + }, + { + .id = MSM_BUS_SLAVE_CNOC_ONOC_CFG, + .slavep = sport_cnoc_onoc_cfg, + .num_sports = ARRAY_SIZE(sport_cnoc_onoc_cfg), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_CNOC_ONOC_CFG, + }, + { + .id = MSM_BUS_SLAVE_CNOC_MNOC_MMSS_CFG, + .slavep = sport_cnoc_mnoc_mmss_cfg, + .num_sports = ARRAY_SIZE(sport_cnoc_mnoc_mmss_cfg), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_CNOC_MNOC_MMSS_CFG, + }, + { + .id = MSM_BUS_SLAVE_CNOC_MNOC_CFG, + .slavep = sport_cnoc_mnoc_cfg, + .num_sports = ARRAY_SIZE(sport_cnoc_mnoc_cfg), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_CNOC_MNOC_CFG, + }, + { + .id = MSM_BUS_SLAVE_PNOC_CFG, + .slavep = sport_pnoc_cfg, 
+ .num_sports = ARRAY_SIZE(sport_pnoc_cfg), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_PNOC_CFG, + }, + { + .id = MSM_BUS_SLAVE_SNOC_MPU_CFG, + .slavep = sport_snoc_mpu_cfg, + .num_sports = ARRAY_SIZE(sport_snoc_mpu_cfg), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_SNOC_MPU_CFG, + }, + { + .id = MSM_BUS_SLAVE_SNOC_CFG, + .slavep = sport_snoc_cfg, + .num_sports = ARRAY_SIZE(sport_snoc_cfg), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_SNOC_CFG, + }, + { + .id = MSM_BUS_SLAVE_EBI1_DLL_CFG, + .slavep = sport_ebi1_dll_cfg, + .num_sports = ARRAY_SIZE(sport_ebi1_dll_cfg), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_EBI1_DLL_CFG, + }, + { + .id = MSM_BUS_SLAVE_PHY_APU_CFG, + .slavep = sport_phy_apu_cfg, + .num_sports = ARRAY_SIZE(sport_phy_apu_cfg), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_PHY_APU_CFG, + }, + { + .id = MSM_BUS_SLAVE_EBI1_PHY_CFG, + .slavep = sport_ebi1_phy_cfg, + .num_sports = ARRAY_SIZE(sport_ebi1_phy_cfg), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_EBI1_PHY_CFG, + }, + { + .id = MSM_BUS_SLAVE_RPM, + .slavep = sport_rpm, + .num_sports = ARRAY_SIZE(sport_rpm), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_RPM, + }, + { + .id = MSM_BUS_SLAVE_SERVICE_CNOC, + .slavep = sport_service_cnoc, + .num_sports = ARRAY_SIZE(sport_service_cnoc), + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .slv_hw_id = SLV_SERVICE_CNOC, + }, +}; + +/* A virtual NoC is needed for connection to OCMEM */ +static struct msm_bus_node_info ocmem_vnoc_info[] = { + { + .id = MSM_BUS_MASTER_V_OCMEM_GFX3D, + .tier = tier2, + .num_tiers = ARRAY_SIZE(tier2), + .buswidth = 8, + .mas_hw_id = MAS_V_OCMEM_GFX3D, + }, + { + .id = MSM_BUS_SLAVE_OCMEM, + .slavep = sport_ocmem, + 
.num_sports = ARRAY_SIZE(sport_ocmem),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+		.buswidth = 16,
+		.slv_hw_id = SLV_OCMEM,
+		.slaveclk[DUAL_CTX] = "ocmem_clk",
+		.slaveclk[ACTIVE_CTX] = "ocmem_a_clk",
+	},
+	{
+		.id = MSM_BUS_FAB_SYS_NOC,
+		.gateway = 1,
+		.buswidth = 8,
+		.ws = 10000,
+		.mas_hw_id = MAS_SNOC_OVNOC,
+		.slv_hw_id = SLV_OVNOC_SNOC,
+	},
+	{
+		.id = MSM_BUS_FAB_OCMEM_NOC,
+		.gateway = 1,
+		.buswidth = 16,
+		.ws = 10000,
+		.mas_hw_id = MAS_ONOC_OVNOC,
+		.slv_hw_id = SLV_OVNOC_ONOC,
+	},
+};
+
+static void msm_bus_board_assign_iids(struct msm_bus_fabric_registration
+	*fabreg, int fabid)
+{
+	int i;
+	for (i = 0; i < fabreg->len; i++) {
+		if (!fabreg->info[i].gateway) {
+			fabreg->info[i].priv_id = fabid + fabreg->info[i].id;
+			if (fabreg->info[i].id < SLAVE_ID_KEY) {
+				WARN(fabreg->info[i].id >= NMASTERS,
+					"id %d exceeds array size!\n",
+					fabreg->info[i].id);
+				master_iids[fabreg->info[i].id] =
+					fabreg->info[i].priv_id;
+			} else {
+				WARN((fabreg->info[i].id - SLAVE_ID_KEY) >=
+					NSLAVES, "id %d exceeds array size!\n",
+					fabreg->info[i].id);
+				slave_iids[fabreg->info[i].id - (SLAVE_ID_KEY)]
+					= fabreg->info[i].priv_id;
+			}
+		} else {
+			fabreg->info[i].priv_id = fabreg->info[i].id;
+		}
+	}
+}
+
+static int msm_bus_board_8974_get_iid(int id)
+{
+	if ((id < SLAVE_ID_KEY && id >= NMASTERS) ||
+		id >= (SLAVE_ID_KEY + NSLAVES)) {
+		MSM_BUS_ERR("Cannot get iid. Invalid id %d passed\n", id);
+		return -EINVAL;
+	}
+
+	return CHECK_ID(((id < SLAVE_ID_KEY) ?
master_iids[id] : + slave_iids[id - SLAVE_ID_KEY]), id); +} + +int msm_bus_board_rpm_get_il_ids(uint16_t *id) +{ + return -ENXIO; +} + +static struct msm_bus_board_algorithm msm_bus_board_algo = { + .board_nfab = NFAB_8974, + .get_iid = msm_bus_board_8974_get_iid, + .assign_iids = msm_bus_board_assign_iids, +}; + +struct msm_bus_fabric_registration msm_bus_8974_sys_noc_pdata = { + .id = MSM_BUS_FAB_SYS_NOC, + .name = "msm_sys_noc", + .info = sys_noc_info, + .len = ARRAY_SIZE(sys_noc_info), + .ahb = 0, + .fabclk[DUAL_CTX] = "bus_clk", + .fabclk[ACTIVE_CTX] = "bus_a_clk", + .nmasters = 15, + .nslaves = 12, + .ntieredslaves = 0, + .board_algo = &msm_bus_board_algo, + .qos_freq = 4800, + .hw_sel = MSM_BUS_NOC, + .rpm_enabled = 1, +}; + +struct msm_bus_fabric_registration msm_bus_8974_mmss_noc_pdata = { + .id = MSM_BUS_FAB_MMSS_NOC, + .name = "msm_mmss_noc", + .info = mmss_noc_info, + .len = ARRAY_SIZE(mmss_noc_info), + .ahb = 0, + .fabclk[DUAL_CTX] = "bus_clk", + .fabclk[ACTIVE_CTX] = "bus_a_clk", + .nmasters = 9, + .nslaves = 16, + .ntieredslaves = 0, + .board_algo = &msm_bus_board_algo, + .qos_freq = 4800, + .hw_sel = MSM_BUS_NOC, + .rpm_enabled = 1, +}; + +struct msm_bus_fabric_registration msm_bus_8974_bimc_pdata = { + .id = MSM_BUS_FAB_BIMC, + .name = "msm_bimc", + .info = bimc_info, + .len = ARRAY_SIZE(bimc_info), + .ahb = 0, + .fabclk[DUAL_CTX] = "mem_clk", + .fabclk[ACTIVE_CTX] = "mem_a_clk", + .nmasters = 7, + .nslaves = 4, + .ntieredslaves = 0, + .board_algo = &msm_bus_board_algo, + .qos_freq = 4800, + .hw_sel = MSM_BUS_BIMC, + .rpm_enabled = 1, +}; + +struct msm_bus_fabric_registration msm_bus_8974_ocmem_noc_pdata = { + .id = MSM_BUS_FAB_OCMEM_NOC, + .name = "msm_ocmem_noc", + .info = ocmem_noc_info, + .len = ARRAY_SIZE(ocmem_noc_info), + .ahb = 0, + .fabclk[DUAL_CTX] = "bus_clk", + .fabclk[ACTIVE_CTX] = "bus_a_clk", + .nmasters = 6, + .nslaves = 3, + .ntieredslaves = 0, + .board_algo = &msm_bus_board_algo, + .qos_freq = 4800, + .hw_sel = MSM_BUS_NOC, + 
.rpm_enabled = 1, +}; + +struct msm_bus_fabric_registration msm_bus_8974_periph_noc_pdata = { + .id = MSM_BUS_FAB_PERIPH_NOC, + .name = "msm_periph_noc", + .info = periph_noc_info, + .len = ARRAY_SIZE(periph_noc_info), + .ahb = 0, + .fabclk[DUAL_CTX] = "bus_clk", + .fabclk[ACTIVE_CTX] = "bus_a_clk", + .nmasters = 12, + .nslaves = 16, + .ntieredslaves = 0, + .board_algo = &msm_bus_board_algo, + .hw_sel = MSM_BUS_NOC, + .rpm_enabled = 1, +}; + +struct msm_bus_fabric_registration msm_bus_8974_config_noc_pdata = { + .id = MSM_BUS_FAB_CONFIG_NOC, + .name = "msm_config_noc", + .info = config_noc_info, + .len = ARRAY_SIZE(config_noc_info), + .ahb = 0, + .fabclk[DUAL_CTX] = "bus_clk", + .fabclk[ACTIVE_CTX] = "bus_a_clk", + .nmasters = 8, + .nslaves = 30, + .ntieredslaves = 0, + .board_algo = &msm_bus_board_algo, + .hw_sel = MSM_BUS_NOC, + .rpm_enabled = 1, +}; + +struct msm_bus_fabric_registration msm_bus_8974_ocmem_vnoc_pdata = { + .id = MSM_BUS_FAB_OCMEM_VNOC, + .name = "msm_ocmem_vnoc", + .info = ocmem_vnoc_info, + .len = ARRAY_SIZE(ocmem_vnoc_info), + .ahb = 0, + .nmasters = 5, + .nslaves = 4, + .ntieredslaves = 0, + .board_algo = &msm_bus_board_algo, + .hw_sel = MSM_BUS_NOC, + .virt = 1, + .rpm_enabled = 1, +}; + +void msm_bus_board_init(struct msm_bus_fabric_registration *pdata) +{ + pdata->board_algo = &msm_bus_board_algo; +} + +void msm_bus_board_set_nfab(struct msm_bus_fabric_registration *pdata, + int nfab) +{ + if (nfab <= 0) + return; + + msm_bus_board_algo.board_nfab = nfab; +} diff --git a/drivers/soc/qcom/msm_bus/msm_bus_client_api.c b/drivers/soc/qcom/msm_bus/msm_bus_client_api.c new file mode 100644 index 000000000000..5b386794fedf --- /dev/null +++ b/drivers/soc/qcom/msm_bus/msm_bus_client_api.c @@ -0,0 +1,142 @@ +/* Copyright (c) 2014, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/mutex.h> +#include <linux/radix-tree.h> +#include <linux/clk.h> +#include <linux/msm-bus.h> +#include "msm_bus_core.h" + +struct msm_bus_arb_ops arb_ops; + +/** + * msm_bus_scale_register_client() - Register the clients with the msm bus + * driver + * @pdata: Platform data of the client, containing src, dest, ab, ib. + * Return non-zero value in case of success, 0 in case of failure. + * + * Client data contains the vectors specifying arbitrated bandwidth (ab) + * and instantaneous bandwidth (ib) requested between a particular + * src and dest. 
+ */ +uint32_t msm_bus_scale_register_client(struct msm_bus_scale_pdata *pdata) +{ + if (arb_ops.register_client) + return arb_ops.register_client(pdata); + else { + pr_err("%s: Bus driver not ready.", + __func__); + return 0; + } +} +EXPORT_SYMBOL(msm_bus_scale_register_client); + +/** + * msm_bus_scale_client_update_request() - Update the request for bandwidth + * from a particular client + * + * cl: Handle to the client + * index: Index into the vector, to which the bw and clock values need to be + * updated + */ +int msm_bus_scale_client_update_request(uint32_t cl, unsigned int index) +{ + if (arb_ops.update_request) + return arb_ops.update_request(cl, index); + else { + pr_err("%s: Bus driver not ready.", + __func__); + return -EPROBE_DEFER; + } +} +EXPORT_SYMBOL(msm_bus_scale_client_update_request); + +/** + * msm_bus_scale_unregister_client() - Unregister the client from the bus driver + * @cl: Handle to the client + */ +void msm_bus_scale_unregister_client(uint32_t cl) +{ + if (arb_ops.unregister_client) + arb_ops.unregister_client(cl); + else { + pr_err("%s: Bus driver not ready.", + __func__); + } +} +EXPORT_SYMBOL(msm_bus_scale_unregister_client); + +/** + * msm_bus_scale_register() - Register the clients with the msm bus + * driver + * @pdata: Platform data of the client, containing src, dest, ab, ib. + * Return non-zero value in case of success, 0 in case of failure. + * + * Client data contains the vectors specifying arbitrated bandwidth (ab) + * and instantaneous bandwidth (ib) requested between a particular + * src and dest. 
+ */
+struct msm_bus_client_handle*
+msm_bus_scale_register(uint32_t mas, uint32_t slv, char *name, bool active_only)
+{
+	if (arb_ops.register_cl)
+		return arb_ops.register_cl(mas, slv, name, active_only);
+	else {
+		pr_err("%s: Bus driver not ready.",
+				__func__);
+		return ERR_PTR(-EPROBE_DEFER);
+
+	}
+}
+EXPORT_SYMBOL(msm_bus_scale_register);
+
+/**
+ * msm_bus_scale_update_bw() - Update the bandwidth request for a
+ * particular client
+ *
+ * cl: Handle to the client
+ * ab: New arbitrated bandwidth to request
+ * ib: New instantaneous bandwidth to request
+ */
+int msm_bus_scale_update_bw(struct msm_bus_client_handle *cl, u64 ab, u64 ib)
+{
+	/*
+	 * Test the op actually invoked below (update_bw, not
+	 * update_request); checking the wrong member would dereference
+	 * a NULL update_bw when only the legacy ops are populated.
+	 */
+	if (arb_ops.update_bw)
+		return arb_ops.update_bw(cl, ab, ib);
+	else {
+		pr_err("%s: Bus driver not ready.", __func__);
+		return -EPROBE_DEFER;
+	}
+}
+EXPORT_SYMBOL(msm_bus_scale_update_bw);
+
+/**
+ * msm_bus_scale_unregister() - Unregister a client handle obtained
+ * from msm_bus_scale_register()
+ *
+ * cl: Handle to the client
+ */
+void msm_bus_scale_unregister(struct msm_bus_client_handle *cl)
+{
+	if (arb_ops.unregister)
+		arb_ops.unregister(cl);
+	else
+		pr_err("%s: Bus driver not ready.",
+				__func__);
+}
+EXPORT_SYMBOL(msm_bus_scale_unregister);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_config.c b/drivers/soc/qcom/msm_bus/msm_bus_config.c
new file mode 100644
index 000000000000..ea7fe7f2e1dd
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_config.c
@@ -0,0 +1,78 @@
+/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */ + +#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/mutex.h> +#include <linux/radix-tree.h> +#include <linux/clk.h> +#include <linux/msm-bus-board.h> +#include <linux/msm-bus.h> +#include "msm_bus_core.h" + +static DEFINE_MUTEX(msm_bus_config_lock); + +/** + * msm_bus_axi_porthalt() - Halt the given axi master port + * @master_port: AXI Master port to be halted + */ +int msm_bus_axi_porthalt(int master_port) +{ + int ret = 0; + int priv_id; + struct msm_bus_fabric_device *fabdev; + + priv_id = msm_bus_board_get_iid(master_port); + MSM_BUS_DBG("master_port: %d iid: %d fabid%d\n", + master_port, priv_id, GET_FABID(priv_id)); + fabdev = msm_bus_get_fabric_device(GET_FABID(priv_id)); + if (IS_ERR_OR_NULL(fabdev)) { + MSM_BUS_ERR("Fabric device not found for mport: %d\n", + master_port); + return -ENODEV; + } + mutex_lock(&msm_bus_config_lock); + ret = fabdev->algo->port_halt(fabdev, priv_id); + mutex_unlock(&msm_bus_config_lock); + return ret; +} +EXPORT_SYMBOL(msm_bus_axi_porthalt); + +/** + * msm_bus_axi_portunhalt() - Unhalt the given axi master port + * @master_port: AXI Master port to be unhalted + */ +int msm_bus_axi_portunhalt(int master_port) +{ + int ret = 0; + int priv_id; + struct msm_bus_fabric_device *fabdev; + + priv_id = msm_bus_board_get_iid(master_port); + MSM_BUS_DBG("master_port: %d iid: %d fabid: %d\n", + master_port, priv_id, GET_FABID(priv_id)); + fabdev = msm_bus_get_fabric_device(GET_FABID(priv_id)); + if (IS_ERR_OR_NULL(fabdev)) { + MSM_BUS_ERR("Fabric device not found for mport: %d\n", + master_port); + return -ENODEV; + } + mutex_lock(&msm_bus_config_lock); + ret = fabdev->algo->port_unhalt(fabdev, priv_id); + mutex_unlock(&msm_bus_config_lock); + return ret; +} +EXPORT_SYMBOL(msm_bus_axi_portunhalt); diff --git a/drivers/soc/qcom/msm_bus/msm_bus_core.c b/drivers/soc/qcom/msm_bus/msm_bus_core.c new file mode 100644 
index 000000000000..93c8f4f9ee34 --- /dev/null +++ b/drivers/soc/qcom/msm_bus/msm_bus_core.c @@ -0,0 +1,119 @@ +/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/mutex.h> +#include <linux/radix-tree.h> +#include <linux/clk.h> +#include <linux/msm-bus-board.h> +#include <linux/msm-bus.h> +#include "msm_bus_core.h" + +static atomic_t num_fab = ATOMIC_INIT(0); + +int msm_bus_get_num_fab(void) +{ + return atomic_read(&num_fab); +} + +int msm_bus_device_match(struct device *dev, void *id) +{ + struct msm_bus_fabric_device *fabdev = to_msm_bus_fabric_device(dev); + + if (!fabdev) { + MSM_BUS_WARN("Fabric %p returning 0\n", fabdev); + return 0; + } + return fabdev->id == *(int *)id; +} + +struct bus_type msm_bus_type = { + .name = "msm-bus-type", +}; +EXPORT_SYMBOL(msm_bus_type); + +/** + * msm_bus_get_fabric_device() - This function is used to search for + * the fabric device on the bus + * @fabid: Fabric id + * Function returns: Pointer to the fabric device + */ +struct msm_bus_fabric_device *msm_bus_get_fabric_device(int fabid) +{ + struct device *dev; + struct msm_bus_fabric_device *fabric; + dev = bus_find_device(&msm_bus_type, NULL, (void *)&fabid, + msm_bus_device_match); + if (!dev) + return NULL; + fabric = to_msm_bus_fabric_device(dev); + return fabric; +} + +/** + * msm_bus_fabric_device_register() - 
Registers a fabric on msm bus + * @fabdev: Fabric device to be registered + */ +int msm_bus_fabric_device_register(struct msm_bus_fabric_device *fabdev) +{ + int ret = 0; + fabdev->dev.bus = &msm_bus_type; + ret = dev_set_name(&fabdev->dev, fabdev->name); + if (ret) { + MSM_BUS_ERR("error setting dev name\n"); + goto err; + } + ret = device_register(&fabdev->dev); + if (ret < 0) { + MSM_BUS_ERR("error registering device%d %s\n", + ret, fabdev->name); + goto err; + } + atomic_inc(&num_fab); +err: + return ret; +} + +/** + * msm_bus_fabric_device_unregister() - Unregisters the fabric + * devices from the msm bus + */ +void msm_bus_fabric_device_unregister(struct msm_bus_fabric_device *fabdev) +{ + device_unregister(&fabdev->dev); + atomic_dec(&num_fab); +} + +static void __exit msm_bus_exit(void) +{ + bus_unregister(&msm_bus_type); +} + +static int __init msm_bus_init(void) +{ + int retval = 0; + retval = bus_register(&msm_bus_type); + if (retval) + MSM_BUS_ERR("bus_register error! %d\n", + retval); + return retval; +} +postcore_initcall(msm_bus_init); +module_exit(msm_bus_exit); +MODULE_LICENSE("GPL v2"); +MODULE_VERSION("0.2"); +MODULE_ALIAS("platform:msm_bus"); diff --git a/drivers/soc/qcom/msm_bus/msm_bus_core.h b/drivers/soc/qcom/msm_bus/msm_bus_core.h new file mode 100644 index 000000000000..c35a45caf236 --- /dev/null +++ b/drivers/soc/qcom/msm_bus/msm_bus_core.h @@ -0,0 +1,403 @@ +/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _ARCH_ARM_MACH_MSM_BUS_CORE_H +#define _ARCH_ARM_MACH_MSM_BUS_CORE_H + +#include <linux/types.h> +#include <linux/device.h> +#include <linux/radix-tree.h> +#include <linux/platform_device.h> +#include <linux/msm-bus-board.h> +#include <linux/msm-bus.h> + +#define MSM_BUS_DBG(msg, ...) \ + pr_debug(msg, ## __VA_ARGS__) +#define MSM_BUS_ERR(msg, ...) \ + pr_err(msg, ## __VA_ARGS__) +#define MSM_BUS_WARN(msg, ...) \ + pr_warn(msg, ## __VA_ARGS__) +#define MSM_FAB_ERR(msg, ...) \ + dev_err(&fabric->fabdev.dev, msg, ## __VA_ARGS__) + +#define IS_MASTER_VALID(mas) \ + (((mas >= MSM_BUS_MASTER_FIRST) && (mas <= MSM_BUS_MASTER_LAST)) \ + ? 1 : 0) +#define IS_SLAVE_VALID(slv) \ + (((slv >= MSM_BUS_SLAVE_FIRST) && (slv <= MSM_BUS_SLAVE_LAST)) ? 1 : 0) + +#define INTERLEAVED_BW(fab_pdata, bw, ports) \ + ((fab_pdata->il_flag) ? ((bw < 0) \ + ? -msm_bus_div64((ports), (-bw)) : msm_bus_div64((ports), (bw))) : (bw)) +#define INTERLEAVED_VAL(fab_pdata, n) \ + ((fab_pdata->il_flag) ? (n) : 1) +#define KBTOB(a) (a * 1000ULL) + +enum msm_bus_dbg_op_type { + MSM_BUS_DBG_UNREGISTER = -2, + MSM_BUS_DBG_REGISTER, + MSM_BUS_DBG_OP = 1, +}; + +enum msm_bus_hw_sel { + MSM_BUS_RPM = 0, + MSM_BUS_NOC, + MSM_BUS_BIMC, +}; + +struct msm_bus_arb_ops { + uint32_t (*register_client)(struct msm_bus_scale_pdata *pdata); + int (*update_request)(uint32_t cl, unsigned int index); + void (*unregister_client)(uint32_t cl); + struct msm_bus_client_handle* + (*register_cl)(uint32_t mas, uint32_t slv, char *name, + bool active_only); + int (*update_bw)(struct msm_bus_client_handle *cl, u64 ab, u64 ib); + void (*unregister)(struct msm_bus_client_handle *cl); +}; + +enum { + SLAVE_NODE, + MASTER_NODE, + CLK_NODE, + NR_LIM_NODE, +}; + + +extern struct bus_type msm_bus_type; +extern struct msm_bus_arb_ops arb_ops; +extern void msm_bus_arb_setops_legacy(struct msm_bus_arb_ops *arb_ops); + +struct msm_bus_node_info { + unsigned int id; + unsigned int priv_id; + unsigned int mas_hw_id; + unsigned 
int slv_hw_id; + int gateway; + int *masterp; + int *qport; + int num_mports; + int *slavep; + int num_sports; + int *tier; + int num_tiers; + int ahb; + int hw_sel; + const char *slaveclk[NUM_CTX]; + const char *memclk[NUM_CTX]; + const char *iface_clk_node; + unsigned int buswidth; + unsigned int ws; + unsigned int mode; + unsigned int perm_mode; + unsigned int prio_lvl; + unsigned int prio_rd; + unsigned int prio_wr; + unsigned int prio1; + unsigned int prio0; + unsigned int num_thresh; + u64 *th; + u64 cur_lim_bw; + unsigned int mode_thresh; + bool dual_conf; + u64 *bimc_bw; + bool nr_lim; + u32 ff; + bool rt_mas; + u32 bimc_gp; + u32 bimc_thmp; + u64 floor_bw; + const char *name; +}; + +struct path_node { + uint64_t clk[NUM_CTX]; + uint64_t bw[NUM_CTX]; + uint64_t *sel_clk; + uint64_t *sel_bw; + int next; +}; + +struct msm_bus_link_info { + uint64_t clk[NUM_CTX]; + uint64_t *sel_clk; + uint64_t memclk; + int64_t bw[NUM_CTX]; + int64_t *sel_bw; + int *tier; + int num_tiers; +}; + +struct nodeclk { + struct clk *clk; + uint64_t rate; + bool dirty; + bool enable; +}; + +struct msm_bus_inode_info { + struct msm_bus_node_info *node_info; + uint64_t max_bw; + uint64_t max_clk; + uint64_t cur_lim_bw; + uint64_t cur_prg_bw; + struct msm_bus_link_info link_info; + int num_pnodes; + struct path_node *pnode; + int commit_index; + struct nodeclk nodeclk[NUM_CTX]; + struct nodeclk memclk[NUM_CTX]; + struct nodeclk iface_clk; + void *hw_data; +}; + +struct msm_bus_node_hw_info { + bool dirty; + unsigned int hw_id; + uint64_t bw; +}; + +struct msm_bus_hw_algorithm { + int (*allocate_commit_data)(struct msm_bus_fabric_registration + *fab_pdata, void **cdata, int ctx); + void *(*allocate_hw_data)(struct platform_device *pdev, + struct msm_bus_fabric_registration *fab_pdata); + void (*node_init)(void *hw_data, struct msm_bus_inode_info *info); + void (*free_commit_data)(void *cdata); + void (*update_bw)(struct msm_bus_inode_info *hop, + struct msm_bus_inode_info *info, + struct 
msm_bus_fabric_registration *fab_pdata, + void *sel_cdata, int *master_tiers, + int64_t add_bw); + void (*fill_cdata_buffer)(int *curr, char *buf, const int max_size, + void *cdata, int nmasters, int nslaves, int ntslaves); + int (*commit)(struct msm_bus_fabric_registration + *fab_pdata, void *hw_data, void **cdata); + int (*port_unhalt)(uint32_t haltid, uint8_t mport); + int (*port_halt)(uint32_t haltid, uint8_t mport); + void (*config_master)(struct msm_bus_fabric_registration *fab_pdata, + struct msm_bus_inode_info *info, + uint64_t req_clk, uint64_t req_bw); + void (*config_limiter)(struct msm_bus_fabric_registration *fab_pdata, + struct msm_bus_inode_info *info); + bool (*update_bw_reg)(int mode); +}; + +struct msm_bus_fabric_device { + int id; + const char *name; + struct device dev; + const struct msm_bus_fab_algorithm *algo; + const struct msm_bus_board_algorithm *board_algo; + struct msm_bus_hw_algorithm hw_algo; + int visited; + int num_nr_lim; + u64 nr_lim_thresh; + u32 eff_fact; +}; +#define to_msm_bus_fabric_device(d) container_of(d, \ + struct msm_bus_fabric_device, d) + +struct msm_bus_fabric { + struct msm_bus_fabric_device fabdev; + int ahb; + void *cdata[NUM_CTX]; + bool arb_dirty; + bool clk_dirty; + struct radix_tree_root fab_tree; + int num_nodes; + struct list_head gateways; + struct msm_bus_inode_info info; + struct msm_bus_fabric_registration *pdata; + void *hw_data; +}; +#define to_msm_bus_fabric(d) container_of(d, \ + struct msm_bus_fabric, d) + + +struct msm_bus_fab_algorithm { + int (*update_clks)(struct msm_bus_fabric_device *fabdev, + struct msm_bus_inode_info *pme, int index, + uint64_t curr_clk, uint64_t req_clk, + uint64_t bwsum, int flag, int ctx, + unsigned int cl_active_flag); + int (*port_halt)(struct msm_bus_fabric_device *fabdev, int portid); + int (*port_unhalt)(struct msm_bus_fabric_device *fabdev, int portid); + int (*commit)(struct msm_bus_fabric_device *fabdev); + struct msm_bus_inode_info *(*find_node)(struct 
msm_bus_fabric_device + *fabdev, int id); + struct msm_bus_inode_info *(*find_gw_node)(struct msm_bus_fabric_device + *fabdev, int id); + struct list_head *(*get_gw_list)(struct msm_bus_fabric_device *fabdev); + void (*update_bw)(struct msm_bus_fabric_device *fabdev, struct + msm_bus_inode_info * hop, struct msm_bus_inode_info *info, + int64_t add_bw, int *master_tiers, int ctx); + void (*config_master)(struct msm_bus_fabric_device *fabdev, + struct msm_bus_inode_info *info, uint64_t req_clk, + uint64_t req_bw); + void (*config_limiter)(struct msm_bus_fabric_device *fabdev, + struct msm_bus_inode_info *info); +}; + +struct msm_bus_board_algorithm { + int board_nfab; + void (*assign_iids)(struct msm_bus_fabric_registration *fabreg, + int fabid); + int (*get_iid)(int id); +}; + +/** + * Used to store the list of fabrics and other info to be + * maintained outside the fabric structure. + * Used while calculating path, and to find fabric ptrs + */ +struct msm_bus_fabnodeinfo { + struct list_head list; + struct msm_bus_inode_info *info; +}; + +struct msm_bus_client { + int id; + struct msm_bus_scale_pdata *pdata; + int *src_pnode; + int curr; +}; + +uint64_t msm_bus_div64(unsigned int width, uint64_t bw); +int msm_bus_fabric_device_register(struct msm_bus_fabric_device *fabric); +void msm_bus_fabric_device_unregister(struct msm_bus_fabric_device *fabric); +struct msm_bus_fabric_device *msm_bus_get_fabric_device(int fabid); +int msm_bus_get_num_fab(void); + + +int msm_bus_hw_fab_init(struct msm_bus_fabric_registration *pdata, + struct msm_bus_hw_algorithm *hw_algo); +void msm_bus_board_init(struct msm_bus_fabric_registration *pdata); +void msm_bus_board_set_nfab(struct msm_bus_fabric_registration *pdata, + int nfab); +#if defined(CONFIG_QCOM_SMD_RPM) +int msm_bus_rpm_hw_init(struct msm_bus_fabric_registration *pdata, + struct msm_bus_hw_algorithm *hw_algo); +int msm_bus_remote_hw_commit(struct msm_bus_fabric_registration + *fab_pdata, void *hw_data, void **cdata); +void 
msm_bus_rpm_fill_cdata_buffer(int *curr, char *buf, const int max_size,
+	void *cdata, int nmasters, int nslaves, int ntslaves);
+#else
+static inline int msm_bus_rpm_hw_init(struct msm_bus_fabric_registration *pdata,
+	struct msm_bus_hw_algorithm *hw_algo)
+{
+	return 0;
+}
+static inline int msm_bus_remote_hw_commit(struct msm_bus_fabric_registration
+	*fab_pdata, void *hw_data, void **cdata)
+{
+	return 0;
+}
+static inline void msm_bus_rpm_fill_cdata_buffer(int *curr, char *buf,
+	const int max_size, void *cdata, int nmasters, int nslaves,
+	int ntslaves)
+{
+}
+#endif
+
+int msm_bus_noc_hw_init(struct msm_bus_fabric_registration *pdata,
+	struct msm_bus_hw_algorithm *hw_algo);
+int msm_bus_bimc_hw_init(struct msm_bus_fabric_registration *pdata,
+	struct msm_bus_hw_algorithm *hw_algo);
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_MSM_BUS_SCALING)
+void msm_bus_dbg_client_data(struct msm_bus_scale_pdata *pdata, int index,
+	uint32_t cl);
+void msm_bus_dbg_commit_data(const char *fabname, void *cdata,
+	int nmasters, int nslaves, int ntslaves, int op);
+int msm_bus_dbg_add_client(const struct msm_bus_client_handle *pdata);
+int msm_bus_dbg_rec_transaction(const struct msm_bus_client_handle *pdata,
+	u64 ab, u64 ib);
+void msm_bus_dbg_remove_client(const struct msm_bus_client_handle *pdata);
+
+#else
+static inline void msm_bus_dbg_client_data(struct msm_bus_scale_pdata *pdata,
+	int index, uint32_t cl)
+{
+}
+static inline void msm_bus_dbg_commit_data(const char *fabname,
+	void *cdata, int nmasters, int nslaves, int ntslaves,
+	int op)
+{
+}
+/* Fixed doubled "void void" return type, which broke the build whenever
+ * CONFIG_DEBUG_FS/CONFIG_MSM_BUS_SCALING were disabled.
+ */
+static inline void msm_bus_dbg_remove_client
+	(const struct msm_bus_client_handle *pdata)
+{
+}
+
+static inline int
+msm_bus_dbg_rec_transaction(const struct msm_bus_client_handle *pdata,
+	u64 ab, u64 ib)
+{
+	return 0;
+}
+
+static inline int
+msm_bus_dbg_add_client(const struct msm_bus_client_handle *pdata)
+{
+	return 0;
+}
+#endif
+
+#if 0
+//#ifdef CONFIG_CORESIGHT
+int msmbus_coresight_init(struct
platform_device *pdev); +void msmbus_coresight_remove(struct platform_device *pdev); +int msmbus_coresight_init_adhoc(struct platform_device *pdev, + struct device_node *of_node); +void msmbus_coresight_remove_adhoc(struct platform_device *pdev); +#else +static inline int msmbus_coresight_init(struct platform_device *pdev) +{ + return 0; +} + +static inline void msmbus_coresight_remove(struct platform_device *pdev) +{ +} + +static inline int msmbus_coresight_init_adhoc(struct platform_device *pdev, + struct device_node *of_node) +{ + return 0; +} + +static inline void msmbus_coresight_remove_adhoc(struct platform_device *pdev) +{ +} +#endif + + +#ifdef CONFIG_OF +void msm_bus_of_get_nfab(struct platform_device *pdev, + struct msm_bus_fabric_registration *pdata); +struct msm_bus_fabric_registration + *msm_bus_of_get_fab_data(struct platform_device *pdev); +#else +static inline void msm_bus_of_get_nfab(struct platform_device *pdev, + struct msm_bus_fabric_registration *pdata) +{ + return; +} + +static inline struct msm_bus_fabric_registration + *msm_bus_of_get_fab_data(struct platform_device *pdev) +{ + return NULL; +} +#endif + +#endif /*_ARCH_ARM_MACH_MSM_BUS_CORE_H*/ diff --git a/drivers/soc/qcom/msm_bus/msm_bus_dbg.c b/drivers/soc/qcom/msm_bus/msm_bus_dbg.c new file mode 100644 index 000000000000..d4c2068d9e3a --- /dev/null +++ b/drivers/soc/qcom/msm_bus/msm_bus_dbg.c @@ -0,0 +1,903 @@ +/* Copyright (c) 2010-2012, 2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/seq_file.h> +#include <linux/debugfs.h> +#include <linux/slab.h> +#include <linux/mutex.h> +#include <linux/string.h> +#include <linux/uaccess.h> +#include <linux/hrtimer.h> +#include <linux/msm-bus-board.h> +#include <linux/msm-bus.h> +#include <linux/msm_bus_rules.h> +#include "msm_bus_core.h" +#include "msm_bus_adhoc.h" + +#define CREATE_TRACE_POINTS +#include <trace/events/trace_msm_bus.h> + +#define MAX_BUFF_SIZE 4096 +#define FILL_LIMIT 128 + +static struct dentry *clients; +static struct dentry *dir; +static DEFINE_MUTEX(msm_bus_dbg_fablist_lock); +struct msm_bus_dbg_state { + uint32_t cl; + uint8_t enable; + uint8_t current_index; +} clstate; + +struct msm_bus_cldata { + const struct msm_bus_scale_pdata *pdata; + const struct msm_bus_client_handle *handle; + int index; + uint32_t clid; + int size; + struct dentry *file; + struct list_head list; + char buffer[MAX_BUFF_SIZE]; +}; + +struct msm_bus_fab_list { + const char *name; + int size; + struct dentry *file; + struct list_head list; + char buffer[MAX_BUFF_SIZE]; +}; + +static char *rules_buf; + +LIST_HEAD(fabdata_list); +LIST_HEAD(cl_list); + +/** + * The following structures and funtions are used for + * the test-client which can be created at run-time. 
+ */ + +static struct msm_bus_vectors init_vectors[1]; +static struct msm_bus_vectors current_vectors[1]; +static struct msm_bus_vectors requested_vectors[1]; + +static struct msm_bus_paths shell_client_usecases[] = { + { + .num_paths = ARRAY_SIZE(init_vectors), + .vectors = init_vectors, + }, + { + .num_paths = ARRAY_SIZE(current_vectors), + .vectors = current_vectors, + }, + { + .num_paths = ARRAY_SIZE(requested_vectors), + .vectors = requested_vectors, + }, +}; + +static struct msm_bus_scale_pdata shell_client = { + .usecase = shell_client_usecases, + .num_usecases = ARRAY_SIZE(shell_client_usecases), + .name = "test-client", +}; + +static void msm_bus_dbg_init_vectors(void) +{ + init_vectors[0].src = -1; + init_vectors[0].dst = -1; + init_vectors[0].ab = 0; + init_vectors[0].ib = 0; + current_vectors[0].src = -1; + current_vectors[0].dst = -1; + current_vectors[0].ab = 0; + current_vectors[0].ib = 0; + requested_vectors[0].src = -1; + requested_vectors[0].dst = -1; + requested_vectors[0].ab = 0; + requested_vectors[0].ib = 0; + clstate.enable = 0; + clstate.current_index = 0; +} + +static int msm_bus_dbg_update_cl_request(uint32_t cl) +{ + int ret = 0; + + if (clstate.current_index < 2) + clstate.current_index = 2; + else { + clstate.current_index = 1; + current_vectors[0].ab = requested_vectors[0].ab; + current_vectors[0].ib = requested_vectors[0].ib; + } + + if (clstate.enable) { + MSM_BUS_DBG("Updating request for shell client, index: %d\n", + clstate.current_index); + ret = msm_bus_scale_client_update_request(clstate.cl, + clstate.current_index); + } else + MSM_BUS_DBG("Enable bit not set. 
Skipping update request\n"); + + return ret; +} + +static void msm_bus_dbg_unregister_client(uint32_t cl) +{ + MSM_BUS_DBG("Unregistering shell client\n"); + msm_bus_scale_unregister_client(clstate.cl); + clstate.cl = 0; +} + +static uint32_t msm_bus_dbg_register_client(void) +{ + int ret = 0; + + if (init_vectors[0].src != requested_vectors[0].src) { + MSM_BUS_DBG("Shell client master changed. Unregistering\n"); + msm_bus_dbg_unregister_client(clstate.cl); + } + if (init_vectors[0].dst != requested_vectors[0].dst) { + MSM_BUS_DBG("Shell client slave changed. Unregistering\n"); + msm_bus_dbg_unregister_client(clstate.cl); + } + + current_vectors[0].src = init_vectors[0].src; + requested_vectors[0].src = init_vectors[0].src; + current_vectors[0].dst = init_vectors[0].dst; + requested_vectors[0].dst = init_vectors[0].dst; + + if (!clstate.enable) { + MSM_BUS_DBG("Enable bit not set, skipping registration: cl " + "%d\n", clstate.cl); + return 0; + } + + if (clstate.cl) { + MSM_BUS_DBG("Client registered, skipping registration\n"); + return clstate.cl; + } + + MSM_BUS_DBG("Registering shell client\n"); + ret = msm_bus_scale_register_client(&shell_client); + return ret; +} + +static int msm_bus_dbg_mas_get(void *data, u64 *val) +{ + *val = init_vectors[0].src; + MSM_BUS_DBG("Get master: %llu\n", *val); + return 0; +} + +static int msm_bus_dbg_mas_set(void *data, u64 val) +{ + init_vectors[0].src = val; + MSM_BUS_DBG("Set master: %llu\n", val); + clstate.cl = msm_bus_dbg_register_client(); + return 0; +} +DEFINE_SIMPLE_ATTRIBUTE(shell_client_mas_fops, msm_bus_dbg_mas_get, + msm_bus_dbg_mas_set, "%llu\n"); + +static int msm_bus_dbg_slv_get(void *data, u64 *val) +{ + *val = init_vectors[0].dst; + MSM_BUS_DBG("Get slave: %llu\n", *val); + return 0; +} + +static int msm_bus_dbg_slv_set(void *data, u64 val) +{ + init_vectors[0].dst = val; + MSM_BUS_DBG("Set slave: %llu\n", val); + clstate.cl = msm_bus_dbg_register_client(); + return 0; +} 
+DEFINE_SIMPLE_ATTRIBUTE(shell_client_slv_fops, msm_bus_dbg_slv_get, + msm_bus_dbg_slv_set, "%llu\n"); + +static int msm_bus_dbg_ab_get(void *data, u64 *val) +{ + *val = requested_vectors[0].ab; + MSM_BUS_DBG("Get ab: %llu\n", *val); + return 0; +} + +static int msm_bus_dbg_ab_set(void *data, u64 val) +{ + requested_vectors[0].ab = val; + MSM_BUS_DBG("Set ab: %llu\n", val); + return 0; +} +DEFINE_SIMPLE_ATTRIBUTE(shell_client_ab_fops, msm_bus_dbg_ab_get, + msm_bus_dbg_ab_set, "%llu\n"); + +static int msm_bus_dbg_ib_get(void *data, u64 *val) +{ + *val = requested_vectors[0].ib; + MSM_BUS_DBG("Get ib: %llu\n", *val); + return 0; +} + +static int msm_bus_dbg_ib_set(void *data, u64 val) +{ + requested_vectors[0].ib = val; + MSM_BUS_DBG("Set ib: %llu\n", val); + return 0; +} +DEFINE_SIMPLE_ATTRIBUTE(shell_client_ib_fops, msm_bus_dbg_ib_get, + msm_bus_dbg_ib_set, "%llu\n"); + +static int msm_bus_dbg_en_get(void *data, u64 *val) +{ + *val = clstate.enable; + MSM_BUS_DBG("Get enable: %llu\n", *val); + return 0; +} + +static int msm_bus_dbg_en_set(void *data, u64 val) +{ + int ret = 0; + + clstate.enable = val; + if (clstate.enable) { + if (!clstate.cl) { + MSM_BUS_DBG("client: %u\n", clstate.cl); + clstate.cl = msm_bus_dbg_register_client(); + if (clstate.cl) + ret = msm_bus_dbg_update_cl_request(clstate.cl); + } else { + MSM_BUS_DBG("update request for cl: %u\n", clstate.cl); + ret = msm_bus_dbg_update_cl_request(clstate.cl); + } + } + + MSM_BUS_DBG("Set enable: %llu\n", val); + return ret; +} +DEFINE_SIMPLE_ATTRIBUTE(shell_client_en_fops, msm_bus_dbg_en_get, + msm_bus_dbg_en_set, "%llu\n"); + +/** + * The following funtions are used for viewing the client data + * and changing the client request at run-time + */ + +static ssize_t client_data_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + int bsize = 0; + uint32_t cl = (uint32_t)(uintptr_t)file->private_data; + struct msm_bus_cldata *cldata = NULL; + const struct msm_bus_client_handle *handle 
= file->private_data; + int found = 0; + + list_for_each_entry(cldata, &cl_list, list) { + if ((cldata->clid == cl) || + (cldata->handle && (cldata->handle == handle))) { + found = 1; + break; + } + } + + if (!found) + return 0; + + bsize = cldata->size; + return simple_read_from_buffer(buf, count, ppos, + cldata->buffer, bsize); +} + +static int client_data_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static const struct file_operations client_data_fops = { + .open = client_data_open, + .read = client_data_read, +}; + +struct dentry *msm_bus_dbg_create(const char *name, mode_t mode, + struct dentry *dent, uint32_t clid) +{ + if (dent == NULL) { + MSM_BUS_DBG("debugfs not ready yet\n"); + return NULL; + } + return debugfs_create_file(name, mode, dent, (void *)(uintptr_t)clid, + &client_data_fops); +} + +int msm_bus_dbg_add_client(const struct msm_bus_client_handle *pdata) + +{ + struct msm_bus_cldata *cldata; + + cldata = kzalloc(sizeof(struct msm_bus_cldata), GFP_KERNEL); + if (!cldata) { + MSM_BUS_DBG("Failed to allocate memory for client data\n"); + return -ENOMEM; + } + cldata->handle = pdata; + list_add_tail(&cldata->list, &cl_list); + return 0; +} + +int msm_bus_dbg_rec_transaction(const struct msm_bus_client_handle *pdata, + u64 ab, u64 ib) +{ + struct msm_bus_cldata *cldata; + int i; + struct timespec ts; + bool found = false; + char *buf = NULL; + + list_for_each_entry(cldata, &cl_list, list) { + if (cldata->handle == pdata) { + found = true; + break; + } + } + + if (!found) + return -ENOENT; + + if (cldata->file == NULL) { + if (pdata->name == NULL) { + MSM_BUS_DBG("Client doesn't have a name\n"); + return -EINVAL; + } + pr_err("\n%s setting up debugfs %s", __func__, pdata->name); + cldata->file = debugfs_create_file(pdata->name, S_IRUGO, + clients, (void *)pdata, &client_data_fops); + } + + if (cldata->size < (MAX_BUFF_SIZE - FILL_LIMIT)) + i = cldata->size; + else { + i = 0; + cldata->size = 
0; + } + buf = cldata->buffer; + ts = ktime_to_timespec(ktime_get()); + i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n%d.%d\n", + (int)ts.tv_sec, (int)ts.tv_nsec); + i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "master: "); + + i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%d ", pdata->mas); + i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nslave : "); + i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%d ", pdata->slv); + i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nab : "); + i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%llu ", ab); + + i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nib : "); + i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%llu ", ib); + i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n"); + cldata->size = i; + + trace_bus_update_request((int)ts.tv_sec, (int)ts.tv_nsec, + pdata->name, pdata->mas, pdata->slv, ab, ib); + + return i; +} + +void msm_bus_dbg_remove_client(const struct msm_bus_client_handle *pdata) +{ + struct msm_bus_cldata *cldata = NULL; + + list_for_each_entry(cldata, &cl_list, list) { + if (cldata->handle == pdata) { + debugfs_remove(cldata->file); + list_del(&cldata->list); + kfree(cldata); + break; + } + } +} + +static int msm_bus_dbg_record_client(const struct msm_bus_scale_pdata *pdata, + int index, uint32_t clid, struct dentry *file) +{ + struct msm_bus_cldata *cldata; + + cldata = kmalloc(sizeof(struct msm_bus_cldata), GFP_KERNEL); + if (!cldata) { + MSM_BUS_DBG("Failed to allocate memory for client data\n"); + return -ENOMEM; + } + cldata->pdata = pdata; + cldata->index = index; + cldata->clid = clid; + cldata->file = file; + cldata->size = 0; + list_add_tail(&cldata->list, &cl_list); + return 0; +} + +static void msm_bus_dbg_free_client(uint32_t clid) +{ + struct msm_bus_cldata *cldata = NULL; + + list_for_each_entry(cldata, &cl_list, list) { + if (cldata->clid == clid) { + debugfs_remove(cldata->file); + list_del(&cldata->list); + kfree(cldata); + break; + } + } +} + +static int msm_bus_dbg_fill_cl_buffer(const struct msm_bus_scale_pdata 
*pdata, + int index, uint32_t clid) +{ + int i = 0, j; + char *buf = NULL; + struct msm_bus_cldata *cldata = NULL; + struct timespec ts; + int found = 0; + + list_for_each_entry(cldata, &cl_list, list) { + if (cldata->clid == clid) { + found = 1; + break; + } + } + + if (!found) + return -ENOENT; + + if (cldata->file == NULL) { + if (pdata->name == NULL) { + MSM_BUS_DBG("Client doesn't have a name\n"); + return -EINVAL; + } + cldata->file = msm_bus_dbg_create(pdata->name, S_IRUGO, + clients, clid); + } + + if (cldata->size < (MAX_BUFF_SIZE - FILL_LIMIT)) + i = cldata->size; + else { + i = 0; + cldata->size = 0; + } + buf = cldata->buffer; + ts = ktime_to_timespec(ktime_get()); + i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n%d.%d\n", + (int)ts.tv_sec, (int)ts.tv_nsec); + i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "curr : %d\n", index); + i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "masters: "); + + for (j = 0; j < pdata->usecase->num_paths; j++) + i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%d ", + pdata->usecase[index].vectors[j].src); + i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nslaves : "); + for (j = 0; j < pdata->usecase->num_paths; j++) + i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%d ", + pdata->usecase[index].vectors[j].dst); + i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nab : "); + for (j = 0; j < pdata->usecase->num_paths; j++) + i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%llu ", + pdata->usecase[index].vectors[j].ab); + i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nib : "); + for (j = 0; j < pdata->usecase->num_paths; j++) + i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%llu ", + pdata->usecase[index].vectors[j].ib); + i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n"); + + for (j = 0; j < pdata->usecase->num_paths; j++) + trace_bus_update_request((int)ts.tv_sec, (int)ts.tv_nsec, + pdata->name, + pdata->usecase[index].vectors[j].src, + pdata->usecase[index].vectors[j].dst, + pdata->usecase[index].vectors[j].ab, + pdata->usecase[index].vectors[j].ib); + + 
cldata->index = index; + cldata->size = i; + return i; +} + +static int msm_bus_dbg_update_request(struct msm_bus_cldata *cldata, int index) +{ + int ret = 0; + + if ((index < 0) || (index > cldata->pdata->num_usecases)) { + MSM_BUS_DBG("Invalid index!\n"); + return -EINVAL; + } + ret = msm_bus_scale_client_update_request(cldata->clid, index); + return ret; +} + +static ssize_t msm_bus_dbg_update_request_write(struct file *file, + const char __user *ubuf, size_t cnt, loff_t *ppos) +{ + struct msm_bus_cldata *cldata; + unsigned long index = 0; + int ret = 0; + char *chid; + char *buf = kmalloc((sizeof(char) * (cnt + 1)), GFP_KERNEL); + int found = 0; + + if (!buf || IS_ERR(buf)) { + MSM_BUS_ERR("Memory allocation for buffer failed\n"); + return -ENOMEM; + } + if (cnt == 0) + return 0; + if (copy_from_user(buf, ubuf, cnt)) + return -EFAULT; + buf[cnt] = '\0'; + chid = buf; + MSM_BUS_DBG("buffer: %s\n size: %zu\n", buf, sizeof(ubuf)); + + list_for_each_entry(cldata, &cl_list, list) { + if (strnstr(chid, cldata->pdata->name, cnt)) { + found = 1; + cldata = cldata; + strsep(&chid, " "); + if (chid) { + ret = kstrtoul(chid, 10, &index); + if (ret) { + MSM_BUS_DBG("Index conversion" + " failed\n"); + return -EFAULT; + } + } else { + MSM_BUS_DBG("Error parsing input. 
Index not" + " found\n"); + found = 0; + } + break; + } + } + + if (found) + msm_bus_dbg_update_request(cldata, index); + kfree(buf); + return cnt; +} + +/** + * The following funtions are used for viewing the commit data + * for each fabric + */ +static ssize_t fabric_data_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + struct msm_bus_fab_list *fablist = NULL; + int bsize = 0; + ssize_t ret; + const char *name = file->private_data; + int found = 0; + + mutex_lock(&msm_bus_dbg_fablist_lock); + list_for_each_entry(fablist, &fabdata_list, list) { + if (strcmp(fablist->name, name) == 0) { + found = 1; + break; + } + } + if (!found) + return -ENOENT; + bsize = fablist->size; + ret = simple_read_from_buffer(buf, count, ppos, + fablist->buffer, bsize); + mutex_unlock(&msm_bus_dbg_fablist_lock); + return ret; +} + +static const struct file_operations fabric_data_fops = { + .open = client_data_open, + .read = fabric_data_read, +}; + +static ssize_t rules_dbg_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + ssize_t ret; + memset(rules_buf, 0, MAX_BUFF_SIZE); + print_rules_buf(rules_buf, MAX_BUFF_SIZE); + ret = simple_read_from_buffer(buf, count, ppos, + rules_buf, MAX_BUFF_SIZE); + return ret; +} + +static int rules_dbg_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static const struct file_operations rules_dbg_fops = { + .open = rules_dbg_open, + .read = rules_dbg_read, +}; + +static int msm_bus_dbg_record_fabric(const char *fabname, struct dentry *file) +{ + struct msm_bus_fab_list *fablist; + int ret = 0; + + mutex_lock(&msm_bus_dbg_fablist_lock); + fablist = kmalloc(sizeof(struct msm_bus_fab_list), GFP_KERNEL); + if (!fablist) { + MSM_BUS_DBG("Failed to allocate memory for commit data\n"); + ret = -ENOMEM; + goto err; + } + + fablist->name = fabname; + fablist->size = 0; + list_add_tail(&fablist->list, &fabdata_list); +err: + 
mutex_unlock(&msm_bus_dbg_fablist_lock); + return ret; +} + +static void msm_bus_dbg_free_fabric(const char *fabname) +{ + struct msm_bus_fab_list *fablist = NULL; + + mutex_lock(&msm_bus_dbg_fablist_lock); + list_for_each_entry(fablist, &fabdata_list, list) { + if (strcmp(fablist->name, fabname) == 0) { + debugfs_remove(fablist->file); + list_del(&fablist->list); + kfree(fablist); + break; + } + } + mutex_unlock(&msm_bus_dbg_fablist_lock); +} + +static int msm_bus_dbg_fill_fab_buffer(const char *fabname, + void *cdata, int nmasters, int nslaves, + int ntslaves) +{ + int i; + char *buf = NULL; + struct msm_bus_fab_list *fablist = NULL; + struct timespec ts; + int found = 0; + + mutex_lock(&msm_bus_dbg_fablist_lock); + list_for_each_entry(fablist, &fabdata_list, list) { + if (strcmp(fablist->name, fabname) == 0) { + found = 1; + break; + } + } + if (!found) + return -ENOENT; + + if (fablist->file == NULL) { + MSM_BUS_DBG("Fabric dbg entry does not exist\n"); + mutex_unlock(&msm_bus_dbg_fablist_lock); + return -EFAULT; + } + + if (fablist->size < MAX_BUFF_SIZE - 256) + i = fablist->size; + else { + i = 0; + fablist->size = 0; + } + buf = fablist->buffer; + mutex_unlock(&msm_bus_dbg_fablist_lock); + ts = ktime_to_timespec(ktime_get()); + i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n%d.%d\n", + (int)ts.tv_sec, (int)ts.tv_nsec); + + msm_bus_rpm_fill_cdata_buffer(&i, buf, MAX_BUFF_SIZE, cdata, + nmasters, nslaves, ntslaves); + i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n"); + mutex_lock(&msm_bus_dbg_fablist_lock); + fablist->size = i; + mutex_unlock(&msm_bus_dbg_fablist_lock); + return 0; +} + +static const struct file_operations msm_bus_dbg_update_request_fops = { + .open = client_data_open, + .write = msm_bus_dbg_update_request_write, +}; + +/** + * msm_bus_dbg_client_data() - Add debug data for clients + * @pdata: Platform data of the client + * @index: The current index or operation to be performed + * @clid: Client handle obtained during registration + */ +void 
msm_bus_dbg_client_data(struct msm_bus_scale_pdata *pdata, int index, + uint32_t clid) +{ + struct dentry *file = NULL; + + if (index == MSM_BUS_DBG_REGISTER) { + msm_bus_dbg_record_client(pdata, index, clid, file); + if (!pdata->name) { + MSM_BUS_DBG("Cannot create debugfs entry. Null name\n"); + return; + } + } else if (index == MSM_BUS_DBG_UNREGISTER) { + msm_bus_dbg_free_client(clid); + MSM_BUS_DBG("Client %d unregistered\n", clid); + } else + msm_bus_dbg_fill_cl_buffer(pdata, index, clid); +} +EXPORT_SYMBOL(msm_bus_dbg_client_data); + +/** + * msm_bus_dbg_commit_data() - Add commit data from fabrics + * @fabname: Fabric name specified in platform data + * @cdata: Commit Data + * @nmasters: Number of masters attached to fabric + * @nslaves: Number of slaves attached to fabric + * @ntslaves: Number of tiered slaves attached to fabric + * @op: Operation to be performed + */ +void msm_bus_dbg_commit_data(const char *fabname, void *cdata, + int nmasters, int nslaves, int ntslaves, int op) +{ + struct dentry *file = NULL; + + if (op == MSM_BUS_DBG_REGISTER) + msm_bus_dbg_record_fabric(fabname, file); + else if (op == MSM_BUS_DBG_UNREGISTER) + msm_bus_dbg_free_fabric(fabname); + else + msm_bus_dbg_fill_fab_buffer(fabname, cdata, nmasters, + nslaves, ntslaves); +} +EXPORT_SYMBOL(msm_bus_dbg_commit_data); + +static int __init msm_bus_debugfs_init(void) +{ + struct dentry *commit, *shell_client, *rules_dbg; + struct msm_bus_fab_list *fablist; + struct msm_bus_cldata *cldata = NULL; + uint64_t val = 0; + + dir = debugfs_create_dir("msm-bus-dbg", NULL); + if ((!dir) || IS_ERR(dir)) { + MSM_BUS_ERR("Couldn't create msm-bus-dbg\n"); + goto err; + } + + clients = debugfs_create_dir("client-data", dir); + if ((!dir) || IS_ERR(dir)) { + MSM_BUS_ERR("Couldn't create clients\n"); + goto err; + } + + shell_client = debugfs_create_dir("shell-client", dir); + if ((!dir) || IS_ERR(dir)) { + MSM_BUS_ERR("Couldn't create clients\n"); + goto err; + } + + commit = 
debugfs_create_dir("commit-data", dir); + if ((!dir) || IS_ERR(dir)) { + MSM_BUS_ERR("Couldn't create commit\n"); + goto err; + } + + rules_dbg = debugfs_create_dir("rules-dbg", dir); + if ((!rules_dbg) || IS_ERR(rules_dbg)) { + MSM_BUS_ERR("Couldn't create rules-dbg\n"); + goto err; + } + + if (debugfs_create_file("print_rules", S_IRUGO | S_IWUSR, + rules_dbg, &val, &rules_dbg_fops) == NULL) + goto err; + + if (debugfs_create_file("update_request", S_IRUGO | S_IWUSR, + shell_client, &val, &shell_client_en_fops) == NULL) + goto err; + if (debugfs_create_file("ib", S_IRUGO | S_IWUSR, shell_client, &val, + &shell_client_ib_fops) == NULL) + goto err; + if (debugfs_create_file("ab", S_IRUGO | S_IWUSR, shell_client, &val, + &shell_client_ab_fops) == NULL) + goto err; + if (debugfs_create_file("slv", S_IRUGO | S_IWUSR, shell_client, + &val, &shell_client_slv_fops) == NULL) + goto err; + if (debugfs_create_file("mas", S_IRUGO | S_IWUSR, shell_client, + &val, &shell_client_mas_fops) == NULL) + goto err; + if (debugfs_create_file("update-request", S_IRUGO | S_IWUSR, + clients, NULL, &msm_bus_dbg_update_request_fops) == NULL) + goto err; + + rules_buf = kzalloc(MAX_BUFF_SIZE, GFP_KERNEL); + if (!rules_buf) { + MSM_BUS_ERR("Failed to alloc rules_buf"); + goto err; + } + + list_for_each_entry(cldata, &cl_list, list) { + if (cldata->pdata) { + if (cldata->pdata->name == NULL) { + MSM_BUS_DBG("Client name not found\n"); + continue; + } + cldata->file = msm_bus_dbg_create(cldata-> + pdata->name, S_IRUGO, clients, cldata->clid); + } else if (cldata->handle) { + if (cldata->handle->name == NULL) { + MSM_BUS_DBG("Client doesn't have a name\n"); + continue; + } + cldata->file = debugfs_create_file(cldata->handle->name, + S_IRUGO, clients, + (void *)cldata->handle, + &client_data_fops); + } + } + + mutex_lock(&msm_bus_dbg_fablist_lock); + list_for_each_entry(fablist, &fabdata_list, list) { + fablist->file = debugfs_create_file(fablist->name, S_IRUGO, + commit, (void *)fablist->name, 
&fabric_data_fops); + if (fablist->file == NULL) { + MSM_BUS_DBG("Cannot create files for commit data\n"); + kfree(rules_buf); + goto err; + } + } + mutex_unlock(&msm_bus_dbg_fablist_lock); + + msm_bus_dbg_init_vectors(); + return 0; +err: + debugfs_remove_recursive(dir); + return -ENODEV; +} +late_initcall(msm_bus_debugfs_init); + +static void __exit msm_bus_dbg_teardown(void) +{ + struct msm_bus_fab_list *fablist = NULL, *fablist_temp; + struct msm_bus_cldata *cldata = NULL, *cldata_temp; + + debugfs_remove_recursive(dir); + list_for_each_entry_safe(cldata, cldata_temp, &cl_list, list) { + list_del(&cldata->list); + kfree(cldata); + } + mutex_lock(&msm_bus_dbg_fablist_lock); + list_for_each_entry_safe(fablist, fablist_temp, &fabdata_list, list) { + list_del(&fablist->list); + kfree(fablist); + } + kfree(rules_buf); + mutex_unlock(&msm_bus_dbg_fablist_lock); +} +module_exit(msm_bus_dbg_teardown); +MODULE_DESCRIPTION("Debugfs for msm bus scaling client"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Gagan Mac <gmac@codeaurora.org>"); diff --git a/drivers/soc/qcom/msm_bus/msm_bus_dbg_voter.c b/drivers/soc/qcom/msm_bus/msm_bus_dbg_voter.c new file mode 100644 index 000000000000..2714d8a42399 --- /dev/null +++ b/drivers/soc/qcom/msm_bus/msm_bus_dbg_voter.c @@ -0,0 +1,589 @@ +/* Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This program is Mree software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/msm-bus.h> +#include <linux/msm-bus-board.h> +#include "msm_bus_adhoc.h" + +struct msm_bus_floor_client_type { + int mas_id; + int slv_id; + struct msm_bus_client_handle *vote_handle; + struct device *dev; + u64 cur_vote_hz; + int active_only; +}; + +static struct class *bus_floor_class; +#define MAX_VOTER_NAME (50) +#define DEFAULT_NODE_WIDTH (8) +#define DBG_NAME(s) (strnstr(s, "-", 7) + 1) + +static int get_id(void) +{ + static int dev_id = MSM_BUS_INT_TEST_ID; + int id = dev_id; + + if (id >= MSM_BUS_INT_TEST_LAST) + id = -EINVAL; + else + dev_id++; + + return id; +} + +static ssize_t bus_floor_active_only_show(struct device *dev, + struct device_attribute *dev_attr, char *buf) +{ + struct msm_bus_floor_client_type *cl; + + cl = dev_get_drvdata(dev); + + if (!cl) { + pr_err("%s: Can't find cl", __func__); + return 0; + } + return snprintf(buf, sizeof(int), "%d", cl->active_only); +} + +static ssize_t bus_floor_active_only_store(struct device *dev, + struct device_attribute *dev_attr, const char *buf, + size_t n) +{ + struct msm_bus_floor_client_type *cl; + + cl = dev_get_drvdata(dev); + + if (!cl) { + pr_err("%s: Can't find cl", __func__); + return 0; + } + + if (sscanf(buf, "%d", &cl->active_only) != 1) { + pr_err("%s:return error", __func__); + return -EINVAL; + } + + return n; +} + +static ssize_t bus_floor_vote_show(struct device *dev, + struct device_attribute *dev_attr, char *buf) +{ + struct msm_bus_floor_client_type *cl; + + cl = dev_get_drvdata(dev); + + if (!cl) { + pr_err("%s: Can't find cl", __func__); + return 0; + } + return snprintf(buf, sizeof(u64), "%llu", cl->cur_vote_hz); +} + +static ssize_t bus_floor_vote_store(struct device *dev, + struct device_attribute *dev_attr, const char *buf, + size_t n) +{ + struct msm_bus_floor_client_type *cl; + int ret = 0; + + cl = dev_get_drvdata(dev); + + if (!cl) { + pr_err("%s: Can't find cl", __func__); + 
return 0; + } + + if (sscanf(buf, "%llu", &cl->cur_vote_hz) != 1) { + pr_err("%s:return error", __func__); + return -EINVAL; + } + + ret = msm_bus_floor_vote_context(dev_name(dev), cl->cur_vote_hz, + cl->active_only); + return n; +} + +static ssize_t bus_floor_vote_store_api(struct device *dev, + struct device_attribute *dev_attr, const char *buf, + size_t n) +{ + struct msm_bus_floor_client_type *cl; + int ret = 0; + char name[10]; + u64 vote_khz = 0; + + cl = dev_get_drvdata(dev); + + if (!cl) { + pr_err("%s: Can't find cl", __func__); + return 0; + } + + if (sscanf(buf, "%s %llu", name, &vote_khz) != 2) { + pr_err("%s:return error", __func__); + return -EINVAL; + } + + pr_info("%s: name %s vote %llu\n", + __func__, name, vote_khz); + + ret = msm_bus_floor_vote(name, vote_khz); + return n; +} + +static DEVICE_ATTR(floor_vote, S_IRUGO | S_IWUSR, + bus_floor_vote_show, bus_floor_vote_store); + +static DEVICE_ATTR(floor_vote_api, S_IRUGO | S_IWUSR, + bus_floor_vote_show, bus_floor_vote_store_api); + +static DEVICE_ATTR(floor_active_only, S_IRUGO | S_IWUSR, + bus_floor_active_only_show, bus_floor_active_only_store); + +static int msm_bus_floor_init_dev(struct device *fab_dev, + struct device *dev, bool is_master) +{ + struct msm_bus_node_device_type *bus_node = NULL; + struct msm_bus_node_device_type *fab_node = NULL; + struct msm_bus_node_info_type *node_info = NULL; + int ret = 0; + + if (!(fab_dev && dev)) { + ret = -ENXIO; + goto exit_init_bus_dev; + } + + fab_node = fab_dev->platform_data; + + if (!fab_node) { + pr_info("\n%s: Can't create device", __func__); + ret = -ENXIO; + goto exit_init_bus_dev; + } + + device_initialize(dev); + bus_node = devm_kzalloc(dev, + sizeof(struct msm_bus_node_device_type), GFP_KERNEL); + + if (!bus_node) { + pr_err("%s:Bus node alloc failed\n", __func__); + ret = -ENOMEM; + goto exit_init_bus_dev; + } + + node_info = devm_kzalloc(dev, + sizeof(struct msm_bus_node_info_type), GFP_KERNEL); + + if (!node_info) { + pr_err("%s:Bus node 
info alloc failed\n", __func__); + devm_kfree(dev, bus_node); + ret = -ENOMEM; + goto exit_init_bus_dev; + } + + bus_node->node_info = node_info; + bus_node->ap_owned = true; + bus_node->node_info->bus_device = fab_dev; + bus_node->node_info->buswidth = 8; + dev->platform_data = bus_node; + dev->bus = &msm_bus_type; + + bus_node->node_info->id = get_id(); + if (bus_node->node_info->id < 0) { + pr_err("%s: Failed to get id for dev. Bus:%s is_master:%d", + __func__, fab_node->node_info->name, is_master); + ret = -ENXIO; + goto exit_init_bus_dev; + } + + dev_set_name(dev, "testnode-%s-%s", (is_master ? "mas" : "slv"), + fab_node->node_info->name); + + ret = device_add(dev); + if (ret < 0) { + pr_err("%s: Failed to add %s", __func__, dev_name(dev)); + goto exit_init_bus_dev; + } + +exit_init_bus_dev: + return ret; +} + +static int msm_bus_floor_show_info(struct device *dev, void *data) +{ + if (dev) + pr_err(" %s\n", dev_name(dev)); + return 0; +} + +static void msm_bus_floor_pr_usage(void) +{ + pr_err("msm_bus_floor_vote: Supported buses\n"); + class_for_each_device(bus_floor_class, NULL, NULL, + msm_bus_floor_show_info); +} + +static int msm_bus_floor_match(struct device *dev, const void *data) +{ + int ret = 0; + + if (!(dev && data)) + return ret; + + if (strnstr(dev_name(dev), data, MAX_VOTER_NAME)) + ret = 1; + + return ret; +} + +int msm_bus_floor_vote(const char *name, u64 floor_hz) +{ + int ret = -EINVAL; + struct msm_bus_floor_client_type *cl; + bool found = false; + struct device *dbg_voter = NULL; + + if (!name) { + pr_err("%s: NULL name", __func__); + return -EINVAL; + } + + dbg_voter = class_find_device(bus_floor_class, NULL, + name, msm_bus_floor_match); + if (dbg_voter) { + found = true; + cl = dev_get_drvdata(dbg_voter); + + if (!cl) { + pr_err("%s: Can't find cl", __func__); + goto exit_bus_floor_vote; + } + + if (!cl->vote_handle) { + char cl_name[MAX_VOTER_NAME]; + snprintf(cl_name, MAX_VOTER_NAME, "%s-floor-voter", + dev_name(cl->dev)); + 
cl->vote_handle = msm_bus_scale_register(cl->mas_id, + cl->slv_id, cl_name, false); + if (!cl->vote_handle) { + ret = -ENXIO; + goto exit_bus_floor_vote; + } + } + + cl->cur_vote_hz = floor_hz; + ret = msm_bus_scale_update_bw(cl->vote_handle, 0, + (floor_hz * DEFAULT_NODE_WIDTH)); + if (ret) { + pr_err("%s: Failed to update %s", __func__, + name); + goto exit_bus_floor_vote; + } + } else { + pr_err("\n%s:No matching voting device found for %s", __func__, + name); + msm_bus_floor_pr_usage(); + } + +exit_bus_floor_vote: + if (dbg_voter) + put_device(dbg_voter); + + return ret; +} +EXPORT_SYMBOL(msm_bus_floor_vote); + +int msm_bus_floor_vote_context(const char *name, u64 floor_hz, + bool active_only) +{ + int ret = -EINVAL; + struct msm_bus_floor_client_type *cl; + bool found = false; + struct device *dbg_voter = NULL; + + if (!name) { + pr_err("%s: NULL name", __func__); + return -EINVAL; + } + + dbg_voter = class_find_device(bus_floor_class, NULL, + name, msm_bus_floor_match); + if (dbg_voter) { + found = true; + cl = dev_get_drvdata(dbg_voter); + + if (!cl) { + pr_err("%s: Can't find cl", __func__); + goto exit_bus_floor_vote_context; + } + + if (!(cl->vote_handle && + (cl->vote_handle->active_only == active_only))) { + char cl_name[MAX_VOTER_NAME]; + + if (cl->vote_handle) + msm_bus_scale_unregister(cl->vote_handle); + + snprintf(cl_name, MAX_VOTER_NAME, "%s-floor-voter", + dev_name(cl->dev)); + cl->vote_handle = msm_bus_scale_register(cl->mas_id, + cl->slv_id, (char *)dev_name(cl->dev), + active_only); + if (!cl->vote_handle) { + ret = -ENXIO; + goto exit_bus_floor_vote_context; + } + } + + cl->cur_vote_hz = floor_hz; + ret = msm_bus_scale_update_bw(cl->vote_handle, 0, + (floor_hz * DEFAULT_NODE_WIDTH)); + if (ret) { + pr_err("%s: Failed to update %s", __func__, + name); + goto exit_bus_floor_vote_context; + } + } else { + pr_err("\n%s:No matching voting device found for %s", __func__, + name); + msm_bus_floor_pr_usage(); + } + +exit_bus_floor_vote_context: + if 
(dbg_voter) + put_device(dbg_voter); + + return ret; +} +EXPORT_SYMBOL(msm_bus_floor_vote_context); + +static int msm_bus_floor_setup_dev_conn(struct device *mas, struct device *slv) +{ + int ret = 0; + int slv_id = 0; + struct msm_bus_node_device_type *mas_node = NULL; + struct msm_bus_node_device_type *slv_node = NULL; + + if (!(mas && slv)) { + pr_err("\n%s: Invalid master/slave device", __func__); + ret = -ENXIO; + goto exit_setup_dev_conn; + } + + mas_node = mas->platform_data; + slv_node = slv->platform_data; + + if (!(mas_node && slv_node)) { + ret = -ENXIO; + goto exit_setup_dev_conn; + } + + slv_id = slv_node->node_info->id; + mas_node->node_info->num_connections = 1; + mas_node->node_info->connections = devm_kzalloc(mas, + (sizeof(int) * mas_node->node_info->num_connections), + GFP_KERNEL); + + if (!mas_node->node_info->connections) { + pr_err("%s:Bus node connections info alloc failed\n", __func__); + ret = -ENOMEM; + goto exit_setup_dev_conn; + } + + mas_node->node_info->dev_connections = devm_kzalloc(mas, + (sizeof(struct device *) * + mas_node->node_info->num_connections), + GFP_KERNEL); + + if (!mas_node->node_info->dev_connections) { + pr_err("%s:Bus node dev connections info alloc failed\n", + __func__); + ret = -ENOMEM; + goto exit_setup_dev_conn; + } + mas_node->node_info->connections[0] = slv_id; + mas_node->node_info->dev_connections[0] = slv; + +exit_setup_dev_conn: + return ret; +} + +static int msm_bus_floor_setup_floor_dev( + struct msm_bus_node_device_type *mas_node, + struct msm_bus_node_device_type *slv_node, + struct msm_bus_node_device_type *bus_node) +{ + struct msm_bus_floor_client_type *cl_ptr = NULL; + int ret = 0; + char *name = NULL; + + cl_ptr = kzalloc(sizeof(struct msm_bus_floor_client_type), GFP_KERNEL); + if (!cl_ptr) { + pr_err("\n%s: Err Allocating mem for cl ptr bus %d", + __func__, bus_node->node_info->id); + ret = -ENOMEM; + goto err_setup_floor_dev; + } + + if (!bus_floor_class) { + bus_floor_class = 
class_create(THIS_MODULE, "bus-voter"); + if (IS_ERR(bus_floor_class)) { + ret = -ENXIO; + pr_err("%s: Error creating dev class", __func__); + goto err_setup_floor_dev; + } + } + + name = DBG_NAME(bus_node->node_info->name); + if (!name) { + pr_err("%s: Invalid name derived for %s", __func__, + bus_node->node_info->name); + ret = -EINVAL; + goto err_setup_floor_dev; + } + + cl_ptr->dev = kzalloc(sizeof(struct device), GFP_KERNEL); + if (!cl_ptr->dev) { + pr_err("%s: Failed to create device bus %d", __func__, + bus_node->node_info->id); + goto err_setup_floor_dev; + } + + device_initialize(cl_ptr->dev); + cl_ptr->dev->class = bus_floor_class; + dev_set_name(cl_ptr->dev, "%s", name); + dev_set_drvdata(cl_ptr->dev, cl_ptr); + ret = device_add(cl_ptr->dev); + + if (ret < 0) { + pr_err("%s: Failed to add device bus %d", __func__, + bus_node->node_info->id); + goto err_setup_floor_dev; + } + + cl_ptr->mas_id = mas_node->node_info->id; + cl_ptr->slv_id = slv_node->node_info->id; + + ret = device_create_file(cl_ptr->dev, &dev_attr_floor_vote); + if (ret < 0) + goto err_setup_floor_dev; + + ret = device_create_file(cl_ptr->dev, &dev_attr_floor_vote_api); + if (ret < 0) + goto err_setup_floor_dev; + + ret = device_create_file(cl_ptr->dev, &dev_attr_floor_active_only); + if (ret < 0) + goto err_setup_floor_dev; + + return ret; + +err_setup_floor_dev: + kfree(cl_ptr); + return ret; +} + +int msm_bus_floor_init(struct device *dev) +{ + struct device *m_dev = NULL; + struct device *s_dev = NULL; + struct msm_bus_node_device_type *mas_node = NULL; + struct msm_bus_node_device_type *slv_node = NULL; + struct msm_bus_node_device_type *bus_node = NULL; + int ret = 0; + + if (!dev) { + pr_info("\n%s: Can't create voting client", __func__); + ret = -ENXIO; + goto exit_floor_init; + } + + bus_node = dev->platform_data; + if (!(bus_node && bus_node->node_info->is_fab_dev)) { + pr_info("\n%s: Can't create voting client, not a fab device", + __func__); + ret = -ENXIO; + goto 
exit_floor_init; + } + + m_dev = kzalloc(sizeof(struct device), GFP_KERNEL); + if (!m_dev) { + pr_err("%s:Master Device alloc failed\n", __func__); + m_dev = NULL; + ret = -ENOMEM; + goto exit_floor_init; + } + + s_dev = kzalloc(sizeof(struct device), GFP_KERNEL); + if (!m_dev) { + pr_err("%s:Slave Device alloc failed\n", __func__); + s_dev = NULL; + kfree(m_dev); + ret = -ENOMEM; + m_dev = NULL; + goto exit_floor_init; + } + + ret = msm_bus_floor_init_dev(dev, m_dev, true); + if (ret) { + pr_err("\n%s: Error setting up master dev, bus %d", + __func__, bus_node->node_info->id); + kfree(m_dev); + kfree(s_dev); + goto exit_floor_init; + } + + ret = msm_bus_floor_init_dev(dev, s_dev, false); + if (ret) { + pr_err("\n%s: Error setting up slave dev, bus %d", + __func__, bus_node->node_info->id); + kfree(m_dev); + kfree(s_dev); + goto exit_floor_init; + } + + ret = msm_bus_floor_setup_dev_conn(m_dev, s_dev); + if (ret) { + pr_err("\n%s: Error setting up connections bus %d", + __func__, bus_node->node_info->id); + goto err_floor_init; + } + + mas_node = m_dev->platform_data; + slv_node = s_dev->platform_data; + if ((!(mas_node && slv_node))) { + pr_err("\n%s: Error getting mas/slv nodes bus %d", + __func__, bus_node->node_info->id); + goto err_floor_init; + } + + ret = msm_bus_floor_setup_floor_dev(mas_node, slv_node, bus_node); + if (ret) { + pr_err("\n%s: Error getting mas/slv nodes bus %d", + __func__, bus_node->node_info->id); + goto err_floor_init; + } + +exit_floor_init: + return ret; +err_floor_init: + device_unregister(m_dev); + device_unregister(s_dev); + kfree(m_dev); + kfree(s_dev); + return ret; +} diff --git a/drivers/soc/qcom/msm_bus/msm_bus_fabric.c b/drivers/soc/qcom/msm_bus/msm_bus_fabric.c new file mode 100644 index 000000000000..b38b35197d8a --- /dev/null +++ b/drivers/soc/qcom/msm_bus/msm_bus_fabric.c @@ -0,0 +1,970 @@ +/* Copyright (c) 2010-2014, Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/device.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/clk.h> +#include <linux/radix-tree.h> +#include "msm_bus_core.h" + +enum { + DISABLE, + ENABLE, +}; + +/** + * msm_bus_fabric_add_node() - Add a node to the fabric structure + * @fabric: Fabric device to which the node should be added + * @info: The node to be added + */ +static int msm_bus_fabric_add_node(struct msm_bus_fabric *fabric, + struct msm_bus_inode_info *info) +{ + int status = -ENOMEM, ctx; + MSM_BUS_DBG("msm_bus_fabric_add_node: ID %d Gw: %d\n", + info->node_info->priv_id, info->node_info->gateway); + status = radix_tree_preload(GFP_ATOMIC); + if (status) + goto out; + + status = radix_tree_insert(&fabric->fab_tree, info->node_info->priv_id, + info); + radix_tree_preload_end(); + if (IS_SLAVE(info->node_info->priv_id)) + radix_tree_tag_set(&fabric->fab_tree, info->node_info->priv_id, + SLAVE_NODE); + + for (ctx = 0; ctx < NUM_CTX; ctx++) { + if (info->node_info->slaveclk[ctx]) { + radix_tree_tag_set(&fabric->fab_tree, + info->node_info->priv_id, CLK_NODE); + break; + } + + info->nodeclk[ctx].enable = false; + info->nodeclk[ctx].dirty = false; + } + + if (info->node_info->nr_lim) { + int iid = msm_bus_board_get_iid(info->node_info->id); + struct msm_bus_fabric_device *fabdev = + msm_bus_get_fabric_device(GET_FABID(iid)); + + if (!fabdev) + BUG_ON(1); + + radix_tree_tag_set(&fabric->fab_tree, + 
info->node_info->priv_id, MASTER_NODE); + + fabdev->num_nr_lim++; + MSM_BUS_ERR("%s: Adding %d There are %d nodes", __func__, + info->node_info->id, fabdev->num_nr_lim); + } +out: + return status; +} + +/** + * msm_bus_add_fab() - Add a fabric (gateway) to the current fabric + * @fabric: Fabric device to which the gateway info should be added + * @info: Gateway node to be added to the fabric + */ +static int msm_bus_fabric_add_fab(struct msm_bus_fabric *fabric, + struct msm_bus_inode_info *info) +{ + struct msm_bus_fabnodeinfo *fabnodeinfo; + MSM_BUS_DBG("msm_bus_fabric_add_fab: ID %d Gw: %d\n", + info->node_info->priv_id, info->node_info->gateway); + fabnodeinfo = kzalloc(sizeof(struct msm_bus_fabnodeinfo), GFP_KERNEL); + if (fabnodeinfo == NULL) { + MSM_FAB_ERR("msm_bus_fabric_add_fab: " + "No Node Info\n"); + MSM_FAB_ERR("axi: Cannot register fabric!\n"); + return -ENOMEM; + } + + fabnodeinfo->info = info; + fabnodeinfo->info->num_pnodes = -1; + list_add_tail(&fabnodeinfo->list, &fabric->gateways); + return 0; +} + +/** + * register_fabric_info() - Create the internal fabric structure and + * build the topology tree from platform specific data + * @pdev: Platform device for getting base addresses + * @fabric: Fabric to which the gateways, nodes should be added + * + * This function is called from probe. 
Iterates over the platform data, + * and builds the topology + */ +static int register_fabric_info(struct platform_device *pdev, + struct msm_bus_fabric *fabric) +{ + int i = 0, ret = 0, err = 0; + + MSM_BUS_DBG("id:%d pdata-id: %d len: %d\n", fabric->fabdev.id, + fabric->pdata->id, fabric->pdata->len); + fabric->hw_data = fabric->fabdev.hw_algo.allocate_hw_data(pdev, + fabric->pdata); + if (ZERO_OR_NULL_PTR(fabric->hw_data) && fabric->pdata->ahb == 0) { + MSM_BUS_ERR("Couldn't allocate hw_data for fab: %d\n", + fabric->fabdev.id); + goto error; + } + + for (i = 0; i < fabric->pdata->len; i++) { + struct msm_bus_inode_info *info; + int ctx, j; + + info = kzalloc(sizeof(struct msm_bus_inode_info), GFP_KERNEL); + if (info == NULL) { + MSM_BUS_ERR("Error allocating info\n"); + return -ENOMEM; + } + + info->node_info = fabric->pdata->info + i; + info->commit_index = -1; + info->num_pnodes = -1; + + for (ctx = 0; ctx < NUM_CTX; ctx++) { + if (info->node_info->slaveclk[ctx]) { + info->nodeclk[ctx].clk = clk_get_sys("msm_bus", + info->node_info->slaveclk[ctx]); + if (IS_ERR(info->nodeclk[ctx].clk)) { + MSM_BUS_ERR("Couldn't get clk %s\n", + info->node_info->slaveclk[ctx]); + err = -EINVAL; + } + info->nodeclk[ctx].enable = false; + info->nodeclk[ctx].dirty = false; + } + + if (info->node_info->memclk[ctx]) { + info->memclk[ctx].clk = clk_get_sys("msm_bus", + info->node_info->memclk[ctx]); + if (IS_ERR(info->memclk[ctx].clk)) { + MSM_BUS_ERR("Couldn't get clk %s\n", + info->node_info->memclk[ctx]); + err = -EINVAL; + } + info->memclk[ctx].enable = false; + info->memclk[ctx].dirty = false; + } + } + + if (info->node_info->iface_clk_node) { + info->iface_clk.clk = clk_get_sys(info->node_info-> + iface_clk_node, "iface_clk"); + if (IS_ERR(info->iface_clk.clk)) { + MSM_BUS_ERR("ERR: Couldn't get clk %s\n", + info->node_info->iface_clk_node); + } + } + + ret = info->node_info->gateway ? 
+ msm_bus_fabric_add_fab(fabric, info) : + msm_bus_fabric_add_node(fabric, info); + if (ret) { + MSM_BUS_ERR("Unable to add node info, ret: %d\n", ret); + kfree(info); + goto error; + } + + if (fabric->fabdev.hw_algo.node_init == NULL) + continue; + + if (info->iface_clk.clk) { + MSM_BUS_DBG("Enabled iface clock for node init: %d\n", + info->node_info->priv_id); + clk_prepare_enable(info->iface_clk.clk); + } + + for (j = 0; j < NUM_CTX; j++) + clk_prepare_enable(fabric->info.nodeclk[j].clk); + + fabric->fabdev.hw_algo.node_init(fabric->hw_data, info); + if (ret) { + MSM_BUS_ERR("Unable to init node info, ret: %d\n", ret); + kfree(info); + } + + for (j = 0; j < NUM_CTX; j++) + clk_disable_unprepare(fabric->info.nodeclk[j].clk); + + if (info->iface_clk.clk) { + MSM_BUS_DBG("Disable iface_clk after node init: %d\n", + info->node_info->priv_id); + clk_disable_unprepare(info->iface_clk.clk); + } + + + } + + MSM_BUS_DBG("Fabric: %d nmasters: %d nslaves: %d\n" + " ntieredslaves: %d, rpm_enabled: %d\n", + fabric->fabdev.id, fabric->pdata->nmasters, + fabric->pdata->nslaves, fabric->pdata->ntieredslaves, + fabric->pdata->rpm_enabled); + MSM_BUS_DBG("msm_bus_register_fabric_info i: %d\n", i); + fabric->num_nodes = fabric->pdata->len; +error: + fabric->num_nodes = i; + msm_bus_dbg_commit_data(fabric->fabdev.name, NULL, 0, 0, 0, + MSM_BUS_DBG_REGISTER); + return ret | err; +} + +/** + * msm_bus_fabric_update_clks() - Set the clocks for fabrics and slaves + * @fabric: Fabric for which the clocks need to be updated + * @slave: The node for which the clocks need to be updated + * @index: The index for which the current clocks are set + * @curr_clk_hz:Current clock value + * @req_clk_hz: Requested clock value + * @bwsum: Bandwidth Sum + * @clk_flag: Flag determining whether fabric clock or the slave clock has to + * be set. If clk_flag is set, fabric clock is set, else slave clock is set. 
+ */ +static int msm_bus_fabric_update_clks(struct msm_bus_fabric_device *fabdev, + struct msm_bus_inode_info *slave, int index, + uint64_t curr_clk_hz, uint64_t req_clk_hz, + uint64_t bwsum_hz, int clk_flag, int ctx, + unsigned int cl_active_flag) +{ + int i, status = 0; + uint64_t max_pclk = 0, rate; + uint64_t *pclk = NULL; + struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev); + struct nodeclk *nodeclk; + + /** + * Integration for clock rates is not required if context is not + * same as client's active-only flag + */ + if (ctx != cl_active_flag) + goto skip_set_clks; + + /* Maximum for this gateway */ + for (i = 0; i <= slave->num_pnodes; i++) { + if (i == index && (req_clk_hz < curr_clk_hz)) + continue; + slave->pnode[i].sel_clk = &slave->pnode[i].clk[ctx]; + max_pclk = max(max_pclk, *slave->pnode[i].sel_clk); + } + + *slave->link_info.sel_clk = + max(max_pclk, max(bwsum_hz, req_clk_hz)); + /* Is this gateway or slave? */ + if (clk_flag && (!fabric->ahb)) { + struct msm_bus_fabnodeinfo *fabgw = NULL; + struct msm_bus_inode_info *info = NULL; + /* Maximum of all gateways set at fabric */ + list_for_each_entry(fabgw, &fabric->gateways, list) { + info = fabgw->info; + if (!info) + continue; + info->link_info.sel_clk = &info->link_info.clk[ctx]; + max_pclk = max(max_pclk, *info->link_info.sel_clk); + } + MSM_BUS_DBG("max_pclk from gateways: %llu\n", max_pclk); + + /* Maximum of all slave clocks. 
*/ + + for (i = 0; i < fabric->pdata->len; i++) { + if (fabric->pdata->info[i].gateway || + (fabric->pdata->info[i].id < SLAVE_ID_KEY)) + continue; + info = radix_tree_lookup(&fabric->fab_tree, + fabric->pdata->info[i].priv_id); + if (!info) + continue; + info->link_info.sel_clk = &info->link_info.clk[ctx]; + max_pclk = max(max_pclk, *info->link_info.sel_clk); + } + + + MSM_BUS_DBG("max_pclk from slaves & gws: %llu\n", max_pclk); + fabric->info.link_info.sel_clk = + &fabric->info.link_info.clk[ctx]; + pclk = fabric->info.link_info.sel_clk; + } else { + slave->link_info.sel_clk = &slave->link_info.clk[ctx]; + pclk = slave->link_info.sel_clk; + } + + + *pclk = max(max_pclk, max(bwsum_hz, req_clk_hz)); + + if (!fabric->pdata->rpm_enabled) + goto skip_set_clks; + + if (clk_flag) { + nodeclk = &fabric->info.nodeclk[ctx]; + if (nodeclk->clk) { + MSM_BUS_DBG("clks: id: %d set-clk: %llu bws_hz:%llu\n", + fabric->fabdev.id, *pclk, bwsum_hz); + if (nodeclk->rate != *pclk) { + nodeclk->dirty = true; + nodeclk->rate = *pclk; + } + fabric->clk_dirty = true; + } + } else { + nodeclk = &slave->nodeclk[ctx]; + if (nodeclk->clk) { + rate = *pclk; + MSM_BUS_DBG("clks: id: %d set-clk: %llu bws_hz: %llu\n", + slave->node_info->priv_id, rate, + bwsum_hz); + if (nodeclk->rate != rate) { + nodeclk->dirty = true; + nodeclk->rate = rate; + } + } + if (!status && slave->memclk[ctx].clk) { + rate = *slave->link_info.sel_clk; + if (slave->memclk[ctx].rate != rate) { + slave->memclk[ctx].rate = rate; + slave->memclk[ctx].dirty = true; + } + slave->memclk[ctx].rate = rate; + fabric->clk_dirty = true; + } + } +skip_set_clks: + return status; +} + +void msm_bus_fabric_update_bw(struct msm_bus_fabric_device *fabdev, + struct msm_bus_inode_info *hop, struct msm_bus_inode_info *info, + int64_t add_bw, int *master_tiers, int ctx) +{ + struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev); + void *sel_cdata; + long rounded_rate, cur_rate; + + sel_cdata = fabric->cdata[ctx]; + + /* If it's an ahb 
fabric, don't calculate arb values */ + if (fabric->ahb) { + MSM_BUS_DBG("AHB fabric, skipping bw calculation\n"); + return; + } + if (!add_bw) { + MSM_BUS_DBG("No bandwidth delta. Skipping commit\n"); + return; + } + + if ((info->node_info->hw_sel != MSM_BUS_RPM) && + fabdev->hw_algo.update_bw_reg(info->node_info->mode)) { + /* Enable clocks before accessing QoS registers */ + if (fabric->info.nodeclk[DUAL_CTX].clk) { + if (fabric->info.nodeclk[DUAL_CTX].rate == 0) { + cur_rate = clk_get_rate( + fabric->info.nodeclk[DUAL_CTX].clk); + rounded_rate = clk_round_rate( + fabric->info.nodeclk[DUAL_CTX].clk, + cur_rate ? cur_rate : 1); + if (clk_set_rate( + fabric->info.nodeclk[DUAL_CTX].clk, + rounded_rate)) + MSM_BUS_ERR("clk en:Node:%d rate:%ld", + fabric->fabdev.id, rounded_rate); + + clk_prepare_enable( + fabric->info.nodeclk[DUAL_CTX].clk); + } + } + + if (info->iface_clk.clk) + clk_prepare_enable(info->iface_clk.clk); + + if (hop->iface_clk.clk) + clk_prepare_enable(hop->iface_clk.clk); + } + + fabdev->hw_algo.update_bw(hop, info, fabric->pdata, sel_cdata, + master_tiers, add_bw); + + if ((info->node_info->hw_sel != MSM_BUS_RPM) && + fabdev->hw_algo.update_bw_reg(info->node_info->mode)) { + /* Disable clocks after accessing QoS registers */ + if (fabric->info.nodeclk[DUAL_CTX].clk && + fabric->info.nodeclk[DUAL_CTX].rate == 0) + clk_disable_unprepare( + fabric->info.nodeclk[DUAL_CTX].clk); + + if (info->iface_clk.clk) { + MSM_BUS_DBG("Commented:Will disable clk for info:%d\n", + info->node_info->priv_id); + clk_disable_unprepare(info->iface_clk.clk); + } + + if (hop->iface_clk.clk) { + MSM_BUS_DBG("Commented Will disable clk for hop:%d\n", + hop->node_info->priv_id); + clk_disable_unprepare(hop->iface_clk.clk); + } + } + + fabric->arb_dirty = true; +} + +static int msm_bus_fabric_clk_set(int enable, struct msm_bus_inode_info *info) +{ + int i, status = 0; + long rounded_rate; + + for (i = 0; i < NUM_CTX; i++) { + if (info->nodeclk[i].dirty) { + if 
(info->nodeclk[i].rate != 0) { + rounded_rate = clk_round_rate(info-> + nodeclk[i].clk, info->nodeclk[i].rate); + status = clk_set_rate(info->nodeclk[i].clk, + rounded_rate); + MSM_BUS_DBG("AXI: node: %d set_rate: %ld\n", + info->node_info->id, rounded_rate); + } + + if (enable && !(info->nodeclk[i].enable)) { + clk_prepare_enable(info->nodeclk[i].clk); + info->nodeclk[i].dirty = false; + info->nodeclk[i].enable = true; + } else if ((info->nodeclk[i].rate == 0) && (!enable) + && (info->nodeclk[i].enable)) { + clk_disable_unprepare(info->nodeclk[i].clk); + info->nodeclk[i].dirty = false; + info->nodeclk[i].enable = false; + } + } + + if (info->memclk[i].dirty) { + if (info->nodeclk[i].rate != 0) { + rounded_rate = clk_round_rate(info-> + memclk[i].clk, info->memclk[i].rate); + status = clk_set_rate(info->memclk[i].clk, + rounded_rate); + MSM_BUS_DBG("AXI: node: %d set_rate: %ld\n", + info->node_info->id, rounded_rate); + } + + if (enable && !(info->memclk[i].enable)) { + clk_prepare_enable(info->memclk[i].clk); + info->memclk[i].dirty = false; + info->memclk[i].enable = true; + } else if (info->memclk[i].rate == 0 && (!enable) && + (info->memclk[i].enable)) { + clk_disable_unprepare(info->memclk[i].clk); + info->memclk[i].dirty = false; + info->memclk[i].enable = false; + } + } + } + + return status; +} + +/** + * msm_bus_fabric_clk_commit() - Call clock enable and update clock + * values. 
+*/ +static int msm_bus_fabric_clk_commit(int enable, struct msm_bus_fabric *fabric) +{ + unsigned int i, nfound = 0, status = 0; + struct msm_bus_inode_info *info[fabric->pdata->nslaves]; + + if (fabric->clk_dirty == true) + status = msm_bus_fabric_clk_set(enable, &fabric->info); + + if (status) + MSM_BUS_WARN("Error setting clocks on fabric: %d\n", + fabric->fabdev.id); + + nfound = radix_tree_gang_lookup_tag(&fabric->fab_tree, (void **)&info, + fabric->fabdev.id, fabric->pdata->nslaves, CLK_NODE); + if (nfound == 0) { + MSM_BUS_DBG("No clock nodes found for fabric: %d\n", + fabric->fabdev.id); + goto out; + } + + for (i = 0; i < nfound; i++) { + status = msm_bus_fabric_clk_set(enable, info[i]); + if (status) + MSM_BUS_WARN("Error setting clocks for node: %d\n", + info[i]->node_info->id); + } + +out: + return status; +} + +static void msm_bus_fabric_config_limiter( + struct msm_bus_fabric_device *fabdev, + struct msm_bus_inode_info *info) +{ + struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev); + long rounded_rate, cur_rate; + + if (fabdev->hw_algo.config_limiter == NULL) + return; + + /* Enable clocks before accessing QoS registers */ + if (fabric->info.nodeclk[DUAL_CTX].clk) { + if (fabric->info.nodeclk[DUAL_CTX].rate == 0) { + cur_rate = clk_get_rate( + fabric->info.nodeclk[DUAL_CTX].clk); + rounded_rate = clk_round_rate( + fabric->info.nodeclk[DUAL_CTX].clk, + cur_rate ? 
cur_rate : 1); + if (clk_set_rate(fabric->info.nodeclk[DUAL_CTX].clk, + rounded_rate)) + MSM_BUS_ERR("Error: clk: en: Node: %d rate: %ld", + fabric->fabdev.id, rounded_rate); + + clk_prepare_enable(fabric->info.nodeclk[DUAL_CTX].clk); + } + } + + if (info->iface_clk.clk) + clk_prepare_enable(info->iface_clk.clk); + + fabdev->hw_algo.config_limiter(fabric->pdata, info); + + /* Disable clocks after accessing QoS registers */ + if (fabric->info.nodeclk[DUAL_CTX].clk && + fabric->info.nodeclk[DUAL_CTX].rate == 0) + clk_disable_unprepare(fabric->info.nodeclk[DUAL_CTX].clk); + + if (info->iface_clk.clk) { + MSM_BUS_DBG("Commented: Will disable clock for info: %d\n", + info->node_info->priv_id); + clk_disable_unprepare(info->iface_clk.clk); + } +} + +static void msm_bus_fabric_config_master( + struct msm_bus_fabric_device *fabdev, + struct msm_bus_inode_info *info, uint64_t req_clk, uint64_t req_bw) +{ + struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev); + long rounded_rate, cur_rate; + + if (fabdev->hw_algo.config_master == NULL) + return; + + /* Enable clocks before accessing QoS registers */ + if (fabric->info.nodeclk[DUAL_CTX].clk) { + if (fabric->info.nodeclk[DUAL_CTX].rate == 0) { + cur_rate = clk_get_rate( + fabric->info.nodeclk[DUAL_CTX].clk); + rounded_rate = clk_round_rate( + fabric->info.nodeclk[DUAL_CTX].clk, + cur_rate ? 
cur_rate : 1); + if (clk_set_rate(fabric->info.nodeclk[DUAL_CTX].clk, + rounded_rate)) + MSM_BUS_ERR("Error: clk: en: Node: %d rate: %ld", + fabric->fabdev.id, rounded_rate); + + clk_prepare_enable(fabric->info.nodeclk[DUAL_CTX].clk); + } + } + + if (info->iface_clk.clk) + clk_prepare_enable(info->iface_clk.clk); + + fabdev->hw_algo.config_master(fabric->pdata, info, req_clk, req_bw); + + /* Disable clocks after accessing QoS registers */ + if (fabric->info.nodeclk[DUAL_CTX].clk && + fabric->info.nodeclk[DUAL_CTX].rate == 0) + clk_disable_unprepare(fabric->info.nodeclk[DUAL_CTX].clk); + + if (info->iface_clk.clk) { + MSM_BUS_DBG("Commented: Will disable clock for info: %d\n", + info->node_info->priv_id); + clk_disable_unprepare(info->iface_clk.clk); + } +} + +/** + * msm_bus_fabric_hw_commit() - Commit the arbitration data to Hardware. + * @fabric: Fabric for which the data should be committed + * */ +static int msm_bus_fabric_hw_commit(struct msm_bus_fabric_device *fabdev) +{ + int status = 0; + struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev); + + /* + * For a non-zero bandwidth request, clocks should be enabled before + * sending the arbitration data to RPM, but should be disabled only + * after commiting the data. + */ + status = msm_bus_fabric_clk_commit(ENABLE, fabric); + if (status) + MSM_BUS_DBG("Error setting clocks on fabric: %d\n", + fabric->fabdev.id); + + if (!fabric->arb_dirty) { + MSM_BUS_DBG("Not committing as fabric not arb_dirty\n"); + goto skip_arb; + } + + status = fabdev->hw_algo.commit(fabric->pdata, fabric->hw_data, + (void **)fabric->cdata); + if (status) + MSM_BUS_DBG("Error committing arb data for fabric: %d\n", + fabric->fabdev.id); + + fabric->arb_dirty = false; +skip_arb: + /* + * If the bandwidth request is 0 for a fabric, the clocks + * should be disabled after arbitration data is committed. 
+ */ + status = msm_bus_fabric_clk_commit(DISABLE, fabric); + if (status) + MSM_BUS_WARN("Error disabling clocks on fabric: %d\n", + fabric->fabdev.id); + fabric->clk_dirty = false; + return status; +} + +/** + * msm_bus_fabric_port_halt() - Used to halt a master port + * @fabric: Fabric on which the current master node is present + * @portid: Port id of the master + */ +int msm_bus_fabric_port_halt(struct msm_bus_fabric_device *fabdev, int iid) +{ + struct msm_bus_inode_info *info = NULL; + uint8_t mport; + uint32_t haltid = 0; + struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev); + + info = fabdev->algo->find_node(fabdev, iid); + if (!info) { + MSM_BUS_ERR("Error: Info not found for id: %u", iid); + return -EINVAL; + } + + haltid = fabric->pdata->haltid; + mport = info->node_info->masterp[0]; + + return fabdev->hw_algo.port_halt(haltid, mport); +} + +/** + * msm_bus_fabric_port_unhalt() - Used to unhalt a master port + * @fabric: Fabric on which the current master node is present + * @portid: Port id of the master + */ +int msm_bus_fabric_port_unhalt(struct msm_bus_fabric_device *fabdev, int iid) +{ + struct msm_bus_inode_info *info = NULL; + uint8_t mport; + uint32_t haltid = 0; + struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev); + + info = fabdev->algo->find_node(fabdev, iid); + if (!info) { + MSM_BUS_ERR("Error: Info not found for id: %u", iid); + return -EINVAL; + } + + haltid = fabric->pdata->haltid; + mport = info->node_info->masterp[0]; + return fabdev->hw_algo.port_unhalt(haltid, mport); +} + +/** + * msm_bus_fabric_find_gw_node() - This function finds the gateway node + * attached on a given fabric + * @id: ID of the gateway node + * @fabric: Fabric to find the gateway node on + * Function returns: Pointer to the gateway node + */ +static struct msm_bus_inode_info *msm_bus_fabric_find_gw_node(struct + msm_bus_fabric_device * fabdev, int id) +{ + struct msm_bus_inode_info *info = NULL; + struct msm_bus_fabnodeinfo *fab; + struct 
msm_bus_fabric *fabric; + if (!fabdev) { + MSM_BUS_ERR("No fabric device found!\n"); + return NULL; + } + + fabric = to_msm_bus_fabric(fabdev); + if (!fabric || IS_ERR(fabric)) { + MSM_BUS_ERR("No fabric type found!\n"); + return NULL; + } + list_for_each_entry(fab, &fabric->gateways, list) { + if (fab->info->node_info->priv_id == id) { + info = fab->info; + break; + } + } + + return info; +} + +static struct msm_bus_inode_info *msm_bus_fabric_find_node(struct + msm_bus_fabric_device * fabdev, int id) +{ + struct msm_bus_inode_info *info = NULL; + struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev); + info = radix_tree_lookup(&fabric->fab_tree, id); + if (!info) + MSM_BUS_ERR("Null info found for id %d\n", id); + return info; +} + +static struct list_head *msm_bus_fabric_get_gw_list(struct msm_bus_fabric_device + *fabdev) +{ + struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev); + if (!fabric || IS_ERR(fabric)) { + MSM_BUS_ERR("No fabric found from fabdev\n"); + return NULL; + } + return &fabric->gateways; + +} +static struct msm_bus_fab_algorithm msm_bus_algo = { + .update_clks = msm_bus_fabric_update_clks, + .update_bw = msm_bus_fabric_update_bw, + .port_halt = msm_bus_fabric_port_halt, + .port_unhalt = msm_bus_fabric_port_unhalt, + .commit = msm_bus_fabric_hw_commit, + .find_node = msm_bus_fabric_find_node, + .find_gw_node = msm_bus_fabric_find_gw_node, + .get_gw_list = msm_bus_fabric_get_gw_list, + .config_master = msm_bus_fabric_config_master, + .config_limiter = msm_bus_fabric_config_limiter, +}; + +static int msm_bus_fabric_hw_init(struct msm_bus_fabric_registration *pdata, + struct msm_bus_hw_algorithm *hw_algo) +{ + int ret = 0; + + switch (pdata->hw_sel) { + case MSM_BUS_NOC: + msm_bus_noc_hw_init(pdata, hw_algo); + break; + case MSM_BUS_BIMC: + msm_bus_bimc_hw_init(pdata, hw_algo); + break; + default: + ret = msm_bus_rpm_hw_init(pdata, hw_algo); + if (ret) { + MSM_BUS_ERR("RPM initialization failed\n"); + ret = -EINVAL; + } + break; + } + 
return ret; +} + +static int msm_bus_fabric_probe(struct platform_device *pdev) +{ + int ctx, ret = 0; + struct msm_bus_fabric *fabric; + struct msm_bus_fabric_registration *pdata; + + fabric = kzalloc(sizeof(struct msm_bus_fabric), GFP_KERNEL); + if (!fabric) { + MSM_BUS_ERR("Fabric alloc failed\n"); + return -ENOMEM; + } + + INIT_LIST_HEAD(&fabric->gateways); + INIT_RADIX_TREE(&fabric->fab_tree, GFP_ATOMIC); + fabric->num_nodes = 0; + fabric->fabdev.visited = false; + + fabric->info.node_info = kzalloc(sizeof(struct msm_bus_node_info), + GFP_KERNEL); + if (ZERO_OR_NULL_PTR(fabric->info.node_info)) { + MSM_BUS_ERR("Fabric node info alloc failed\n"); + kfree(fabric); + return -ENOMEM; + } + + fabric->info.num_pnodes = -1; + fabric->info.link_info.clk[DUAL_CTX] = 0; + fabric->info.link_info.bw[DUAL_CTX] = 0; + fabric->info.link_info.clk[ACTIVE_CTX] = 0; + fabric->info.link_info.bw[ACTIVE_CTX] = 0; + + /* If possible, get pdata from device-tree */ + if (pdev->dev.of_node) { + pdata = msm_bus_of_get_fab_data(pdev); + if (IS_ERR(pdata) || ZERO_OR_NULL_PTR(pdata)) { + pr_err("Null platform data\n"); + kfree(fabric->info.node_info); + kfree(fabric); + return PTR_ERR(pdata); + } + msm_bus_board_init(pdata); + fabric->fabdev.id = pdata->id; + msm_bus_of_get_nfab(pdev, pdata); + } else { + pdata = (struct msm_bus_fabric_registration *)pdev-> + dev.platform_data; + fabric->fabdev.id = pdev->id; + } + + fabric->fabdev.name = pdata->name; + fabric->fabdev.nr_lim_thresh = pdata->nr_lim_thresh; + fabric->fabdev.eff_fact = pdata->eff_fact; + fabric->fabdev.algo = &msm_bus_algo; + fabric->info.node_info->priv_id = fabric->fabdev.id; + fabric->info.node_info->id = fabric->fabdev.id; + ret = msm_bus_fabric_hw_init(pdata, &fabric->fabdev.hw_algo); + if (ret) { + MSM_BUS_ERR("Error initializing hardware for fabric: %d\n", + fabric->fabdev.id); + goto err; + } + + fabric->ahb = pdata->ahb; + fabric->pdata = pdata; + fabric->pdata->board_algo->assign_iids(fabric->pdata, + 
fabric->fabdev.id); + fabric->fabdev.board_algo = fabric->pdata->board_algo; + + /* + * clk and bw for fabric->info will contain the max bw and clk + * it will allow. This info will come from the boards file. + */ + ret = msm_bus_fabric_device_register(&fabric->fabdev); + if (ret) { + MSM_BUS_ERR("Error registering fabric %d ret %d\n", + fabric->fabdev.id, ret); + goto err; + } + + for (ctx = 0; ctx < NUM_CTX; ctx++) { + if (pdata->fabclk[ctx]) { + fabric->info.nodeclk[ctx].clk = clk_get( + &fabric->fabdev.dev, pdata->fabclk[ctx]); + if (IS_ERR(fabric->info.nodeclk[ctx].clk)) { + MSM_BUS_ERR("Couldn't get clock %s\n", + pdata->fabclk[ctx]); + ret = -EINVAL; + goto err; + } + fabric->info.nodeclk[ctx].enable = false; + fabric->info.nodeclk[ctx].dirty = false; + } + } + + /* Find num. of slaves, masters, populate gateways, radix tree */ + ret = register_fabric_info(pdev, fabric); + if (ret) { + MSM_BUS_ERR("Could not register fabric %d info, ret: %d\n", + fabric->fabdev.id, ret); + goto err; + } + if (!fabric->ahb) { + /* Allocate memory for commit data */ + for (ctx = 0; ctx < NUM_CTX; ctx++) { + ret = fabric->fabdev.hw_algo.allocate_commit_data( + fabric->pdata, &fabric->cdata[ctx], ctx); + if (ret) { + MSM_BUS_ERR("Failed to alloc commit data for " + "fab: %d, ret = %d\n", + fabric->fabdev.id, ret); + goto err; + } + } + } + + if (msmbus_coresight_init(pdev)) + pr_warn("Coresight support absent for bus: %d\n", pdata->id); + + return ret; +err: + kfree(fabric->info.node_info); + kfree(fabric); + return ret; +} + +static int msm_bus_fabric_remove(struct platform_device *pdev) +{ + struct msm_bus_fabric_device *fabdev = NULL; + struct msm_bus_fabric *fabric; + int i; + int ret = 0; + + fabdev = platform_get_drvdata(pdev); + msmbus_coresight_remove(pdev); + msm_bus_fabric_device_unregister(fabdev); + fabric = to_msm_bus_fabric(fabdev); + msm_bus_dbg_commit_data(fabric->fabdev.name, NULL, 0, 0, 0, + MSM_BUS_DBG_UNREGISTER); + for (i = 0; i < fabric->pdata->nmasters; 
i++) + radix_tree_delete(&fabric->fab_tree, fabric->fabdev.id + i); + for (i = (fabric->fabdev.id + SLAVE_ID_KEY); i < + fabric->pdata->nslaves; i++) + radix_tree_delete(&fabric->fab_tree, i); + if (!fabric->ahb) { + fabdev->hw_algo.free_commit_data(fabric->cdata[DUAL_CTX]); + fabdev->hw_algo.free_commit_data(fabric->cdata[ACTIVE_CTX]); + } + + kfree(fabric->info.node_info); + kfree(fabric->hw_data); + kfree(fabric); + return ret; +} + +static struct of_device_id fabric_match[] = { + {.compatible = "msm-bus-fabric"}, + {} +}; + +static struct platform_driver msm_bus_fabric_driver = { + .probe = msm_bus_fabric_probe, + .remove = msm_bus_fabric_remove, + .driver = { + .name = "msm_bus_fabric", + .owner = THIS_MODULE, + .of_match_table = fabric_match, + }, +}; + +int __init msm_bus_fabric_init_driver(void) +{ + static bool initialized; + + if (initialized) + return 0; + else + initialized = true; + + MSM_BUS_ERR("msm_bus_fabric_init_driver\n"); + msm_bus_arb_setops_legacy(&arb_ops); + return platform_driver_register(&msm_bus_fabric_driver); +} +EXPORT_SYMBOL(msm_bus_fabric_init_driver); +subsys_initcall(msm_bus_fabric_init_driver); diff --git a/drivers/soc/qcom/msm_bus/msm_bus_fabric_adhoc.c b/drivers/soc/qcom/msm_bus/msm_bus_fabric_adhoc.c new file mode 100644 index 000000000000..eb369aa8df76 --- /dev/null +++ b/drivers/soc/qcom/msm_bus/msm_bus_fabric_adhoc.c @@ -0,0 +1,1262 @@ +/* Copyright (c) 2014, Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/soc/qcom/smd-rpm.h> +#include "msm_bus_core.h" +#include "msm_bus_adhoc.h" +#include "msm_bus_noc.h" +#include "msm_bus_bimc.h" + +struct static_rules_type { + int num_rules; + struct bus_rule_type *rules; +}; + +static struct static_rules_type static_rules; + +static int enable_nodeclk(struct nodeclk *nclk) +{ + int ret = 0; + + if (!nclk->enable) { + ret = clk_prepare_enable(nclk->clk); + + if (ret) { + MSM_BUS_ERR("%s: failed to enable clk ", __func__); + nclk->enable = false; + } else + nclk->enable = true; + } + return ret; +} + +static int disable_nodeclk(struct nodeclk *nclk) +{ + int ret = 0; + + if (nclk->enable) { + clk_disable_unprepare(nclk->clk); + nclk->enable = false; + } + return ret; +} + +static int setrate_nodeclk(struct nodeclk *nclk, long rate) +{ + int ret = 0; + + ret = clk_set_rate(nclk->clk, rate); + + if (ret) + MSM_BUS_ERR("%s: failed to setrate clk", __func__); + return ret; +} + +static int msm_bus_agg_fab_clks(struct device *bus_dev, void *data) +{ + struct msm_bus_node_device_type *node = NULL; + int ret = 0; + int ctx = *(int *)data; + + if (ctx >= NUM_CTX) { + MSM_BUS_ERR("%s: Invalid Context %d", __func__, ctx); + goto exit_agg_fab_clks; + } + + node = bus_dev->platform_data; + if (!node) { + MSM_BUS_ERR("%s: Can't get device info", __func__); + goto exit_agg_fab_clks; + } + + if (!node->node_info->is_fab_dev) { + struct msm_bus_node_device_type *bus_dev = NULL; + + bus_dev = node->node_info->bus_device->platform_data; + + if (node->cur_clk_hz[ctx] >= bus_dev->cur_clk_hz[ctx]) + bus_dev->cur_clk_hz[ctx] = node->cur_clk_hz[ctx]; + } + +exit_agg_fab_clks: + return ret; +} + +static int msm_bus_reset_fab_clks(struct device *bus_dev, void *data) +{ + struct msm_bus_node_device_type *node = NULL; + int ret = 0; + int ctx = *(int *)data; + 
+ if (ctx >= NUM_CTX) { + MSM_BUS_ERR("%s: Invalid Context %d", __func__, ctx); + goto exit_reset_fab_clks; + } + + node = bus_dev->platform_data; + if (!node) { + MSM_BUS_ERR("%s: Can't get device info", __func__); + goto exit_reset_fab_clks; + } + + if (node->node_info->is_fab_dev) { + node->cur_clk_hz[ctx] = 0; + MSM_BUS_DBG("Resetting for node %d", node->node_info->id); + } +exit_reset_fab_clks: + return ret; +} + + +static int send_rpm_msg(struct device *device) +{ + int ret = 0; + int ctx; + int rsc_type; + struct msm_bus_node_device_type *ndev = + device->platform_data; + struct qcom_msm_bus_req req = { + .nbytes = sizeof(uint64_t), + .key = RPM_MASTER_FIELD_BW, + }; + + if (!ndev) { + MSM_BUS_ERR("%s: Error getting node info.", __func__); + ret = -ENODEV; + goto exit_send_rpm_msg; + } + + for (ctx = QCOM_SMD_RPM_ACTIVE_STATE; ctx <= QCOM_SMD_RPM_SLEEP_STATE; + ctx++) { + if (ctx == QCOM_SMD_RPM_ACTIVE_STATE) + req.value = + ndev->node_ab.ab[QCOM_SMD_RPM_ACTIVE_STATE]; + else { + req.value = + ndev->node_ab.ab[QCOM_SMD_RPM_SLEEP_STATE]; + } + + if (ndev->node_info->mas_rpm_id != -1) { + rsc_type = RPM_BUS_MASTER_REQ; + ret = qcom_rpm_bus_send_message(ctx, rsc_type, + ndev->node_info->mas_rpm_id, &req); + if (ret) { + MSM_BUS_ERR("%s: Failed to send RPM message:", + __func__); + MSM_BUS_ERR("%s:Node Id %d RPM id %d", + __func__, ndev->node_info->id, + ndev->node_info->mas_rpm_id); + goto exit_send_rpm_msg; + } + } + + if (ndev->node_info->slv_rpm_id != -1) { + rsc_type = RPM_BUS_SLAVE_REQ; + ret = qcom_rpm_bus_send_message(ctx, rsc_type, + ndev->node_info->slv_rpm_id, &req); + if (ret) { + MSM_BUS_ERR("%s: Failed to send RPM message:", + __func__); + MSM_BUS_ERR("%s: Node Id %d RPM id %d", + __func__, ndev->node_info->id, + ndev->node_info->slv_rpm_id); + goto exit_send_rpm_msg; + } + } + } +exit_send_rpm_msg: + return ret; +} + +static int flush_bw_data(struct device *node_device, int ctx) +{ + struct msm_bus_node_device_type *node_info; + int ret = 0; + + 
node_info = node_device->platform_data; + if (!node_info) { + MSM_BUS_ERR("%s: Unable to find bus device for device %d", + __func__, node_info->node_info->id); + ret = -ENODEV; + goto exit_flush_bw_data; + } + + if (node_info->node_ab.dirty) { + if (node_info->ap_owned) { + struct msm_bus_node_device_type *bus_device = + node_info->node_info->bus_device->platform_data; + struct msm_bus_fab_device_type *fabdev = + bus_device->fabdev; + + if (fabdev && fabdev->noc_ops.update_bw_reg && + fabdev->noc_ops.update_bw_reg + (node_info->node_info->qos_params.mode)) + ret = fabdev->noc_ops.set_bw(node_info, + fabdev->qos_base, + fabdev->base_offset, + fabdev->qos_off, + fabdev->qos_freq); + } else { + ret = send_rpm_msg(node_device); + + if (ret) + MSM_BUS_ERR("%s: Failed to send RPM msg for%d", + __func__, node_info->node_info->id); + } + node_info->node_ab.dirty = false; + } + +exit_flush_bw_data: + return ret; + +} + +static int flush_clk_data(struct device *node_device, int ctx) +{ + struct msm_bus_node_device_type *node; + struct nodeclk *nodeclk = NULL; + int ret = 0; + + node = node_device->platform_data; + if (!node) { + MSM_BUS_ERR("Unable to find bus device"); + ret = -ENODEV; + goto exit_flush_clk_data; + } + + nodeclk = &node->clk[ctx]; + if (node->node_info->is_fab_dev) { + if (nodeclk->rate != node->cur_clk_hz[ctx]) { + nodeclk->rate = node->cur_clk_hz[ctx]; + nodeclk->dirty = true; + } + } + + if (nodeclk && nodeclk->clk && nodeclk->dirty) { + long rounded_rate; + + if (nodeclk->rate) { + rounded_rate = clk_round_rate(nodeclk->clk, + nodeclk->rate); + ret = setrate_nodeclk(nodeclk, rounded_rate); + + if (ret) { + MSM_BUS_ERR("%s: Failed to set_rate %lu for %d", + __func__, rounded_rate, + node->node_info->id); + ret = -ENODEV; + goto exit_flush_clk_data; + } + + ret = enable_nodeclk(nodeclk); + + if ((node->node_info->is_fab_dev) && + !IS_ERR_OR_NULL(node->qos_clk.clk)) + ret = enable_nodeclk(&node->qos_clk); + } else { + if ((node->node_info->is_fab_dev) && + 
!IS_ERR_OR_NULL(node->qos_clk.clk)) + ret = disable_nodeclk(&node->qos_clk); + + ret = disable_nodeclk(nodeclk); + } + + if (ret) { + MSM_BUS_ERR("%s: Failed to enable for %d", __func__, + node->node_info->id); + ret = -ENODEV; + goto exit_flush_clk_data; + } + MSM_BUS_DBG("%s: Updated %d clk to %llu", __func__, + node->node_info->id, nodeclk->rate); + + } +exit_flush_clk_data: + /* Reset the aggregated clock rate for fab devices*/ + if (node && node->node_info->is_fab_dev) + node->cur_clk_hz[ctx] = 0; + + if (nodeclk) + nodeclk->dirty = 0; + return ret; +} + +int msm_bus_commit_data(int *dirty_nodes, int ctx, int num_dirty) +{ + int ret = 0; + int i = 0; + + /* Aggregate the bus clocks */ + bus_for_each_dev(&msm_bus_type, NULL, (void *)&ctx, + msm_bus_agg_fab_clks); + + for (i = 0; i < num_dirty; i++) { + struct device *node_device = + bus_find_device(&msm_bus_type, NULL, + (void *)&dirty_nodes[i], + msm_bus_device_match_adhoc); + + if (!node_device) { + MSM_BUS_ERR("Can't find device for %d", dirty_nodes[i]); + continue; + } + + ret = flush_bw_data(node_device, ctx); + if (ret) + MSM_BUS_ERR("%s: Error flushing bw data for node %d", + __func__, dirty_nodes[i]); + + ret = flush_clk_data(node_device, ctx); + if (ret) + MSM_BUS_ERR("%s: Error flushing clk data for node %d", + __func__, dirty_nodes[i]); + } + kfree(dirty_nodes); + /* Aggregate the bus clocks */ + bus_for_each_dev(&msm_bus_type, NULL, (void *)&ctx, + msm_bus_reset_fab_clks); + return ret; +} + +void *msm_bus_realloc_devmem(struct device *dev, void *p, size_t old_size, + size_t new_size, gfp_t flags) +{ + void *ret; + size_t copy_size = old_size; + + if (!new_size) { + devm_kfree(dev, p); + return ZERO_SIZE_PTR; + } + + if (new_size < old_size) + copy_size = new_size; + + ret = devm_kzalloc(dev, new_size, flags); + if (!ret) { + MSM_BUS_ERR("%s: Error Reallocating memory", __func__); + goto exit_realloc_devmem; + } + + memcpy(ret, p, copy_size); + devm_kfree(dev, p); +exit_realloc_devmem: + return ret; 
+} + + +static int add_dirty_node(int **dirty_nodes, int id, int *num_dirty) +{ + int i; + int found = 0; + int ret = 0; + int *dnode = NULL; + + for (i = 0; i < *num_dirty; i++) { + if ((*dirty_nodes)[i] == id) { + found = 1; + break; + } + } + + if (!found) { + (*num_dirty)++; + dnode = + krealloc(*dirty_nodes, sizeof(int) * (*num_dirty), + GFP_KERNEL); + + if (ZERO_OR_NULL_PTR(dnode)) { + MSM_BUS_ERR("%s: Failure allocating dirty nodes array", + __func__); + ret = -ENOMEM; + } else { + *dirty_nodes = dnode; + (*dirty_nodes)[(*num_dirty) - 1] = id; + } + } + + return ret; +} + +int msm_bus_update_bw(struct msm_bus_node_device_type *nodedev, int ctx, + int64_t add_bw, int **dirty_nodes, int *num_dirty) +{ + int ret = 0; + int i, j; + uint64_t cur_ab_slp = 0; + uint64_t cur_ab_act = 0; + + if (nodedev->node_info->virt_dev) + goto exit_update_bw; + + for (i = 0; i < NUM_CTX; i++) { + for (j = 0; j < nodedev->num_lnodes; j++) { + if (i == DUAL_CTX) { + cur_ab_act += + nodedev->lnode_list[j].lnode_ab[i]; + cur_ab_slp += + nodedev->lnode_list[j].lnode_ab[i]; + } else + cur_ab_act += + nodedev->lnode_list[j].lnode_ab[i]; + } + } + + if (nodedev->node_ab.ab[QCOM_SMD_RPM_ACTIVE_STATE] != cur_ab_act) { + nodedev->node_ab.ab[QCOM_SMD_RPM_ACTIVE_STATE] = cur_ab_act; + nodedev->node_ab.ab[QCOM_SMD_RPM_SLEEP_STATE] = cur_ab_slp; + nodedev->node_ab.dirty = true; + ret = add_dirty_node(dirty_nodes, nodedev->node_info->id, + num_dirty); + + if (ret) { + MSM_BUS_ERR("%s: Failed to add dirty node %d", __func__, + nodedev->node_info->id); + goto exit_update_bw; + } + } + +exit_update_bw: + return ret; +} + +int msm_bus_update_clks(struct msm_bus_node_device_type *nodedev, + int ctx, int **dirty_nodes, int *num_dirty) +{ + int status = 0; + struct nodeclk *nodeclk; + struct nodeclk *busclk; + struct msm_bus_node_device_type *bus_info = NULL; + uint64_t req_clk; + + bus_info = nodedev->node_info->bus_device->platform_data; + + if (!bus_info) { + MSM_BUS_ERR("%s: Unable to find bus 
device for device %d", + __func__, nodedev->node_info->id); + status = -ENODEV; + goto exit_set_clks; + } + + req_clk = nodedev->cur_clk_hz[ctx]; + busclk = &bus_info->clk[ctx]; + + if (busclk->rate != req_clk) { + busclk->rate = req_clk; + busclk->dirty = 1; + MSM_BUS_DBG("%s: Modifying bus clk %d Rate %llu", __func__, + bus_info->node_info->id, req_clk); + status = add_dirty_node(dirty_nodes, bus_info->node_info->id, + num_dirty); + + if (status) { + MSM_BUS_ERR("%s: Failed to add dirty node %d", __func__, + bus_info->node_info->id); + goto exit_set_clks; + } + } + + req_clk = nodedev->cur_clk_hz[ctx]; + nodeclk = &nodedev->clk[ctx]; + + if (IS_ERR_OR_NULL(nodeclk)) + goto exit_set_clks; + + if (!nodeclk->dirty || (nodeclk->dirty && (nodeclk->rate < req_clk))) { + nodeclk->rate = req_clk; + nodeclk->dirty = 1; + MSM_BUS_DBG("%s: Modifying node clk %d Rate %llu", __func__, + nodedev->node_info->id, req_clk); + status = add_dirty_node(dirty_nodes, nodedev->node_info->id, + num_dirty); + if (status) { + MSM_BUS_ERR("%s: Failed to add dirty node %d", __func__, + nodedev->node_info->id); + goto exit_set_clks; + } + } + +exit_set_clks: + return status; +} + +static void msm_bus_fab_init_noc_ops(struct msm_bus_node_device_type *bus_dev) +{ + switch (bus_dev->fabdev->bus_type) { + case MSM_BUS_NOC: + msm_bus_noc_set_ops(bus_dev); + break; + case MSM_BUS_BIMC: + msm_bus_bimc_set_ops(bus_dev); + break; + default: + MSM_BUS_ERR("%s: Invalid Bus type", __func__); + } +} + +static int msm_bus_qos_disable_clk(struct msm_bus_node_device_type *node, + int disable_bus_qos_clk) +{ + struct msm_bus_node_device_type *bus_node = NULL; + int ret = 0; + + if (!node) { + ret = -ENXIO; + goto exit_disable_qos_clk; + } + + bus_node = node->node_info->bus_device->platform_data; + + if (!bus_node) { + ret = -ENXIO; + goto exit_disable_qos_clk; + } + + if (disable_bus_qos_clk) + ret = disable_nodeclk(&bus_node->clk[DUAL_CTX]); + + if (ret) { + MSM_BUS_ERR("%s: Failed to disable bus clk, node 
%d", + __func__, node->node_info->id); + goto exit_disable_qos_clk; + } + + if (!IS_ERR_OR_NULL(node->qos_clk.clk)) { + ret = disable_nodeclk(&node->qos_clk); + + if (ret) { + MSM_BUS_ERR("%s: Failed to disable mas qos clk,node %d", + __func__, node->node_info->id); + goto exit_disable_qos_clk; + } + } + +exit_disable_qos_clk: + return ret; +} + +static int msm_bus_qos_enable_clk(struct msm_bus_node_device_type *node) +{ + struct msm_bus_node_device_type *bus_node = NULL; + long rounded_rate; + int ret = 0; + int bus_qos_enabled = 0; + + if (!node) { + ret = -ENXIO; + goto exit_enable_qos_clk; + } + + bus_node = node->node_info->bus_device->platform_data; + + if (!bus_node) { + ret = -ENXIO; + goto exit_enable_qos_clk; + } + + /* Check if the bus clk is already set before trying to set it + * Do this only during + * a. Bootup + * b. Only for bus clks + **/ + if (!clk_get_rate(bus_node->clk[DUAL_CTX].clk)) { + rounded_rate = clk_round_rate(bus_node->clk[DUAL_CTX].clk, 1); + ret = setrate_nodeclk(&bus_node->clk[DUAL_CTX], rounded_rate); + if (ret) { + MSM_BUS_ERR("%s: Failed to set bus clk, node %d", + __func__, node->node_info->id); + goto exit_enable_qos_clk; + } + } + + ret = enable_nodeclk(&bus_node->clk[DUAL_CTX]); + if (ret) { + MSM_BUS_ERR("%s: Failed to enable bus clk, node %d", + __func__, node->node_info->id); + goto exit_enable_qos_clk; + } + bus_qos_enabled = 1; + + if (!IS_ERR_OR_NULL(bus_node->qos_clk.clk)) { + ret = enable_nodeclk(&bus_node->qos_clk); + if (ret) { + MSM_BUS_ERR("%s: Failed to enable bus QOS clk, node %d", + __func__, node->node_info->id); + goto exit_enable_qos_clk; + } + } + + if (!IS_ERR_OR_NULL(node->qos_clk.clk)) { + rounded_rate = clk_round_rate(node->qos_clk.clk, 1); + ret = setrate_nodeclk(&node->qos_clk, rounded_rate); + if (ret) { + MSM_BUS_ERR("%s: Failed to enable mas qos clk, node %d", + __func__, node->node_info->id); + goto exit_enable_qos_clk; + } + + ret = enable_nodeclk(&node->qos_clk); + if (ret) { + MSM_BUS_ERR("Err 
enable mas qos clk, node %d ret %d", + node->node_info->id, ret); + goto exit_enable_qos_clk; + } + } + ret = bus_qos_enabled; + +exit_enable_qos_clk: + return ret; +} + +int msm_bus_enable_limiter(struct msm_bus_node_device_type *node_dev, + bool enable, uint64_t lim_bw) +{ + int ret = 0; + struct msm_bus_node_device_type *bus_node_dev; + + if (!node_dev) { + MSM_BUS_ERR("No device specified"); + ret = -ENXIO; + goto exit_enable_limiter; + } + + if (!node_dev->ap_owned) { + MSM_BUS_ERR("Device is not AP owned %d.", + node_dev->node_info->id); + ret = -ENXIO; + goto exit_enable_limiter; + } + + bus_node_dev = node_dev->node_info->bus_device->platform_data; + if (!bus_node_dev) { + MSM_BUS_ERR("Unable to get bus device infofor %d", + node_dev->node_info->id); + ret = -ENXIO; + goto exit_enable_limiter; + } + if (bus_node_dev->fabdev && + bus_node_dev->fabdev->noc_ops.limit_mport) { + if (ret < 0) { + MSM_BUS_ERR("Can't Enable QoS clk %d", + node_dev->node_info->id); + goto exit_enable_limiter; + } + bus_node_dev->fabdev->noc_ops.limit_mport( + node_dev, + bus_node_dev->fabdev->qos_base, + bus_node_dev->fabdev->base_offset, + bus_node_dev->fabdev->qos_off, + bus_node_dev->fabdev->qos_freq, + enable, lim_bw); + } + +exit_enable_limiter: + return ret; +} + +static int msm_bus_dev_init_qos(struct device *dev, void *data) +{ + int ret = 0; + struct msm_bus_node_device_type *node_dev = NULL; + + node_dev = dev->platform_data; + + if (!node_dev) { + MSM_BUS_ERR("%s: Unable to get node device info" , __func__); + ret = -ENXIO; + goto exit_init_qos; + } + + MSM_BUS_DBG("Device = %d", node_dev->node_info->id); + + if (node_dev->ap_owned) { + struct msm_bus_node_device_type *bus_node_info; + + bus_node_info = node_dev->node_info->bus_device->platform_data; + + if (!bus_node_info) { + MSM_BUS_ERR("%s: Unable to get bus device infofor %d", + __func__, + node_dev->node_info->id); + ret = -ENXIO; + goto exit_init_qos; + } + + if (bus_node_info->fabdev && + 
bus_node_info->fabdev->noc_ops.qos_init) { + int ret = 0; + + if (node_dev->ap_owned && + (node_dev->node_info->qos_params.mode) != -1) { + + if (bus_node_info->fabdev->bypass_qos_prg) + goto exit_init_qos; + + ret = msm_bus_qos_enable_clk(node_dev); + if (ret < 0) { + MSM_BUS_ERR("Can't Enable QoS clk %d", + node_dev->node_info->id); + goto exit_init_qos; + } + + bus_node_info->fabdev->noc_ops.qos_init( + node_dev, + bus_node_info->fabdev->qos_base, + bus_node_info->fabdev->base_offset, + bus_node_info->fabdev->qos_off, + bus_node_info->fabdev->qos_freq); + ret = msm_bus_qos_disable_clk(node_dev, ret); + } + } else + MSM_BUS_ERR("%s: Skipping QOS init for %d", + __func__, node_dev->node_info->id); + } +exit_init_qos: + return ret; +} + +static int msm_bus_fabric_init(struct device *dev, + struct msm_bus_node_device_type *pdata) +{ + struct msm_bus_fab_device_type *fabdev; + struct msm_bus_node_device_type *node_dev = NULL; + int ret = 0; + + node_dev = dev->platform_data; + if (!node_dev) { + MSM_BUS_ERR("%s: Unable to get bus device info" , __func__); + ret = -ENXIO; + goto exit_fabric_init; + } + + if (node_dev->node_info->virt_dev) { + MSM_BUS_ERR("%s: Skip Fab init for virtual device %d", __func__, + node_dev->node_info->id); + goto exit_fabric_init; + } + + fabdev = devm_kzalloc(dev, sizeof(struct msm_bus_fab_device_type), + GFP_KERNEL); + if (!fabdev) { + MSM_BUS_ERR("Fabric alloc failed\n"); + ret = -ENOMEM; + goto exit_fabric_init; + } + + node_dev->fabdev = fabdev; + fabdev->pqos_base = pdata->fabdev->pqos_base; + fabdev->qos_range = pdata->fabdev->qos_range; + fabdev->base_offset = pdata->fabdev->base_offset; + fabdev->qos_off = pdata->fabdev->qos_off; + fabdev->qos_freq = pdata->fabdev->qos_freq; + fabdev->bus_type = pdata->fabdev->bus_type; + fabdev->bypass_qos_prg = pdata->fabdev->bypass_qos_prg; + fabdev->util_fact = pdata->fabdev->util_fact; + fabdev->vrail_comp = pdata->fabdev->vrail_comp; + msm_bus_fab_init_noc_ops(node_dev); + + fabdev->qos_base 
= devm_ioremap(dev, + fabdev->pqos_base, fabdev->qos_range); + if (!fabdev->qos_base) { + MSM_BUS_ERR("%s: Error remapping address 0x%zx :bus device %d", + __func__, + (size_t)fabdev->pqos_base, node_dev->node_info->id); + ret = -ENOMEM; + goto exit_fabric_init; + } + +exit_fabric_init: + return ret; +} + +static int msm_bus_init_clk(struct device *bus_dev, + struct msm_bus_node_device_type *pdata) +{ + unsigned int ctx; + int ret = 0; + struct msm_bus_node_device_type *node_dev = bus_dev->platform_data; + + for (ctx = 0; ctx < NUM_CTX; ctx++) { + if (!IS_ERR_OR_NULL(pdata->clk[ctx].clk)) { + node_dev->clk[ctx].clk = pdata->clk[ctx].clk; + node_dev->clk[ctx].enable = false; + node_dev->clk[ctx].dirty = false; + MSM_BUS_ERR("%s: Valid node clk node %d ctx %d", + __func__, node_dev->node_info->id, ctx); + } + } + + if (!IS_ERR_OR_NULL(pdata->qos_clk.clk)) { + node_dev->qos_clk.clk = pdata->qos_clk.clk; + node_dev->qos_clk.enable = false; + MSM_BUS_ERR("%s: Valid Iface clk node %d", __func__, + node_dev->node_info->id); + } + + return ret; +} + +static int msm_bus_copy_node_info(struct msm_bus_node_device_type *pdata, + struct device *bus_dev) +{ + int ret = 0; + struct msm_bus_node_info_type *node_info = NULL; + struct msm_bus_node_info_type *pdata_node_info = NULL; + struct msm_bus_node_device_type *bus_node = NULL; + + bus_node = bus_dev->platform_data; + + if (!bus_node || !pdata) { + ret = -ENXIO; + MSM_BUS_ERR("%s: Invalid pointers pdata %p, bus_node %p", + __func__, pdata, bus_node); + goto exit_copy_node_info; + } + + node_info = bus_node->node_info; + pdata_node_info = pdata->node_info; + + node_info->name = pdata_node_info->name; + node_info->id = pdata_node_info->id; + node_info->bus_device_id = pdata_node_info->bus_device_id; + node_info->mas_rpm_id = pdata_node_info->mas_rpm_id; + node_info->slv_rpm_id = pdata_node_info->slv_rpm_id; + node_info->num_connections = pdata_node_info->num_connections; + node_info->num_blist = pdata_node_info->num_blist; + 
node_info->num_qports = pdata_node_info->num_qports; + node_info->buswidth = pdata_node_info->buswidth; + node_info->virt_dev = pdata_node_info->virt_dev; + node_info->is_fab_dev = pdata_node_info->is_fab_dev; + node_info->qos_params.mode = pdata_node_info->qos_params.mode; + node_info->qos_params.prio1 = pdata_node_info->qos_params.prio1; + node_info->qos_params.prio0 = pdata_node_info->qos_params.prio0; + node_info->qos_params.prio_lvl = pdata_node_info->qos_params.prio_lvl; + node_info->qos_params.prio_rd = pdata_node_info->qos_params.prio_rd; + node_info->qos_params.prio_wr = pdata_node_info->qos_params.prio_wr; + node_info->qos_params.gp = pdata_node_info->qos_params.gp; + node_info->qos_params.thmp = pdata_node_info->qos_params.thmp; + node_info->qos_params.ws = pdata_node_info->qos_params.ws; + node_info->qos_params.bw_buffer = pdata_node_info->qos_params.bw_buffer; + + node_info->dev_connections = devm_kzalloc(bus_dev, + sizeof(struct device *) * + pdata_node_info->num_connections, + GFP_KERNEL); + if (!node_info->dev_connections) { + MSM_BUS_ERR("%s:Bus dev connections alloc failed\n", __func__); + ret = -ENOMEM; + goto exit_copy_node_info; + } + + node_info->connections = devm_kzalloc(bus_dev, + sizeof(int) * pdata_node_info->num_connections, + GFP_KERNEL); + if (!node_info->connections) { + MSM_BUS_ERR("%s:Bus connections alloc failed\n", __func__); + devm_kfree(bus_dev, node_info->dev_connections); + ret = -ENOMEM; + goto exit_copy_node_info; + } + + memcpy(node_info->connections, + pdata_node_info->connections, + sizeof(int) * pdata_node_info->num_connections); + + node_info->black_connections = devm_kzalloc(bus_dev, + sizeof(struct device *) * + pdata_node_info->num_blist, + GFP_KERNEL); + if (!node_info->black_connections) { + MSM_BUS_ERR("%s: Bus black connections alloc failed\n", + __func__); + devm_kfree(bus_dev, node_info->dev_connections); + devm_kfree(bus_dev, node_info->connections); + ret = -ENOMEM; + goto exit_copy_node_info; + } + + 
node_info->black_listed_connections = devm_kzalloc(bus_dev, + pdata_node_info->num_blist * sizeof(int), + GFP_KERNEL); + if (!node_info->black_listed_connections) { + MSM_BUS_ERR("%s:Bus black list connections alloc failed\n", + __func__); + devm_kfree(bus_dev, node_info->black_connections); + devm_kfree(bus_dev, node_info->dev_connections); + devm_kfree(bus_dev, node_info->connections); + ret = -ENOMEM; + goto exit_copy_node_info; + } + + memcpy(node_info->black_listed_connections, + pdata_node_info->black_listed_connections, + sizeof(int) * pdata_node_info->num_blist); + + node_info->qport = devm_kzalloc(bus_dev, + sizeof(int) * pdata_node_info->num_qports, + GFP_KERNEL); + if (!node_info->qport) { + MSM_BUS_ERR("%s:Bus qport allocation failed\n", __func__); + devm_kfree(bus_dev, node_info->dev_connections); + devm_kfree(bus_dev, node_info->connections); + devm_kfree(bus_dev, node_info->black_listed_connections); + ret = -ENOMEM; + goto exit_copy_node_info; + } + + memcpy(node_info->qport, + pdata_node_info->qport, + sizeof(int) * pdata_node_info->num_qports); + +exit_copy_node_info: + return ret; +} + +static struct device *msm_bus_device_init( + struct msm_bus_node_device_type *pdata) +{ + struct device *bus_dev = NULL; + struct msm_bus_node_device_type *bus_node = NULL; + struct msm_bus_node_info_type *node_info = NULL; + int ret = 0; + + bus_dev = kzalloc(sizeof(struct device), GFP_KERNEL); + if (!bus_dev) { + MSM_BUS_ERR("%s:Device alloc failed\n", __func__); + bus_dev = NULL; + goto exit_device_init; + } + /** + * Init here so we can use devm calls + */ + device_initialize(bus_dev); + + bus_node = devm_kzalloc(bus_dev, + sizeof(struct msm_bus_node_device_type), GFP_KERNEL); + if (!bus_node) { + MSM_BUS_ERR("%s:Bus node alloc failed\n", __func__); + kfree(bus_dev); + bus_dev = NULL; + goto exit_device_init; + } + + node_info = devm_kzalloc(bus_dev, + sizeof(struct msm_bus_node_info_type), GFP_KERNEL); + if (!node_info) { + MSM_BUS_ERR("%s:Bus node info alloc 
failed\n", __func__); + devm_kfree(bus_dev, bus_node); + kfree(bus_dev); + bus_dev = NULL; + goto exit_device_init; + } + + bus_node->node_info = node_info; + bus_node->ap_owned = pdata->ap_owned; + bus_dev->platform_data = bus_node; + + if (msm_bus_copy_node_info(pdata, bus_dev) < 0) { + devm_kfree(bus_dev, bus_node); + devm_kfree(bus_dev, node_info); + kfree(bus_dev); + bus_dev = NULL; + goto exit_device_init; + } + + bus_dev->bus = &msm_bus_type; + dev_set_name(bus_dev, bus_node->node_info->name); + + ret = device_add(bus_dev); + if (ret < 0) { + MSM_BUS_ERR("%s: Error registering device %d", + __func__, pdata->node_info->id); + devm_kfree(bus_dev, bus_node); + devm_kfree(bus_dev, node_info->dev_connections); + devm_kfree(bus_dev, node_info->connections); + devm_kfree(bus_dev, node_info->black_connections); + devm_kfree(bus_dev, node_info->black_listed_connections); + devm_kfree(bus_dev, node_info); + kfree(bus_dev); + bus_dev = NULL; + goto exit_device_init; + } + +exit_device_init: + return bus_dev; +} + +static int msm_bus_setup_dev_conn(struct device *bus_dev, void *data) +{ + struct msm_bus_node_device_type *bus_node = NULL; + int ret = 0; + int j; + + bus_node = bus_dev->platform_data; + if (!bus_node) { + MSM_BUS_ERR("%s: Can't get device info", __func__); + ret = -ENODEV; + goto exit_setup_dev_conn; + } + + /* Setup parent bus device for this node */ + if (!bus_node->node_info->is_fab_dev) { + struct device *bus_parent_device = + bus_find_device(&msm_bus_type, NULL, + (void *)&bus_node->node_info->bus_device_id, + msm_bus_device_match_adhoc); + + if (!bus_parent_device) { + MSM_BUS_ERR("%s: Error finding parentdev %d parent %d", + __func__, + bus_node->node_info->id, + bus_node->node_info->bus_device_id); + ret = -ENXIO; + goto exit_setup_dev_conn; + } + bus_node->node_info->bus_device = bus_parent_device; + } + + bus_node->node_info->is_traversed = false; + + for (j = 0; j < bus_node->node_info->num_connections; j++) { + 
bus_node->node_info->dev_connections[j] = + bus_find_device(&msm_bus_type, NULL, + (void *)&bus_node->node_info->connections[j], + msm_bus_device_match_adhoc); + + if (!bus_node->node_info->dev_connections[j]) { + MSM_BUS_ERR("%s: Error finding conn %d for device %d", + __func__, bus_node->node_info->connections[j], + bus_node->node_info->id); + ret = -ENODEV; + goto exit_setup_dev_conn; + } + } + + for (j = 0; j < bus_node->node_info->num_blist; j++) { + bus_node->node_info->black_connections[j] = + bus_find_device(&msm_bus_type, NULL, + (void *)&bus_node->node_info-> + black_listed_connections[j], + msm_bus_device_match_adhoc); + + if (!bus_node->node_info->black_connections[j]) { + MSM_BUS_ERR("%s: Error finding conn %d for device %d\n", + __func__, bus_node->node_info-> + black_listed_connections[j], + bus_node->node_info->id); + ret = -ENODEV; + goto exit_setup_dev_conn; + } + } + +exit_setup_dev_conn: + return ret; +} + +static int msm_bus_node_debug(struct device *bus_dev, void *data) +{ + int j; + int ret = 0; + struct msm_bus_node_device_type *bus_node = NULL; + + bus_node = bus_dev->platform_data; + if (!bus_node) { + MSM_BUS_ERR("%s: Can't get device info", __func__); + ret = -ENODEV; + goto exit_node_debug; + } + + MSM_BUS_DBG("Device = %d buswidth %u", bus_node->node_info->id, + bus_node->node_info->buswidth); + for (j = 0; j < bus_node->node_info->num_connections; j++) { + struct msm_bus_node_device_type *bdev = + (struct msm_bus_node_device_type *) + bus_node->node_info->dev_connections[j]->platform_data; + MSM_BUS_DBG("\n\t Connection[%d] %d", j, bdev->node_info->id); + } + + if (bus_node->node_info->is_fab_dev) + msm_bus_floor_init(bus_dev); + +exit_node_debug: + return ret; +} + +static int msm_bus_device_probe(struct platform_device *pdev) +{ + unsigned int i, ret; + struct msm_bus_device_node_registration *pdata; + + /* If possible, get pdata from device-tree */ + if (pdev->dev.of_node) + pdata = msm_bus_of_to_pdata(pdev); + else { + pdata = 
(struct msm_bus_device_node_registration *)pdev-> + dev.platform_data; + } + + if (IS_ERR_OR_NULL(pdata)) { + MSM_BUS_ERR("No platform data found"); + ret = -ENODATA; + goto exit_device_probe; + } + + for (i = 0; i < pdata->num_devices; i++) { + struct device *node_dev = NULL; + + node_dev = msm_bus_device_init(&pdata->info[i]); + + if (!node_dev) { + MSM_BUS_ERR("%s: Error during dev init for %d", + __func__, pdata->info[i].node_info->id); + ret = -ENXIO; + goto exit_device_probe; + } + + ret = msm_bus_init_clk(node_dev, &pdata->info[i]); + /*Is this a fabric device ?*/ + if (pdata->info[i].node_info->is_fab_dev) { + MSM_BUS_DBG("%s: %d is a fab", __func__, + pdata->info[i].node_info->id); + ret = msm_bus_fabric_init(node_dev, &pdata->info[i]); + if (ret) { + MSM_BUS_ERR("%s: Error intializing fab %d", + __func__, pdata->info[i].node_info->id); + goto exit_device_probe; + } + } + } + + ret = bus_for_each_dev(&msm_bus_type, NULL, NULL, + msm_bus_setup_dev_conn); + if (ret) { + MSM_BUS_ERR("%s: Error setting up dev connections", __func__); + goto exit_device_probe; + } + + ret = bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_dev_init_qos); + if (ret) { + MSM_BUS_ERR("%s: Error during qos init", __func__); + goto exit_device_probe; + } + + + /* Register the arb layer ops */ + msm_bus_arb_setops_adhoc(&arb_ops); + bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_node_debug); + + devm_kfree(&pdev->dev, pdata->info); + devm_kfree(&pdev->dev, pdata); +exit_device_probe: + return ret; +} + +static int msm_bus_device_rules_probe(struct platform_device *pdev) +{ + struct bus_rule_type *rule_data = NULL; + int num_rules = 0; + + num_rules = msm_bus_of_get_static_rules(pdev, &rule_data); + + if (!rule_data) + goto exit_rules_probe; + + msm_rule_register(num_rules, rule_data, NULL); + static_rules.num_rules = num_rules; + static_rules.rules = rule_data; + pdev->dev.platform_data = &static_rules; + +exit_rules_probe: + return 0; +} + +int 
msm_bus_device_rules_remove(struct platform_device *pdev) +{ + struct static_rules_type *static_rules = NULL; + + static_rules = pdev->dev.platform_data; + if (static_rules) + msm_rule_unregister(static_rules->num_rules, + static_rules->rules, NULL); + return 0; +} + +static int msm_bus_free_dev(struct device *dev, void *data) +{ + struct msm_bus_node_device_type *bus_node = NULL; + + bus_node = dev->platform_data; + + if (bus_node) + MSM_BUS_ERR("\n%s: Removing device %d", __func__, + bus_node->node_info->id); + device_unregister(dev); + return 0; +} + +int msm_bus_device_remove(struct platform_device *pdev) +{ + bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_free_dev); + return 0; +} + +static struct of_device_id rules_match[] = { + {.compatible = "qcom,msm-bus-static-bw-rules"}, + {} +}; + +static struct platform_driver msm_bus_rules_driver = { + .probe = msm_bus_device_rules_probe, + .remove = msm_bus_device_rules_remove, + .driver = { + .name = "msm_bus_rules_device", + .owner = THIS_MODULE, + .of_match_table = rules_match, + }, +}; + +static struct of_device_id fabric_match[] = { + {.compatible = "qcom,msm-bus-device"}, + {} +}; + +static struct platform_driver msm_bus_device_driver = { + .probe = msm_bus_device_probe, + .remove = msm_bus_device_remove, + .driver = { + .name = "msm_bus_device", + .owner = THIS_MODULE, + .of_match_table = fabric_match, + }, +}; + +int __init msm_bus_device_init_driver(void) +{ + int rc; + + MSM_BUS_ERR("msm_bus_fabric_init_driver\n"); + rc = platform_driver_register(&msm_bus_device_driver); + + if (rc) { + MSM_BUS_ERR("Failed to register bus device driver"); + return rc; + } + return platform_driver_register(&msm_bus_rules_driver); +} +subsys_initcall(msm_bus_device_init_driver); diff --git a/drivers/soc/qcom/msm_bus/msm_bus_id.c b/drivers/soc/qcom/msm_bus/msm_bus_id.c new file mode 100644 index 000000000000..3238161c2aad --- /dev/null +++ b/drivers/soc/qcom/msm_bus/msm_bus_id.c @@ -0,0 +1,94 @@ +/* Copyright (c) 
2013-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/msm-bus.h> +#include <linux/msm-bus-board.h> +#include "msm_bus_core.h" +#include "msm_bus_noc.h" +#include "msm_bus_bimc.h" + +static uint32_t master_iids[MSM_BUS_MASTER_LAST]; +static uint32_t slave_iids[MSM_BUS_SLAVE_LAST - SLAVE_ID_KEY]; + +static void msm_bus_assign_iids(struct msm_bus_fabric_registration + *fabreg, int fabid) +{ + int i; + for (i = 0; i < fabreg->len; i++) { + if (!fabreg->info[i].gateway) { + fabreg->info[i].priv_id = fabid + fabreg->info[i].id; + if (fabreg->info[i].id < SLAVE_ID_KEY) { + if (fabreg->info[i].id >= MSM_BUS_MASTER_LAST) { + WARN(1, "id %d exceeds array size!\n", + fabreg->info[i].id); + continue; + } + + master_iids[fabreg->info[i].id] = + fabreg->info[i].priv_id; + } else { + if ((fabreg->info[i].id - SLAVE_ID_KEY) >= + (MSM_BUS_SLAVE_LAST - SLAVE_ID_KEY)) { + WARN(1, "id %d exceeds array size!\n", + fabreg->info[i].id); + continue; + } + + slave_iids[fabreg->info[i].id - (SLAVE_ID_KEY)] + = fabreg->info[i].priv_id; + } + } else { + fabreg->info[i].priv_id = fabreg->info[i].id; + } + } +} + +static int msm_bus_get_iid(int id) +{ + if ((id < SLAVE_ID_KEY && id >= MSM_BUS_MASTER_LAST) || + id >= MSM_BUS_SLAVE_LAST) { + MSM_BUS_ERR("Cannot get iid. Invalid id %d passed\n", id); + return -EINVAL; + } + + return CHECK_ID(((id < SLAVE_ID_KEY) ? 
master_iids[id] : + slave_iids[id - SLAVE_ID_KEY]), id); +} + +static struct msm_bus_board_algorithm msm_bus_id_algo = { + .get_iid = msm_bus_get_iid, + .assign_iids = msm_bus_assign_iids, +}; + +int msm_bus_board_rpm_get_il_ids(uint16_t *id) +{ + return -ENXIO; +} + +void msm_bus_board_init(struct msm_bus_fabric_registration *pdata) +{ + pdata->board_algo = &msm_bus_id_algo; +} + +void msm_bus_board_set_nfab(struct msm_bus_fabric_registration *pdata, + int nfab) +{ + if (nfab <= 0) + return; + + msm_bus_id_algo.board_nfab = nfab; +} diff --git a/drivers/soc/qcom/msm_bus/msm_bus_noc.c b/drivers/soc/qcom/msm_bus/msm_bus_noc.c new file mode 100644 index 000000000000..b3458df5c11b --- /dev/null +++ b/drivers/soc/qcom/msm_bus/msm_bus_noc.c @@ -0,0 +1,769 @@ +/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#define pr_fmt(fmt) "AXI: NOC: %s(): " fmt, __func__ + +#include <linux/slab.h> +#include <linux/io.h> +#include <linux/msm-bus-board.h> +#include "msm_bus_core.h" +#include "msm_bus_noc.h" +#include "msm_bus_adhoc.h" + +/* NOC_QOS generic */ +#define __CLZ(x) ((8 * sizeof(uint32_t)) - 1 - __fls(x)) +#define SAT_SCALE 16 /* 16 bytes minimum for saturation */ +#define BW_SCALE 256 /* 1/256 byte per cycle unit */ +#define QOS_DEFAULT_BASEOFFSET 0x00003000 +#define QOS_DEFAULT_DELTA 0x80 +#define MAX_BW_FIELD (NOC_QOS_BWn_BW_BMSK >> NOC_QOS_BWn_BW_SHFT) +#define MAX_SAT_FIELD (NOC_QOS_SATn_SAT_BMSK >> NOC_QOS_SATn_SAT_SHFT) + +#define NOC_QOS_REG_BASE(b, o) ((b) + (o)) + +#define NOC_QOS_ID_COREIDn_ADDR(b, o, n, d) \ + (NOC_QOS_REG_BASE(b, o) + (d) * (n)) +enum noc_qos_id_coreidn { + NOC_QOS_ID_COREIDn_RMSK = 0xffffffff, + NOC_QOS_ID_COREIDn_MAXn = 32, + NOC_QOS_ID_COREIDn_CORECHSUM_BMSK = 0xffffff00, + NOC_QOS_ID_COREIDn_CORECHSUM_SHFT = 0x8, + NOC_QOS_ID_COREIDn_CORETYPEID_BMSK = 0xff, + NOC_QOS_ID_COREIDn_CORETYPEID_SHFT = 0x0, +}; + +#define NOC_QOS_ID_REVISIONIDn_ADDR(b, o, n, d) \ + (NOC_QOS_REG_BASE(b, o) + 0x4 + (d) * (n)) +enum noc_qos_id_revisionidn { + NOC_QOS_ID_REVISIONIDn_RMSK = 0xffffffff, + NOC_QOS_ID_REVISIONIDn_MAXn = 32, + NOC_QOS_ID_REVISIONIDn_FLEXNOCID_BMSK = 0xffffff00, + NOC_QOS_ID_REVISIONIDn_FLEXNOCID_SHFT = 0x8, + NOC_QOS_ID_REVISIONIDn_USERID_BMSK = 0xff, + NOC_QOS_ID_REVISIONIDn_USERID_SHFT = 0x0, +}; + +#define NOC_QOS_PRIORITYn_ADDR(b, o, n, d) \ + (NOC_QOS_REG_BASE(b, o) + 0x8 + (d) * (n)) +enum noc_qos_id_priorityn { + NOC_QOS_PRIORITYn_RMSK = 0x0000000f, + NOC_QOS_PRIORITYn_MAXn = 32, + NOC_QOS_PRIORITYn_P1_BMSK = 0xc, + NOC_QOS_PRIORITYn_P1_SHFT = 0x2, + NOC_QOS_PRIORITYn_P0_BMSK = 0x3, + NOC_QOS_PRIORITYn_P0_SHFT = 0x0, +}; + +#define NOC_QOS_MODEn_ADDR(b, o, n, d) \ + (NOC_QOS_REG_BASE(b, o) + 0xC + (d) * (n)) +enum noc_qos_id_moden_rmsk { + NOC_QOS_MODEn_RMSK = 0x00000003, + NOC_QOS_MODEn_MAXn = 32, + 
NOC_QOS_MODEn_MODE_BMSK = 0x3, + NOC_QOS_MODEn_MODE_SHFT = 0x0, +}; + +#define NOC_QOS_BWn_ADDR(b, o, n, d) \ + (NOC_QOS_REG_BASE(b, o) + 0x10 + (d) * (n)) +enum noc_qos_id_bwn { + NOC_QOS_BWn_RMSK = 0x0000ffff, + NOC_QOS_BWn_MAXn = 32, + NOC_QOS_BWn_BW_BMSK = 0xffff, + NOC_QOS_BWn_BW_SHFT = 0x0, +}; + +/* QOS Saturation registers */ +#define NOC_QOS_SATn_ADDR(b, o, n, d) \ + (NOC_QOS_REG_BASE(b, o) + 0x14 + (d) * (n)) +enum noc_qos_id_saturationn { + NOC_QOS_SATn_RMSK = 0x000003ff, + NOC_QOS_SATn_MAXn = 32, + NOC_QOS_SATn_SAT_BMSK = 0x3ff, + NOC_QOS_SATn_SAT_SHFT = 0x0, +}; + +static int noc_div(uint64_t *a, uint32_t b) +{ + if ((*a > 0) && (*a < b)) + return 1; + else + return do_div(*a, b); +} + +/** + * Calculates bw hardware is using from register values + * bw returned is in bytes/sec + */ +static uint64_t noc_bw(uint32_t bw_field, uint32_t qos_freq) +{ + uint64_t res; + uint32_t rem, scale; + + res = 2 * qos_freq * bw_field; + scale = BW_SCALE * 1000; + rem = noc_div(&res, scale); + MSM_BUS_DBG("NOC: Calculated bw: %llu\n", res * 1000000ULL); + return res * 1000000ULL; +} + +static uint32_t noc_bw_ceil(long int bw_field, uint32_t qos_freq) +{ + uint64_t bw_temp = 2 * qos_freq * bw_field; + uint32_t scale = 1000 * BW_SCALE; + noc_div(&bw_temp, scale); + return bw_temp * 1000000; +} +#define MAX_BW(timebase) noc_bw_ceil(MAX_BW_FIELD, (timebase)) + +/** + * Calculates ws hardware is using from register values + * ws returned is in nanoseconds + */ +static uint32_t noc_ws(uint64_t bw, uint32_t sat, uint32_t qos_freq) +{ + if (bw && qos_freq) { + uint32_t bwf = bw * qos_freq; + uint64_t scale = 1000000000000LL * BW_SCALE * + SAT_SCALE * sat; + noc_div(&scale, bwf); + MSM_BUS_DBG("NOC: Calculated ws: %llu\n", scale); + return scale; + } + + return 0; +} +#define MAX_WS(bw, timebase) noc_ws((bw), MAX_SAT_FIELD, (timebase)) + +/* Calculate bandwidth field value for requested bandwidth */ +static uint32_t noc_bw_field(uint64_t bw, uint32_t qos_freq) +{ + uint32_t 
bw_field = 0; + + if (bw) { + uint32_t rem; + uint64_t bw_capped = min_t(uint64_t, bw, MAX_BW(qos_freq)); + uint64_t bwc = bw_capped * BW_SCALE; + uint64_t qf = 2 * qos_freq * 1000; + + rem = noc_div(&bwc, qf); + bw_field = (uint32_t)min_t(uint64_t, bwc, MAX_BW_FIELD); + } + + MSM_BUS_DBG("NOC: bw_field: %u\n", bw_field); + return bw_field; +} + +static uint32_t noc_sat_field(uint64_t bw, uint32_t ws, uint32_t qos_freq) +{ + uint32_t sat_field = 0, win; + + if (bw) { + /* Limit to max bw and scale bw to 100 KB increments */ + uint64_t tbw, tscale; + uint64_t bw_scaled = min_t(uint64_t, bw, MAX_BW(qos_freq)); + uint32_t rem = noc_div(&bw_scaled, 100000); + + /** + * Calculate saturation from windows size. + * WS must be at least one arb period. + * Saturation must not exceed max field size + * + * Bandwidth is in 100KB increments + * Window size is in ns + * qos_freq is in KHz + */ + win = max(ws, 1000000 / qos_freq); + tbw = bw_scaled * win * qos_freq; + tscale = 10000000ULL * BW_SCALE * SAT_SCALE; + rem = noc_div(&tbw, tscale); + sat_field = (uint32_t)min_t(uint64_t, tbw, MAX_SAT_FIELD); + } + + MSM_BUS_DBG("NOC: sat_field: %d\n", sat_field); + return sat_field; +} + +static void noc_set_qos_mode(void __iomem *base, uint32_t qos_off, + uint32_t mport, uint32_t qos_delta, uint8_t mode, + uint8_t perm_mode) +{ + if (mode < NOC_QOS_MODE_MAX && + ((1 << mode) & perm_mode)) { + uint32_t reg_val; + + reg_val = readl_relaxed(NOC_QOS_MODEn_ADDR(base, qos_off, + mport, qos_delta)) & NOC_QOS_MODEn_RMSK; + writel_relaxed(((reg_val & (~(NOC_QOS_MODEn_MODE_BMSK))) | + (mode & NOC_QOS_MODEn_MODE_BMSK)), + NOC_QOS_MODEn_ADDR(base, qos_off, mport, qos_delta)); + } + /* Ensure qos mode is set before exiting */ + wmb(); +} + +static void noc_set_qos_priority(void __iomem *base, uint32_t qos_off, + uint32_t mport, uint32_t qos_delta, + struct msm_bus_noc_qos_priority *priority) +{ + uint32_t reg_val, val; + + reg_val = readl_relaxed(NOC_QOS_PRIORITYn_ADDR(base, qos_off, mport, + 
qos_delta)) & NOC_QOS_PRIORITYn_RMSK; + val = priority->p1 << NOC_QOS_PRIORITYn_P1_SHFT; + writel_relaxed(((reg_val & (~(NOC_QOS_PRIORITYn_P1_BMSK))) | + (val & NOC_QOS_PRIORITYn_P1_BMSK)), + NOC_QOS_PRIORITYn_ADDR(base, qos_off, mport, qos_delta)); + + reg_val = readl_relaxed(NOC_QOS_PRIORITYn_ADDR(base, qos_off, mport, + qos_delta)) + & NOC_QOS_PRIORITYn_RMSK; + writel_relaxed(((reg_val & (~(NOC_QOS_PRIORITYn_P0_BMSK))) | + (priority->p0 & NOC_QOS_PRIORITYn_P0_BMSK)), + NOC_QOS_PRIORITYn_ADDR(base, qos_off, mport, qos_delta)); + /* Ensure qos priority is set before exiting */ + wmb(); +} + +static void msm_bus_noc_set_qos_bw(void __iomem *base, uint32_t qos_off, + uint32_t qos_freq, uint32_t mport, uint32_t qos_delta, + uint8_t perm_mode, struct msm_bus_noc_qos_bw *qbw) +{ + uint32_t reg_val, val, mode; + + if (!qos_freq) { + MSM_BUS_DBG("Zero QoS Freq\n"); + return; + } + + + /* If Limiter or Regulator modes are not supported, bw not available*/ + if (perm_mode & (NOC_QOS_PERM_MODE_LIMITER | + NOC_QOS_PERM_MODE_REGULATOR)) { + uint32_t bw_val = noc_bw_field(qbw->bw, qos_freq); + uint32_t sat_val = noc_sat_field(qbw->bw, qbw->ws, + qos_freq); + + MSM_BUS_DBG("NOC: BW: perm_mode: %d bw_val: %d, sat_val: %d\n", + perm_mode, bw_val, sat_val); + /* + * If in Limiter/Regulator mode, first go to fixed mode. 
+ * Clear QoS accumulator + **/ + mode = readl_relaxed(NOC_QOS_MODEn_ADDR(base, qos_off, + mport, qos_delta)) & NOC_QOS_MODEn_MODE_BMSK; + if (mode == NOC_QOS_MODE_REGULATOR || mode == + NOC_QOS_MODE_LIMITER) { + reg_val = readl_relaxed(NOC_QOS_MODEn_ADDR( + base, qos_off, mport, qos_delta)); + val = NOC_QOS_MODE_FIXED; + writel_relaxed((reg_val & (~(NOC_QOS_MODEn_MODE_BMSK))) + | (val & NOC_QOS_MODEn_MODE_BMSK), + NOC_QOS_MODEn_ADDR(base, qos_off, mport, + qos_delta)); + } + + reg_val = readl_relaxed(NOC_QOS_BWn_ADDR(base, qos_off, mport, + qos_delta)); + val = bw_val << NOC_QOS_BWn_BW_SHFT; + writel_relaxed(((reg_val & (~(NOC_QOS_BWn_BW_BMSK))) | + (val & NOC_QOS_BWn_BW_BMSK)), + NOC_QOS_BWn_ADDR(base, qos_off, mport, qos_delta)); + + MSM_BUS_DBG("NOC: BW: Wrote value: 0x%x\n", ((reg_val & + (~NOC_QOS_BWn_BW_BMSK)) | (val & + NOC_QOS_BWn_BW_BMSK))); + + reg_val = readl_relaxed(NOC_QOS_SATn_ADDR(base, qos_off, + mport, qos_delta)); + val = sat_val << NOC_QOS_SATn_SAT_SHFT; + writel_relaxed(((reg_val & (~(NOC_QOS_SATn_SAT_BMSK))) | + (val & NOC_QOS_SATn_SAT_BMSK)), + NOC_QOS_SATn_ADDR(base, qos_off, mport, qos_delta)); + + MSM_BUS_DBG("NOC: SAT: Wrote value: 0x%x\n", ((reg_val & + (~NOC_QOS_SATn_SAT_BMSK)) | (val & + NOC_QOS_SATn_SAT_BMSK))); + + /* Set mode back to what it was initially */ + reg_val = readl_relaxed(NOC_QOS_MODEn_ADDR(base, qos_off, + mport, qos_delta)); + writel_relaxed((reg_val & (~(NOC_QOS_MODEn_MODE_BMSK))) + | (mode & NOC_QOS_MODEn_MODE_BMSK), + NOC_QOS_MODEn_ADDR(base, qos_off, mport, qos_delta)); + /* Ensure that all writes for bandwidth registers have + * completed before returning + */ + wmb(); + } +} + +uint8_t msm_bus_noc_get_qos_mode(void __iomem *base, uint32_t qos_off, + uint32_t mport, uint32_t qos_delta, uint32_t mode, uint32_t perm_mode) +{ + if (NOC_QOS_MODES_ALL_PERM == perm_mode) + return readl_relaxed(NOC_QOS_MODEn_ADDR(base, qos_off, + mport, qos_delta)) & NOC_QOS_MODEn_MODE_BMSK; + else + return 31 - __CLZ(mode & + 
NOC_QOS_MODES_ALL_PERM); +} + +void msm_bus_noc_get_qos_priority(void __iomem *base, uint32_t qos_off, + uint32_t mport, uint32_t qos_delta, + struct msm_bus_noc_qos_priority *priority) +{ + priority->p1 = (readl_relaxed(NOC_QOS_PRIORITYn_ADDR(base, qos_off, + mport, qos_delta)) & NOC_QOS_PRIORITYn_P1_BMSK) >> + NOC_QOS_PRIORITYn_P1_SHFT; + + priority->p0 = (readl_relaxed(NOC_QOS_PRIORITYn_ADDR(base, qos_off, + mport, qos_delta)) & NOC_QOS_PRIORITYn_P0_BMSK) >> + NOC_QOS_PRIORITYn_P0_SHFT; +} + +void msm_bus_noc_get_qos_bw(void __iomem *base, uint32_t qos_off, + uint32_t qos_freq, + uint32_t mport, uint32_t qos_delta, uint8_t perm_mode, + struct msm_bus_noc_qos_bw *qbw) +{ + if (perm_mode & (NOC_QOS_PERM_MODE_LIMITER | + NOC_QOS_PERM_MODE_REGULATOR)) { + uint32_t bw_val = readl_relaxed(NOC_QOS_BWn_ADDR( + base, qos_off, mport, qos_delta)) & NOC_QOS_BWn_BW_BMSK; + uint32_t sat = readl_relaxed(NOC_QOS_SATn_ADDR( + base, qos_off, mport, qos_delta)) + & NOC_QOS_SATn_SAT_BMSK; + + qbw->bw = noc_bw(bw_val, qos_freq); + qbw->ws = noc_ws(qbw->bw, sat, qos_freq); + } else { + qbw->bw = 0; + qbw->ws = 0; + } +} + +static int msm_bus_noc_mas_init(struct msm_bus_noc_info *ninfo, + struct msm_bus_inode_info *info) +{ + int i; + struct msm_bus_noc_qos_priority *prio; + prio = kzalloc(sizeof(struct msm_bus_noc_qos_priority), + GFP_KERNEL); + if (!prio) { + MSM_BUS_WARN("Couldn't alloc prio data for node: %d\n", + info->node_info->id); + return -ENOMEM; + } + + prio->read_prio = info->node_info->prio_rd; + prio->write_prio = info->node_info->prio_wr; + prio->p1 = info->node_info->prio1; + prio->p0 = info->node_info->prio0; + info->hw_data = (void *)prio; + + if (!info->node_info->qport) { + MSM_BUS_DBG("No QoS Ports to init\n"); + return 0; + } + + for (i = 0; i < info->node_info->num_mports; i++) { + if (info->node_info->mode != NOC_QOS_MODE_BYPASS) { + noc_set_qos_priority(ninfo->base, ninfo->qos_baseoffset, + info->node_info->qport[i], ninfo->qos_delta, + prio); + + if 
(info->node_info->mode != NOC_QOS_MODE_FIXED) { + struct msm_bus_noc_qos_bw qbw; + qbw.ws = info->node_info->ws; + qbw.bw = 0; + msm_bus_noc_set_qos_bw(ninfo->base, + ninfo->qos_baseoffset, + ninfo->qos_freq, info->node_info-> + qport[i], ninfo->qos_delta, + info->node_info->perm_mode, + &qbw); + } + } + + noc_set_qos_mode(ninfo->base, ninfo->qos_baseoffset, + info->node_info->qport[i], ninfo->qos_delta, + info->node_info->mode, + info->node_info->perm_mode); + } + + return 0; +} + +static void msm_bus_noc_node_init(void *hw_data, + struct msm_bus_inode_info *info) +{ + struct msm_bus_noc_info *ninfo = + (struct msm_bus_noc_info *)hw_data; + + if (!IS_SLAVE(info->node_info->priv_id)) + if (info->node_info->hw_sel != MSM_BUS_RPM) + msm_bus_noc_mas_init(ninfo, info); +} + +static int msm_bus_noc_allocate_commit_data(struct msm_bus_fabric_registration + *fab_pdata, void **cdata, int ctx) +{ + struct msm_bus_noc_commit **cd = (struct msm_bus_noc_commit **)cdata; + struct msm_bus_noc_info *ninfo = + (struct msm_bus_noc_info *)fab_pdata->hw_data; + + *cd = kzalloc(sizeof(struct msm_bus_noc_commit), GFP_KERNEL); + if (!*cd) { + MSM_BUS_DBG("Couldn't alloc mem for cdata\n"); + return -ENOMEM; + } + + (*cd)->mas = ninfo->cdata[ctx].mas; + (*cd)->slv = ninfo->cdata[ctx].slv; + + return 0; +} + +static void *msm_bus_noc_allocate_noc_data(struct platform_device *pdev, + struct msm_bus_fabric_registration *fab_pdata) +{ + struct resource *noc_mem; + struct resource *noc_io; + struct msm_bus_noc_info *ninfo; + int i; + + ninfo = kzalloc(sizeof(struct msm_bus_noc_info), GFP_KERNEL); + if (!ninfo) { + MSM_BUS_DBG("Couldn't alloc mem for noc info\n"); + return NULL; + } + + ninfo->nmasters = fab_pdata->nmasters; + ninfo->nqos_masters = fab_pdata->nmasters; + ninfo->nslaves = fab_pdata->nslaves; + ninfo->qos_freq = fab_pdata->qos_freq; + + if (!fab_pdata->qos_baseoffset) + ninfo->qos_baseoffset = QOS_DEFAULT_BASEOFFSET; + else + ninfo->qos_baseoffset = fab_pdata->qos_baseoffset; + + 
if (!fab_pdata->qos_delta) + ninfo->qos_delta = QOS_DEFAULT_DELTA; + else + ninfo->qos_delta = fab_pdata->qos_delta; + + ninfo->mas_modes = kzalloc(sizeof(uint32_t) * fab_pdata->nmasters, + GFP_KERNEL); + if (!ninfo->mas_modes) { + MSM_BUS_DBG("Couldn't alloc mem for noc master-modes\n"); + return NULL; + } + + for (i = 0; i < NUM_CTX; i++) { + ninfo->cdata[i].mas = kzalloc(sizeof(struct + msm_bus_node_hw_info) * fab_pdata->nmasters * 2, + GFP_KERNEL); + if (!ninfo->cdata[i].mas) { + MSM_BUS_DBG("Couldn't alloc mem for noc master-bw\n"); + kfree(ninfo->mas_modes); + kfree(ninfo); + return NULL; + } + + ninfo->cdata[i].slv = kzalloc(sizeof(struct + msm_bus_node_hw_info) * fab_pdata->nslaves * 2, + GFP_KERNEL); + if (!ninfo->cdata[i].slv) { + MSM_BUS_DBG("Couldn't alloc mem for noc master-bw\n"); + kfree(ninfo->cdata[i].mas); + goto err; + } + } + + /* If it's a virtual fabric, don't get memory info */ + if (fab_pdata->virt) + goto skip_mem; + + noc_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!noc_mem && !fab_pdata->virt) { + MSM_BUS_ERR("Cannot get NoC Base address\n"); + goto err; + } + + noc_io = request_mem_region(noc_mem->start, + resource_size(noc_mem), pdev->name); + if (!noc_io) { + MSM_BUS_ERR("NoC memory unavailable\n"); + goto err; + } + + ninfo->base = ioremap(noc_mem->start, resource_size(noc_mem)); + if (!ninfo->base) { + MSM_BUS_ERR("IOremap failed for NoC!\n"); + release_mem_region(noc_mem->start, resource_size(noc_mem)); + goto err; + } + +skip_mem: + fab_pdata->hw_data = (void *)ninfo; + return (void *)ninfo; + +err: + kfree(ninfo->mas_modes); + kfree(ninfo); + return NULL; +} + +static void free_commit_data(void *cdata) +{ + struct msm_bus_noc_commit *cd = (struct msm_bus_noc_commit *)cdata; + + kfree(cd->mas); + kfree(cd->slv); + kfree(cd); +} + +static bool msm_bus_noc_update_bw_reg(int mode) +{ + bool ret = false; + + if ((mode == NOC_QOS_MODE_LIMITER) || + (mode == NOC_QOS_MODE_REGULATOR)) + ret = true; + + return ret; +} + 
+static void msm_bus_noc_update_bw(struct msm_bus_inode_info *hop, + struct msm_bus_inode_info *info, + struct msm_bus_fabric_registration *fab_pdata, + void *sel_cdata, int *master_tiers, + int64_t add_bw) +{ + struct msm_bus_noc_info *ninfo; + struct msm_bus_noc_qos_bw qos_bw; + int i, ports; + int64_t bw; + struct msm_bus_noc_commit *sel_cd = + (struct msm_bus_noc_commit *)sel_cdata; + + ninfo = (struct msm_bus_noc_info *)fab_pdata->hw_data; + if (!ninfo->qos_freq) { + MSM_BUS_DBG("NOC: No qos frequency to update bw\n"); + return; + } + + if (info->node_info->num_mports == 0) { + MSM_BUS_DBG("NOC: Skip Master BW\n"); + goto skip_mas_bw; + } + + ports = info->node_info->num_mports; + bw = INTERLEAVED_BW(fab_pdata, add_bw, ports); + + MSM_BUS_DBG("NOC: Update bw for: %d: %lld\n", + info->node_info->priv_id, add_bw); + for (i = 0; i < ports; i++) { + sel_cd->mas[info->node_info->masterp[i]].bw += bw; + sel_cd->mas[info->node_info->masterp[i]].hw_id = + info->node_info->mas_hw_id; + MSM_BUS_DBG("NOC: Update mas_bw: ID: %d, BW: %llu ports:%d\n", + info->node_info->priv_id, + sel_cd->mas[info->node_info->masterp[i]].bw, + ports); + /* Check if info is a shared master. + * If it is, mark it dirty + * If it isn't, then set QOS Bandwidth + **/ + if (info->node_info->hw_sel == MSM_BUS_RPM) + sel_cd->mas[info->node_info->masterp[i]].dirty = 1; + else { + if (!info->node_info->qport) { + MSM_BUS_DBG("No qos ports to update!\n"); + break; + } + + if (!(info->node_info->mode == NOC_QOS_MODE_REGULATOR) + || (info->node_info->mode == + NOC_QOS_MODE_LIMITER)) { + MSM_BUS_DBG("Skip QoS reg programming\n"); + break; + } + qos_bw.bw = sel_cd->mas[info->node_info->masterp[i]]. 
+ bw; + qos_bw.ws = info->node_info->ws; + msm_bus_noc_set_qos_bw(ninfo->base, + ninfo->qos_baseoffset, + ninfo->qos_freq, + info->node_info->qport[i], ninfo->qos_delta, + info->node_info->perm_mode, &qos_bw); + MSM_BUS_DBG("NOC: QoS: Update mas_bw: ws: %u\n", + qos_bw.ws); + } + } + +skip_mas_bw: + ports = hop->node_info->num_sports; + for (i = 0; i < ports; i++) { + sel_cd->slv[hop->node_info->slavep[i]].bw += add_bw; + sel_cd->slv[hop->node_info->slavep[i]].hw_id = + hop->node_info->slv_hw_id; + MSM_BUS_DBG("NOC: Update slave_bw for ID: %d -> %llu\n", + hop->node_info->priv_id, + sel_cd->slv[hop->node_info->slavep[i]].bw); + MSM_BUS_DBG("NOC: Update slave_bw for hw_id: %d, index: %d\n", + hop->node_info->slv_hw_id, hop->node_info->slavep[i]); + /* Check if hop is a shared slave. + * If it is, mark it dirty + * If it isn't, then nothing to be done as the + * slaves are in bypass mode. + **/ + if (hop->node_info->hw_sel == MSM_BUS_RPM) + sel_cd->slv[hop->node_info->slavep[i]].dirty = 1; + } +} + +static int msm_bus_noc_commit(struct msm_bus_fabric_registration + *fab_pdata, void *hw_data, void **cdata) +{ + MSM_BUS_DBG("\nReached NOC Commit\n"); + msm_bus_remote_hw_commit(fab_pdata, hw_data, cdata); + return 0; +} + +static int msm_bus_noc_port_halt(uint32_t haltid, uint8_t mport) +{ + return 0; +} + +static int msm_bus_noc_port_unhalt(uint32_t haltid, uint8_t mport) +{ + return 0; +} + +static int msm_bus_noc_qos_init(struct msm_bus_node_device_type *info, + void __iomem *qos_base, + uint32_t qos_off, uint32_t qos_delta, + uint32_t qos_freq) +{ + struct msm_bus_noc_qos_priority prio; + int ret = 0; + int i; + + prio.p1 = info->node_info->qos_params.prio1; + prio.p0 = info->node_info->qos_params.prio0; + + if (!info->node_info->qport) { + MSM_BUS_DBG("No QoS Ports to init\n"); + ret = 0; + goto err_qos_init; + } + + for (i = 0; i < info->node_info->num_qports; i++) { + if (info->node_info->qos_params.mode != NOC_QOS_MODE_BYPASS) { + noc_set_qos_priority(qos_base, 
qos_off, + info->node_info->qport[i], qos_delta, + &prio); + + if (info->node_info->qos_params.mode != + NOC_QOS_MODE_FIXED) { + struct msm_bus_noc_qos_bw qbw; + qbw.ws = info->node_info->qos_params.ws; + qbw.bw = 0; + msm_bus_noc_set_qos_bw(qos_base, qos_off, + qos_freq, + info->node_info->qport[i], + qos_delta, + info->node_info->qos_params.mode, + &qbw); + } + } + + noc_set_qos_mode(qos_base, qos_off, info->node_info->qport[i], + qos_delta, info->node_info->qos_params.mode, + (1 << info->node_info->qos_params.mode)); + } +err_qos_init: + return ret; +} + +static int msm_bus_noc_set_bw(struct msm_bus_node_device_type *dev, + void __iomem *qos_base, + uint32_t qos_off, uint32_t qos_delta, + uint32_t qos_freq) +{ + int ret = 0; + uint64_t bw = 0; + int i; + struct msm_bus_node_info_type *info = dev->node_info; + + if (info && info->num_qports && + ((info->qos_params.mode == NOC_QOS_MODE_REGULATOR) || + (info->qos_params.mode == + NOC_QOS_MODE_LIMITER))) { + struct msm_bus_noc_qos_bw qos_bw; + + bw = msm_bus_div64(info->num_qports, + dev->node_ab.ab[DUAL_CTX]); + + for (i = 0; i < info->num_qports; i++) { + if (!info->qport) { + MSM_BUS_DBG("No qos ports to update!\n"); + break; + } + + qos_bw.bw = bw; + qos_bw.ws = info->qos_params.ws; + msm_bus_noc_set_qos_bw(qos_base, qos_off, qos_freq, + info->qport[i], qos_delta, + info->qos_params.mode, &qos_bw); + MSM_BUS_DBG("NOC: QoS: Update mas_bw: ws: %u\n", + qos_bw.ws); + } + } + return ret; +} +int msm_bus_noc_hw_init(struct msm_bus_fabric_registration *pdata, + struct msm_bus_hw_algorithm *hw_algo) +{ + /* Set interleaving to true by default */ + pdata->il_flag = true; + hw_algo->allocate_commit_data = msm_bus_noc_allocate_commit_data; + hw_algo->allocate_hw_data = msm_bus_noc_allocate_noc_data; + hw_algo->node_init = msm_bus_noc_node_init; + hw_algo->free_commit_data = free_commit_data; + hw_algo->update_bw = msm_bus_noc_update_bw; + hw_algo->commit = msm_bus_noc_commit; + hw_algo->port_halt = msm_bus_noc_port_halt; 
+ hw_algo->port_unhalt = msm_bus_noc_port_unhalt; + hw_algo->update_bw_reg = msm_bus_noc_update_bw_reg; + hw_algo->config_master = NULL; + hw_algo->config_limiter = NULL; + + return 0; +} + +int msm_bus_noc_set_ops(struct msm_bus_node_device_type *bus_dev) +{ + if (!bus_dev) + return -ENODEV; + else { + bus_dev->fabdev->noc_ops.qos_init = msm_bus_noc_qos_init; + bus_dev->fabdev->noc_ops.set_bw = msm_bus_noc_set_bw; + bus_dev->fabdev->noc_ops.limit_mport = NULL; + bus_dev->fabdev->noc_ops.update_bw_reg = + msm_bus_noc_update_bw_reg; + } + return 0; +} +EXPORT_SYMBOL(msm_bus_noc_set_ops); diff --git a/drivers/soc/qcom/msm_bus/msm_bus_noc.h b/drivers/soc/qcom/msm_bus/msm_bus_noc.h new file mode 100644 index 000000000000..3995f63e79e3 --- /dev/null +++ b/drivers/soc/qcom/msm_bus/msm_bus_noc.h @@ -0,0 +1,76 @@ +/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _ARCH_ARM_MACH_MSM_BUS_BIMC_H +#define _ARCH_ARM_MACH_MSM_BUS_BIMC_H + +enum msm_bus_noc_qos_mode_type { + NOC_QOS_MODE_FIXED = 0, + NOC_QOS_MODE_LIMITER, + NOC_QOS_MODE_BYPASS, + NOC_QOS_MODE_REGULATOR, + NOC_QOS_MODE_MAX, +}; + +enum msm_bus_noc_qos_mode_perm { + NOC_QOS_PERM_MODE_FIXED = (1 << NOC_QOS_MODE_FIXED), + NOC_QOS_PERM_MODE_LIMITER = (1 << NOC_QOS_MODE_LIMITER), + NOC_QOS_PERM_MODE_BYPASS = (1 << NOC_QOS_MODE_BYPASS), + NOC_QOS_PERM_MODE_REGULATOR = (1 << NOC_QOS_MODE_REGULATOR), +}; + +#define NOC_QOS_MODES_ALL_PERM (NOC_QOS_PERM_MODE_FIXED | \ + NOC_QOS_PERM_MODE_LIMITER | NOC_QOS_PERM_MODE_BYPASS | \ + NOC_QOS_PERM_MODE_REGULATOR) + +struct msm_bus_noc_commit { + struct msm_bus_node_hw_info *mas; + struct msm_bus_node_hw_info *slv; +}; + +struct msm_bus_noc_info { + void __iomem *base; + uint32_t base_addr; + uint32_t nmasters; + uint32_t nqos_masters; + uint32_t nslaves; + uint32_t qos_freq; /* QOS Clock in KHz */ + uint32_t qos_baseoffset; + uint32_t qos_delta; + uint32_t *mas_modes; + struct msm_bus_noc_commit cdata[NUM_CTX]; +}; + +struct msm_bus_noc_qos_priority { + uint32_t high_prio; + uint32_t low_prio; + uint32_t read_prio; + uint32_t write_prio; + uint32_t p1; + uint32_t p0; +}; + +struct msm_bus_noc_qos_bw { + uint64_t bw; /* Bandwidth in bytes per second */ + uint32_t ws; /* Window size in nano seconds */ +}; + +void msm_bus_noc_init(struct msm_bus_noc_info *ninfo); +uint8_t msm_bus_noc_get_qos_mode(void __iomem *base, uint32_t qos_off, + uint32_t mport, uint32_t qos_delta, uint32_t mode, uint32_t perm_mode); +void msm_bus_noc_get_qos_priority(void __iomem *base, uint32_t qos_off, + uint32_t mport, uint32_t qos_delta, + struct msm_bus_noc_qos_priority *qprio); +void msm_bus_noc_get_qos_bw(void __iomem *base, uint32_t qos_off, + uint32_t qos_freq, uint32_t mport, uint32_t qos_delta, + uint8_t perm_mode, struct msm_bus_noc_qos_bw *qbw); +#endif /*_ARCH_ARM_MACH_MSM_BUS_NOC_H */ diff --git 
a/drivers/soc/qcom/msm_bus/msm_bus_of.c b/drivers/soc/qcom/msm_bus/msm_bus_of.c new file mode 100644 index 000000000000..b625a6c8336e --- /dev/null +++ b/drivers/soc/qcom/msm_bus/msm_bus_of.c @@ -0,0 +1,703 @@ +/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__ + +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/msm-bus.h> +#include <linux/msm-bus-board.h> +#include "msm_bus_core.h" + +static const char * const hw_sel_name[] = {"RPM", "NoC", "BIMC", NULL}; +static const char * const mode_sel_name[] = {"Fixed", "Limiter", "Bypass", + "Regulator", NULL}; + +static int get_num(const char *const str[], const char *name) +{ + int i = 0; + + do { + if (!strcmp(name, str[i])) + return i; + + i++; + } while (str[i] != NULL); + + pr_err("Error: string %s not found\n", name); + return -EINVAL; +} + +static struct msm_bus_scale_pdata *get_pdata(struct platform_device *pdev, + struct device_node *of_node) +{ + struct msm_bus_scale_pdata *pdata = NULL; + struct msm_bus_paths *usecase = NULL; + int i = 0, j, ret, num_usecases = 0, num_paths, len; + const uint32_t *vec_arr = NULL; + bool mem_err = false; + + if (!pdev) { + pr_err("Error: Null Platform device\n"); + return NULL; + } + + pdata = devm_kzalloc(&pdev->dev, sizeof(struct msm_bus_scale_pdata), + GFP_KERNEL); + if (!pdata) { + pr_err("Error: Memory 
allocation for pdata failed\n"); + mem_err = true; + goto err; + } + + ret = of_property_read_string(of_node, "qcom,msm-bus,name", + &pdata->name); + if (ret) { + pr_err("Error: Client name not found\n"); + goto err; + } + + ret = of_property_read_u32(of_node, "qcom,msm-bus,num-cases", + &num_usecases); + if (ret) { + pr_err("Error: num-usecases not found\n"); + goto err; + } + + pdata->num_usecases = num_usecases; + + if (of_property_read_bool(of_node, "qcom,msm-bus,active-only")) + pdata->active_only = 1; + else { + pr_debug("active_only flag absent.\n"); + pr_debug("Using dual context by default\n"); + } + + usecase = devm_kzalloc(&pdev->dev, (sizeof(struct msm_bus_paths) * + pdata->num_usecases), GFP_KERNEL); + if (!usecase) { + pr_err("Error: Memory allocation for paths failed\n"); + mem_err = true; + goto err; + } + + ret = of_property_read_u32(of_node, "qcom,msm-bus,num-paths", + &num_paths); + if (ret) { + pr_err("Error: num_paths not found\n"); + goto err; + } + + vec_arr = of_get_property(of_node, "qcom,msm-bus,vectors-KBps", &len); + if (vec_arr == NULL) { + pr_err("Error: Vector array not found\n"); + goto err; + } + + if (len != num_usecases * num_paths * sizeof(uint32_t) * 4) { + pr_err("Error: Length-error on getting vectors\n"); + goto err; + } + + for (i = 0; i < num_usecases; i++) { + usecase[i].num_paths = num_paths; + usecase[i].vectors = devm_kzalloc(&pdev->dev, num_paths * + sizeof(struct msm_bus_vectors), GFP_KERNEL); + if (!usecase[i].vectors) { + mem_err = true; + pr_err("Error: Mem alloc failure in vectors\n"); + goto err; + } + + for (j = 0; j < num_paths; j++) { + int index = ((i * num_paths) + j) * 4; + usecase[i].vectors[j].src = be32_to_cpu(vec_arr[index]); + usecase[i].vectors[j].dst = + be32_to_cpu(vec_arr[index + 1]); + usecase[i].vectors[j].ab = (uint64_t) + KBTOB(be32_to_cpu(vec_arr[index + 2])); + usecase[i].vectors[j].ib = (uint64_t) + KBTOB(be32_to_cpu(vec_arr[index + 3])); + } + } + + pdata->usecase = usecase; + return pdata; 
+err: + if (mem_err) { + for (; i > 0; i--) + kfree(usecase[i-1].vectors); + + kfree(usecase); + kfree(pdata); + } + + return NULL; +} + +/** + * msm_bus_cl_get_pdata() - Generate bus client data from device tree + * provided by clients. + * + * of_node: Device tree node to extract information from + * + * The function returns a valid pointer to the allocated bus-scale-pdata + * if the vectors were correctly read from the client's device node. + * Any error in reading or parsing the device node will return NULL + * to the caller. + */ +struct msm_bus_scale_pdata *msm_bus_cl_get_pdata(struct platform_device *pdev) +{ + struct device_node *of_node; + struct msm_bus_scale_pdata *pdata = NULL; + + if (!pdev) { + pr_err("Error: Null Platform device\n"); + return NULL; + } + + of_node = pdev->dev.of_node; + pdata = get_pdata(pdev, of_node); + if (!pdata) { + pr_err("client has to provide missing entry for successful registration\n"); + return NULL; + } + + return pdata; +} +EXPORT_SYMBOL(msm_bus_cl_get_pdata); + +/** + * msm_bus_cl_pdata_from_node() - Generate bus client data from device tree + * node provided by clients. This function should be used when a client + * driver needs to register multiple bus-clients from a single device-tree + * node associated with the platform-device. + * + * of_node: The subnode containing information about the bus scaling + * data + * + * pdev: Platform device associated with the device-tree node + * + * The function returns a valid pointer to the allocated bus-scale-pdata + * if the vectors were correctly read from the client's device node. + * Any error in reading or parsing the device node will return NULL + * to the caller. 
+ */ +struct msm_bus_scale_pdata *msm_bus_pdata_from_node( + struct platform_device *pdev, struct device_node *of_node) +{ + struct msm_bus_scale_pdata *pdata = NULL; + + if (!pdev) { + pr_err("Error: Null Platform device\n"); + return NULL; + } + + if (!of_node) { + pr_err("Error: Null of_node passed to bus driver\n"); + return NULL; + } + + pdata = get_pdata(pdev, of_node); + if (!pdata) { + pr_err("client has to provide missing entry for successful registration\n"); + return NULL; + } + + return pdata; +} +EXPORT_SYMBOL(msm_bus_pdata_from_node); + +/** + * msm_bus_cl_clear_pdata() - Clear pdata allocated from device-tree + * of_node: Device tree node to extract information from + */ +void msm_bus_cl_clear_pdata(struct msm_bus_scale_pdata *pdata) +{ + int i; + + for (i = 0; i < pdata->num_usecases; i++) + kfree(pdata->usecase[i].vectors); + + kfree(pdata->usecase); + kfree(pdata); +} +EXPORT_SYMBOL(msm_bus_cl_clear_pdata); + +static int *get_arr(struct platform_device *pdev, + const struct device_node *node, const char *prop, + int *nports) +{ + int size = 0, ret; + int *arr = NULL; + + if (of_get_property(node, prop, &size)) { + *nports = size / sizeof(int); + } else { + pr_debug("Property %s not available\n", prop); + *nports = 0; + return NULL; + } + + if (!size) { + *nports = 0; + return NULL; + } + + arr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); + if (ZERO_OR_NULL_PTR(arr)) { + pr_err("Error: Failed to alloc mem for %s\n", prop); + return NULL; + } + + ret = of_property_read_u32_array(node, prop, (u32 *)arr, *nports); + if (ret) { + pr_err("Error in reading property: %s\n", prop); + goto err; + } + + return arr; +err: + devm_kfree(&pdev->dev, arr); + return NULL; +} + +static u64 *get_th_params(struct platform_device *pdev, + const struct device_node *node, const char *prop, + int *nports) +{ + int size = 0, ret; + u64 *ret_arr = NULL; + int *arr = NULL; + int i; + + if (of_get_property(node, prop, &size)) { + *nports = size / sizeof(int); + } else { + 
pr_debug("Property %s not available\n", prop); + *nports = 0; + return NULL; + } + + if (!size) { + *nports = 0; + return NULL; + } + + ret_arr = devm_kzalloc(&pdev->dev, (*nports * sizeof(u64)), + GFP_KERNEL); + if (ZERO_OR_NULL_PTR(ret_arr)) { + pr_err("Error: Failed to alloc mem for ret arr %s\n", prop); + return NULL; + } + + arr = kzalloc(size, GFP_KERNEL); + if ((ZERO_OR_NULL_PTR(arr))) { + pr_err("Error: Failed to alloc temp mem for %s\n", prop); + return NULL; + } + + ret = of_property_read_u32_array(node, prop, (u32 *)arr, *nports); + if (ret) { + pr_err("Error in reading property: %s\n", prop); + goto err; + } + + for (i = 0; i < *nports; i++) + ret_arr[i] = (uint64_t)KBTOB(arr[i]); + + MSM_BUS_DBG("%s: num entries %d prop %s", __func__, *nports, prop); + + for (i = 0; i < *nports; i++) + MSM_BUS_DBG("Th %d val %llu", i, ret_arr[i]); + + kfree(arr); + return ret_arr; +err: + kfree(arr); + devm_kfree(&pdev->dev, ret_arr); + return NULL; +} + +static struct msm_bus_node_info *get_nodes(struct device_node *of_node, + struct platform_device *pdev, + struct msm_bus_fabric_registration *pdata) +{ + struct msm_bus_node_info *info; + struct device_node *child_node = NULL; + int i = 0, ret; + int num_bw = 0; + u32 temp; + + for_each_child_of_node(of_node, child_node) { + i++; + } + + pdata->len = i; + info = (struct msm_bus_node_info *) + devm_kzalloc(&pdev->dev, sizeof(struct msm_bus_node_info) * + pdata->len, GFP_KERNEL); + if (ZERO_OR_NULL_PTR(info)) { + pr_err("Failed to alloc memory for nodes: %d\n", pdata->len); + goto err; + } + + i = 0; + child_node = NULL; + for_each_child_of_node(of_node, child_node) { + const char *sel_str; + + ret = of_property_read_string(child_node, "label", + &info[i].name); + if (ret) + pr_err("Error reading node label\n"); + + ret = of_property_read_u32(child_node, "cell-id", &info[i].id); + if (ret) { + pr_err("Error reading node id\n"); + goto err; + } + + if (of_property_read_bool(child_node, "qcom,gateway")) + info[i].gateway 
= 1; + + of_property_read_u32(child_node, "qcom,mas-hw-id", + &info[i].mas_hw_id); + + of_property_read_u32(child_node, "qcom,slv-hw-id", + &info[i].slv_hw_id); + info[i].masterp = get_arr(pdev, child_node, + "qcom,masterp", &info[i].num_mports); + /* No need to store number of qports */ + info[i].qport = get_arr(pdev, child_node, + "qcom,qport", &ret); + pdata->nmasters += info[i].num_mports; + + + info[i].slavep = get_arr(pdev, child_node, + "qcom,slavep", &info[i].num_sports); + pdata->nslaves += info[i].num_sports; + + + info[i].tier = get_arr(pdev, child_node, + "qcom,tier", &info[i].num_tiers); + + if (of_property_read_bool(child_node, "qcom,ahb")) + info[i].ahb = 1; + + ret = of_property_read_string(child_node, "qcom,hw-sel", + &sel_str); + if (ret) + info[i].hw_sel = 0; + else { + ret = get_num(hw_sel_name, sel_str); + if (ret < 0) { + pr_err("Invalid hw-sel\n"); + goto err; + } + + info[i].hw_sel = ret; + } + + of_property_read_u32(child_node, "qcom,buswidth", + &info[i].buswidth); + of_property_read_u32(child_node, "qcom,ws", &info[i].ws); + + info[i].dual_conf = + of_property_read_bool(child_node, "qcom,dual-conf"); + + + info[i].th = get_th_params(pdev, child_node, "qcom,thresh", + &info[i].num_thresh); + + info[i].bimc_bw = get_th_params(pdev, child_node, + "qcom,bimc,bw", &num_bw); + + if (num_bw != info[i].num_thresh) { + pr_err("%s:num_bw %d must equal num_thresh %d", + __func__, num_bw, info[i].num_thresh); + pr_err("%s:Err setting up dual conf for %s", + __func__, info[i].name); + goto err; + } + + of_property_read_u32(child_node, "qcom,bimc,gp", + &info[i].bimc_gp); + of_property_read_u32(child_node, "qcom,bimc,thmp", + &info[i].bimc_thmp); + + ret = of_property_read_string(child_node, "qcom,mode-thresh", + &sel_str); + if (ret) + info[i].mode_thresh = 0; + else { + ret = get_num(mode_sel_name, sel_str); + if (ret < 0) { + pr_err("Unknown mode :%s\n", sel_str); + goto err; + } + + info[i].mode_thresh = ret; + MSM_BUS_DBG("AXI: THreshold mode set: 
%d\n", + info[i].mode_thresh); + } + + ret = of_property_read_string(child_node, "qcom,mode", + &sel_str); + + if (ret) + info[i].mode = 0; + else { + ret = get_num(mode_sel_name, sel_str); + if (ret < 0) { + pr_err("Unknown mode :%s\n", sel_str); + goto err; + } + + info[i].mode = ret; + } + + info[i].nr_lim = + of_property_read_bool(child_node, "qcom,nr-lim"); + + ret = of_property_read_u32(child_node, "qcom,ff", + &info[i].ff); + if (ret) { + pr_debug("fudge factor not present %d", info[i].id); + info[i].ff = 0; + } + + ret = of_property_read_u32(child_node, "qcom,floor-bw", + &temp); + if (ret) { + pr_debug("fabdev floor bw not present %d", info[i].id); + info[i].floor_bw = 0; + } else { + info[i].floor_bw = KBTOB(temp); + } + + info[i].rt_mas = + of_property_read_bool(child_node, "qcom,rt-mas"); + + ret = of_property_read_string(child_node, "qcom,perm-mode", + &sel_str); + if (ret) + info[i].perm_mode = 0; + else { + ret = get_num(mode_sel_name, sel_str); + if (ret < 0) + goto err; + + info[i].perm_mode = 1 << ret; + } + + of_property_read_u32(child_node, "qcom,prio-lvl", + &info[i].prio_lvl); + of_property_read_u32(child_node, "qcom,prio-rd", + &info[i].prio_rd); + of_property_read_u32(child_node, "qcom,prio-wr", + &info[i].prio_wr); + of_property_read_u32(child_node, "qcom,prio0", &info[i].prio0); + of_property_read_u32(child_node, "qcom,prio1", &info[i].prio1); + ret = of_property_read_string(child_node, "qcom,slaveclk-dual", + &info[i].slaveclk[DUAL_CTX]); + if (!ret) + pr_debug("Got slaveclk_dual: %s\n", + info[i].slaveclk[DUAL_CTX]); + else + info[i].slaveclk[DUAL_CTX] = NULL; + + ret = of_property_read_string(child_node, + "qcom,slaveclk-active", &info[i].slaveclk[ACTIVE_CTX]); + if (!ret) + pr_debug("Got slaveclk_active\n"); + else + info[i].slaveclk[ACTIVE_CTX] = NULL; + + ret = of_property_read_string(child_node, "qcom,memclk-dual", + &info[i].memclk[DUAL_CTX]); + if (!ret) + pr_debug("Got memclk_dual\n"); + else + info[i].memclk[DUAL_CTX] = NULL; + 
+ ret = of_property_read_string(child_node, "qcom,memclk-active", + &info[i].memclk[ACTIVE_CTX]); + if (!ret) + pr_debug("Got memclk_active\n"); + else + info[i].memclk[ACTIVE_CTX] = NULL; + + ret = of_property_read_string(child_node, "qcom,iface-clk-node", + &info[i].iface_clk_node); + if (!ret) + pr_debug("Got iface_clk_node\n"); + else + info[i].iface_clk_node = NULL; + + pr_debug("Node name: %s\n", info[i].name); + of_node_put(child_node); + i++; + } + + pr_debug("Bus %d added: %d masters\n", pdata->id, pdata->nmasters); + pr_debug("Bus %d added: %d slaves\n", pdata->id, pdata->nslaves); + return info; +err: + return NULL; +} + +void msm_bus_of_get_nfab(struct platform_device *pdev, + struct msm_bus_fabric_registration *pdata) +{ + struct device_node *of_node; + int ret, nfab = 0; + + if (!pdev) { + pr_err("Error: Null platform device\n"); + return; + } + + of_node = pdev->dev.of_node; + ret = of_property_read_u32(of_node, "qcom,nfab", + &nfab); + if (!ret) + pr_debug("Fab_of: Read number of buses: %u\n", nfab); + + msm_bus_board_set_nfab(pdata, nfab); +} + +struct msm_bus_fabric_registration + *msm_bus_of_get_fab_data(struct platform_device *pdev) +{ + struct device_node *of_node; + struct msm_bus_fabric_registration *pdata; + bool mem_err = false; + int ret = 0; + const char *sel_str; + u32 temp; + + if (!pdev) { + pr_err("Error: Null platform device\n"); + return NULL; + } + + of_node = pdev->dev.of_node; + pdata = devm_kzalloc(&pdev->dev, + sizeof(struct msm_bus_fabric_registration), GFP_KERNEL); + if (!pdata) { + pr_err("Error: Memory allocation for pdata failed\n"); + mem_err = true; + goto err; + } + + ret = of_property_read_string(of_node, "label", &pdata->name); + if (ret) { + pr_err("Error: label not found\n"); + goto err; + } + pr_debug("Fab_of: Read name: %s\n", pdata->name); + + ret = of_property_read_u32(of_node, "cell-id", + &pdata->id); + if (ret) { + pr_err("Error: num-usecases not found\n"); + goto err; + } + pr_debug("Fab_of: Read id: %u\n", 
pdata->id); + + if (of_property_read_bool(of_node, "qcom,ahb")) + pdata->ahb = 1; + + ret = of_property_read_string(of_node, "qcom,fabclk-dual", + &pdata->fabclk[DUAL_CTX]); + if (ret) { + pr_debug("fabclk_dual not available\n"); + pdata->fabclk[DUAL_CTX] = NULL; + } else + pr_debug("Fab_of: Read clk dual ctx: %s\n", + pdata->fabclk[DUAL_CTX]); + ret = of_property_read_string(of_node, "qcom,fabclk-active", + &pdata->fabclk[ACTIVE_CTX]); + if (ret) { + pr_debug("Error: fabclk_active not available\n"); + pdata->fabclk[ACTIVE_CTX] = NULL; + } else + pr_debug("Fab_of: Read clk act ctx: %s\n", + pdata->fabclk[ACTIVE_CTX]); + + ret = of_property_read_u32(of_node, "qcom,ntieredslaves", + &pdata->ntieredslaves); + if (ret) { + pr_err("Error: ntieredslaves not found\n"); + goto err; + } + + ret = of_property_read_u32(of_node, "qcom,qos-freq", &pdata->qos_freq); + if (ret) + pr_debug("qos_freq not available\n"); + + ret = of_property_read_string(of_node, "qcom,hw-sel", &sel_str); + if (ret) { + pr_err("Error: hw_sel not found\n"); + goto err; + } else { + ret = get_num(hw_sel_name, sel_str); + if (ret < 0) + goto err; + + pdata->hw_sel = ret; + } + + if (of_property_read_bool(of_node, "qcom,virt")) + pdata->virt = true; + + ret = of_property_read_u32(of_node, "qcom,qos-baseoffset", + &pdata->qos_baseoffset); + if (ret) + pr_debug("%s:qos_baseoffset not available\n", __func__); + + ret = of_property_read_u32(of_node, "qcom,qos-delta", + &pdata->qos_delta); + if (ret) + pr_debug("%s:qos_delta not available\n", __func__); + + if (of_property_read_bool(of_node, "qcom,rpm-en")) + pdata->rpm_enabled = 1; + + ret = of_property_read_u32(of_node, "qcom,nr-lim-thresh", + &temp); + + if (ret) { + pr_err("nr-lim threshold not specified"); + pdata->nr_lim_thresh = 0; + } else { + pdata->nr_lim_thresh = KBTOB(temp); + } + + ret = of_property_read_u32(of_node, "qcom,eff-fact", + &pdata->eff_fact); + if (ret) { + pr_err("Fab eff-factor not present"); + pdata->eff_fact = 0; + } + + 
pdata->info = get_nodes(of_node, pdev, pdata); + return pdata; +err: + return NULL; +} +EXPORT_SYMBOL(msm_bus_of_get_fab_data); diff --git a/drivers/soc/qcom/msm_bus/msm_bus_of_adhoc.c b/drivers/soc/qcom/msm_bus/msm_bus_of_adhoc.c new file mode 100644 index 000000000000..109febcaa68c --- /dev/null +++ b/drivers/soc/qcom/msm_bus/msm_bus_of_adhoc.c @@ -0,0 +1,642 @@ +/* Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__ + +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/msm-bus.h> +#include <linux/msm-bus-board.h> +#include <linux/msm_bus_rules.h> +#include "msm_bus_core.h" +#include "msm_bus_adhoc.h" + +#define DEFAULT_QOS_FREQ 19200 +#define DEFAULT_UTIL_FACT 100 +#define DEFAULT_VRAIL_COMP 100 + +static int get_qos_mode(struct platform_device *pdev, + struct device_node *node, const char *qos_mode) +{ + const char *qos_names[] = {"fixed", "limiter", "bypass", "regulator"}; + int i = 0; + int ret = -1; + + if (!qos_mode) + goto exit_get_qos_mode; + + for (i = 0; i < ARRAY_SIZE(qos_names); i++) { + if (!strcmp(qos_mode, qos_names[i])) + break; + } + if (i == ARRAY_SIZE(qos_names)) + dev_err(&pdev->dev, "Cannot match mode qos %s using Bypass", + qos_mode); + else + ret = i; + +exit_get_qos_mode: + return ret; +} + +static int *get_arr(struct platform_device 
*pdev, + struct device_node *node, const char *prop, + int *nports) +{ + int size = 0, ret; + int *arr = NULL; + + if (of_get_property(node, prop, &size)) { + *nports = size / sizeof(int); + } else { + dev_dbg(&pdev->dev, "Property %s not available\n", prop); + *nports = 0; + return NULL; + } + + arr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); + if ((size > 0) && ZERO_OR_NULL_PTR(arr)) { + dev_err(&pdev->dev, "Error: Failed to alloc mem for %s\n", + prop); + return NULL; + } + + ret = of_property_read_u32_array(node, prop, (u32 *)arr, *nports); + if (ret) { + dev_err(&pdev->dev, "Error in reading property: %s\n", prop); + goto arr_err; + } + + return arr; +arr_err: + devm_kfree(&pdev->dev, arr); + return NULL; +} + +static struct msm_bus_fab_device_type *get_fab_device_info( + struct device_node *dev_node, + struct platform_device *pdev) +{ + struct msm_bus_fab_device_type *fab_dev; + unsigned int ret; + struct resource *res; + const char *base_name; + + fab_dev = devm_kzalloc(&pdev->dev, + sizeof(struct msm_bus_fab_device_type), + GFP_KERNEL); + if (!fab_dev) { + dev_err(&pdev->dev, + "Error: Unable to allocate memory for fab_dev\n"); + return NULL; + } + + ret = of_property_read_string(dev_node, "qcom,base-name", &base_name); + if (ret) { + dev_err(&pdev->dev, "Error: Unable to get base address name\n"); + goto fab_dev_err; + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, base_name); + if (!res) { + dev_err(&pdev->dev, "Error getting qos base addr %s\n", + base_name); + goto fab_dev_err; + } + fab_dev->pqos_base = res->start; + fab_dev->qos_range = resource_size(res); + fab_dev->bypass_qos_prg = of_property_read_bool(dev_node, + "qcom,bypass-qos-prg"); + + ret = of_property_read_u32(dev_node, "qcom,base-offset", + &fab_dev->base_offset); + if (ret) + dev_dbg(&pdev->dev, "Bus base offset is missing\n"); + + ret = of_property_read_u32(dev_node, "qcom,qos-off", + &fab_dev->qos_off); + if (ret) + dev_dbg(&pdev->dev, "Bus qos off is missing\n"); + + 
+ ret = of_property_read_u32(dev_node, "qcom,bus-type", + &fab_dev->bus_type); + if (ret) { + dev_warn(&pdev->dev, "Bus type is missing\n"); + goto fab_dev_err; + } + + ret = of_property_read_u32(dev_node, "qcom,qos-freq", + &fab_dev->qos_freq); + if (ret) { + dev_dbg(&pdev->dev, "Bus qos freq is missing\n"); + fab_dev->qos_freq = DEFAULT_QOS_FREQ; + } + + ret = of_property_read_u32(dev_node, "qcom,util-fact", + &fab_dev->util_fact); + if (ret) { + dev_info(&pdev->dev, "Util-fact is missing, default to %d\n", + DEFAULT_UTIL_FACT); + fab_dev->util_fact = DEFAULT_UTIL_FACT; + } + + ret = of_property_read_u32(dev_node, "qcom,vrail-comp", + &fab_dev->vrail_comp); + if (ret) { + dev_info(&pdev->dev, "Vrail-comp is missing, default to %d\n", + DEFAULT_VRAIL_COMP); + fab_dev->vrail_comp = DEFAULT_VRAIL_COMP; + } + + return fab_dev; + +fab_dev_err: + devm_kfree(&pdev->dev, fab_dev); + fab_dev = 0; + return NULL; +} + +static void get_qos_params( + struct device_node * const dev_node, + struct platform_device * const pdev, + struct msm_bus_node_info_type *node_info) +{ + const char *qos_mode = NULL; + unsigned int ret; + unsigned int temp; + + ret = of_property_read_string(dev_node, "qcom,qos-mode", &qos_mode); + + if (ret) + node_info->qos_params.mode = -1; + else + node_info->qos_params.mode = get_qos_mode(pdev, dev_node, + qos_mode); + + of_property_read_u32(dev_node, "qcom,prio-lvl", + &node_info->qos_params.prio_lvl); + + of_property_read_u32(dev_node, "qcom,prio1", + &node_info->qos_params.prio1); + + of_property_read_u32(dev_node, "qcom,prio0", + &node_info->qos_params.prio0); + + of_property_read_u32(dev_node, "qcom,prio-rd", + &node_info->qos_params.prio_rd); + + of_property_read_u32(dev_node, "qcom,prio-wr", + &node_info->qos_params.prio_wr); + + of_property_read_u32(dev_node, "qcom,gp", + &node_info->qos_params.gp); + + of_property_read_u32(dev_node, "qcom,thmp", + &node_info->qos_params.thmp); + + of_property_read_u32(dev_node, "qcom,ws", + 
&node_info->qos_params.ws); + + ret = of_property_read_u32(dev_node, "qcom,bw_buffer", &temp); + + if (ret) + node_info->qos_params.bw_buffer = 0; + else + node_info->qos_params.bw_buffer = KBTOB(temp); + +} + + +static struct msm_bus_node_info_type *get_node_info_data( + struct device_node * const dev_node, + struct platform_device * const pdev) +{ + struct msm_bus_node_info_type *node_info; + unsigned int ret; + int size; + int i; + struct device_node *con_node; + struct device_node *bus_dev; + + node_info = devm_kzalloc(&pdev->dev, + sizeof(struct msm_bus_node_info_type), + GFP_KERNEL); + if (!node_info) { + dev_err(&pdev->dev, + "Error: Unable to allocate memory for node_info\n"); + return NULL; + } + + ret = of_property_read_u32(dev_node, "cell-id", &node_info->id); + if (ret) { + dev_warn(&pdev->dev, "Bus node is missing cell-id\n"); + goto node_info_err; + } + ret = of_property_read_string(dev_node, "label", &node_info->name); + if (ret) { + dev_warn(&pdev->dev, "Bus node is missing name\n"); + goto node_info_err; + } + node_info->qport = get_arr(pdev, dev_node, "qcom,qport", + &node_info->num_qports); + + if (of_get_property(dev_node, "qcom,connections", &size)) { + node_info->num_connections = size / sizeof(int); + node_info->connections = devm_kzalloc(&pdev->dev, size, + GFP_KERNEL); + } else { + node_info->num_connections = 0; + node_info->connections = 0; + } + + for (i = 0; i < node_info->num_connections; i++) { + con_node = of_parse_phandle(dev_node, "qcom,connections", i); + if (IS_ERR_OR_NULL(con_node)) + goto node_info_err; + + if (of_property_read_u32(con_node, "cell-id", + &node_info->connections[i])) + goto node_info_err; + of_node_put(con_node); + } + + if (of_get_property(dev_node, "qcom,blacklist", &size)) { + node_info->num_blist = size/sizeof(u32); + node_info->black_listed_connections = devm_kzalloc(&pdev->dev, + size, GFP_KERNEL); + } else { + node_info->num_blist = 0; + node_info->black_listed_connections = 0; + } + + for (i = 0; i < 
node_info->num_blist; i++) { + con_node = of_parse_phandle(dev_node, "qcom,blacklist", i); + if (IS_ERR_OR_NULL(con_node)) + goto node_info_err; + + if (of_property_read_u32(con_node, "cell-id", + &node_info->black_listed_connections[i])) + goto node_info_err; + of_node_put(con_node); + } + + bus_dev = of_parse_phandle(dev_node, "qcom,bus-dev", 0); + if (!IS_ERR_OR_NULL(bus_dev)) { + if (of_property_read_u32(bus_dev, "cell-id", + &node_info->bus_device_id)) { + dev_err(&pdev->dev, "Can't find bus device. Node %d", + node_info->id); + goto node_info_err; + } + + of_node_put(bus_dev); + } else + dev_dbg(&pdev->dev, "Can't find bdev phandle for %d", + node_info->id); + + node_info->is_fab_dev = of_property_read_bool(dev_node, "qcom,fab-dev"); + node_info->virt_dev = of_property_read_bool(dev_node, "qcom,virt-dev"); + + ret = of_property_read_u32(dev_node, "qcom,buswidth", + &node_info->buswidth); + if (ret) { + dev_dbg(&pdev->dev, "Using default 8 bytes %d", node_info->id); + node_info->buswidth = 8; + } + + ret = of_property_read_u32(dev_node, "qcom,mas-rpm-id", + &node_info->mas_rpm_id); + if (ret) { + dev_dbg(&pdev->dev, "mas rpm id is missing\n"); + node_info->mas_rpm_id = -1; + } + + ret = of_property_read_u32(dev_node, "qcom,slv-rpm-id", + &node_info->slv_rpm_id); + if (ret) { + dev_dbg(&pdev->dev, "slv rpm id is missing\n"); + node_info->slv_rpm_id = -1; + } + get_qos_params(dev_node, pdev, node_info); + + return node_info; + +node_info_err: + devm_kfree(&pdev->dev, node_info); + node_info = 0; + return NULL; +} + +static unsigned int get_bus_node_device_data( + struct device_node * const dev_node, + struct platform_device * const pdev, + struct msm_bus_node_device_type * const node_device) +{ + node_device->node_info = get_node_info_data(dev_node, pdev); + if (IS_ERR_OR_NULL(node_device->node_info)) { + dev_err(&pdev->dev, "Error: Node info missing\n"); + return -ENODATA; + } + node_device->ap_owned = of_property_read_bool(dev_node, + "qcom,ap-owned"); + + if 
(node_device->node_info->is_fab_dev) { + dev_err(&pdev->dev, "Dev %d\n", node_device->node_info->id); + + if (!node_device->node_info->virt_dev) { + node_device->fabdev = + get_fab_device_info(dev_node, pdev); + if (IS_ERR_OR_NULL(node_device->fabdev)) { + dev_err(&pdev->dev, + "Error: Fabric device info missing\n"); + devm_kfree(&pdev->dev, node_device->node_info); + return -ENODATA; + } + } + node_device->clk[DUAL_CTX].clk = of_clk_get_by_name(dev_node, + "bus_clk"); + + if (IS_ERR_OR_NULL(node_device->clk[DUAL_CTX].clk)) + dev_err(&pdev->dev, + "%s:Failed to get bus clk for bus%d ctx%d", + __func__, node_device->node_info->id, + DUAL_CTX); + + node_device->clk[ACTIVE_CTX].clk = of_clk_get_by_name(dev_node, + "bus_a_clk"); + if (IS_ERR_OR_NULL(node_device->clk[ACTIVE_CTX].clk)) + dev_err(&pdev->dev, + "Failed to get bus clk for bus%d ctx%d", + node_device->node_info->id, ACTIVE_CTX); + + node_device->qos_clk.clk = of_clk_get_by_name(dev_node, + "bus_qos_clk"); + + if (IS_ERR_OR_NULL(node_device->qos_clk.clk)) + dev_dbg(&pdev->dev, + "%s:Failed to get bus qos clk for %d", + __func__, node_device->node_info->id); + + if (msmbus_coresight_init_adhoc(pdev, dev_node)) + dev_warn(&pdev->dev, + "Coresight support absent for bus: %d\n", + node_device->node_info->id); + } else { + node_device->qos_clk.clk = of_clk_get_by_name(dev_node, + "bus_qos_clk"); + + if (IS_ERR_OR_NULL(node_device->qos_clk.clk)) + dev_dbg(&pdev->dev, + "%s:Failed to get bus qos clk for mas%d", + __func__, node_device->node_info->id); + + node_device->clk[DUAL_CTX].clk = of_clk_get_by_name(dev_node, + "node_clk"); + + if (IS_ERR_OR_NULL(node_device->clk[DUAL_CTX].clk)) + dev_dbg(&pdev->dev, + "%s:Failed to get bus clk for bus%d ctx%d", + __func__, node_device->node_info->id, + DUAL_CTX); + + } + return 0; +} + +struct msm_bus_device_node_registration + *msm_bus_of_to_pdata(struct platform_device *pdev) +{ + struct device_node *of_node, *child_node; + struct msm_bus_device_node_registration *pdata; + 
unsigned int i = 0, j; + unsigned int ret; + + if (!pdev) { + pr_err("Error: Null platform device\n"); + return NULL; + } + + of_node = pdev->dev.of_node; + + pdata = devm_kzalloc(&pdev->dev, + sizeof(struct msm_bus_device_node_registration), + GFP_KERNEL); + if (!pdata) { + dev_err(&pdev->dev, + "Error: Memory allocation for pdata failed\n"); + return NULL; + } + + pdata->num_devices = of_get_child_count(of_node); + + pdata->info = devm_kzalloc(&pdev->dev, + sizeof(struct msm_bus_node_device_type) * + pdata->num_devices, GFP_KERNEL); + + if (!pdata->info) { + dev_err(&pdev->dev, + "Error: Memory allocation for pdata->info failed\n"); + goto node_reg_err; + } + + ret = 0; + for_each_child_of_node(of_node, child_node) { + ret = get_bus_node_device_data(child_node, pdev, + &pdata->info[i]); + if (ret) { + dev_err(&pdev->dev, "Error: unable to initialize bus nodes\n"); + goto node_reg_err_1; + } + i++; + } + + dev_dbg(&pdev->dev, "bus topology:\n"); + for (i = 0; i < pdata->num_devices; i++) { + dev_dbg(&pdev->dev, "id %d\nnum_qports %d\nnum_connections %d", + pdata->info[i].node_info->id, + pdata->info[i].node_info->num_qports, + pdata->info[i].node_info->num_connections); + dev_dbg(&pdev->dev, "\nbus_device_id %d\n buswidth %d\n", + pdata->info[i].node_info->bus_device_id, + pdata->info[i].node_info->buswidth); + for (j = 0; j < pdata->info[i].node_info->num_connections; + j++) { + dev_dbg(&pdev->dev, "connection[%d]: %d\n", j, + pdata->info[i].node_info->connections[j]); + } + for (j = 0; j < pdata->info[i].node_info->num_blist; + j++) { + dev_dbg(&pdev->dev, "black_listed_node[%d]: %d\n", j, + pdata->info[i].node_info-> + black_listed_connections[j]); + } + if (pdata->info[i].fabdev) + dev_dbg(&pdev->dev, "base_addr %zu\nbus_type %d\n", + (size_t)pdata->info[i]. 
+ fabdev->pqos_base, + pdata->info[i].fabdev->bus_type); + } + return pdata; + +node_reg_err_1: + devm_kfree(&pdev->dev, pdata->info); +node_reg_err: + devm_kfree(&pdev->dev, pdata); + pdata = NULL; + return NULL; +} + +static int msm_bus_of_get_ids(struct platform_device *pdev, + struct device_node *dev_node, int **dev_ids, + int *num_ids, char *prop_name) +{ + int ret = 0; + int size, i; + struct device_node *rule_node; + int *ids = NULL; + + if (of_get_property(dev_node, prop_name, &size)) { + *num_ids = size / sizeof(int); + ids = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); + } else { + dev_err(&pdev->dev, "No rule nodes, skipping node"); + ret = -ENXIO; + goto exit_get_ids; + } + + *dev_ids = ids; + for (i = 0; i < *num_ids; i++) { + rule_node = of_parse_phandle(dev_node, prop_name, i); + if (IS_ERR_OR_NULL(rule_node)) { + dev_err(&pdev->dev, "Can't get rule node id"); + ret = -ENXIO; + goto err_get_ids; + } + + if (of_property_read_u32(rule_node, "cell-id", + &ids[i])) { + dev_err(&pdev->dev, "Can't get rule node id"); + ret = -ENXIO; + goto err_get_ids; + } + of_node_put(rule_node); + } +exit_get_ids: + return ret; +err_get_ids: + devm_kfree(&pdev->dev, ids); + of_node_put(rule_node); + ids = NULL; + return ret; +} + +int msm_bus_of_get_static_rules(struct platform_device *pdev, + struct bus_rule_type **static_rules) +{ + int ret = 0; + struct device_node *of_node, *child_node; + int num_rules = 0; + int rule_idx = 0; + int bw_fld = 0; + int i; + struct bus_rule_type *static_rule = NULL; + + of_node = pdev->dev.of_node; + num_rules = of_get_child_count(of_node); + static_rule = devm_kzalloc(&pdev->dev, + sizeof(struct bus_rule_type) * num_rules, + GFP_KERNEL); + + if (IS_ERR_OR_NULL(static_rule)) { + ret = -ENOMEM; + goto exit_static_rules; + } + + *static_rules = static_rule; + for_each_child_of_node(of_node, child_node) { + ret = msm_bus_of_get_ids(pdev, child_node, + &static_rule[rule_idx].src_id, + &static_rule[rule_idx].num_src, + "qcom,src-nodes"); + + 
ret = msm_bus_of_get_ids(pdev, child_node, + &static_rule[rule_idx].dst_node, + &static_rule[rule_idx].num_dst, + "qcom,dest-node"); + + ret = of_property_read_u32(child_node, "qcom,src-field", + &static_rule[rule_idx].src_field); + if (ret) { + dev_err(&pdev->dev, "src-field missing"); + ret = -ENXIO; + goto err_static_rules; + } + + ret = of_property_read_u32(child_node, "qcom,src-op", + &static_rule[rule_idx].op); + if (ret) { + dev_err(&pdev->dev, "src-op missing"); + ret = -ENXIO; + goto err_static_rules; + } + + ret = of_property_read_u32(child_node, "qcom,mode", + &static_rule[rule_idx].mode); + if (ret) { + dev_err(&pdev->dev, "mode missing"); + ret = -ENXIO; + goto err_static_rules; + } + + ret = of_property_read_u32(child_node, "qcom,thresh", &bw_fld); + if (ret) { + dev_err(&pdev->dev, "thresh missing"); + ret = -ENXIO; + goto err_static_rules; + } else + static_rule[rule_idx].thresh = KBTOB(bw_fld); + + ret = of_property_read_u32(child_node, "qcom,dest-bw", + &bw_fld); + if (ret) + static_rule[rule_idx].dst_bw = 0; + else + static_rule[rule_idx].dst_bw = KBTOB(bw_fld); + + rule_idx++; + } + ret = rule_idx; +exit_static_rules: + return ret; +err_static_rules: + for (i = 0; i < num_rules; i++) { + if (!IS_ERR_OR_NULL(static_rule)) { + if (!IS_ERR_OR_NULL(static_rule[i].src_id)) + devm_kfree(&pdev->dev, + static_rule[i].src_id); + if (!IS_ERR_OR_NULL(static_rule[i].dst_node)) + devm_kfree(&pdev->dev, + static_rule[i].dst_node); + devm_kfree(&pdev->dev, static_rule); + } + } + devm_kfree(&pdev->dev, *static_rules); + static_rules = NULL; + return ret; +} diff --git a/drivers/soc/qcom/msm_bus/msm_bus_rpm_smd.c b/drivers/soc/qcom/msm_bus/msm_bus_rpm_smd.c new file mode 100644 index 000000000000..be64c78b7f13 --- /dev/null +++ b/drivers/soc/qcom/msm_bus/msm_bus_rpm_smd.c @@ -0,0 +1,257 @@ +/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__ + +#include "msm_bus_core.h" +#include <linux/msm-bus.h> +#include <linux/msm-bus-board.h> +#include <linux/soc/qcom/smd-rpm.h> + +/* Stubs for backward compatibility */ +void msm_bus_rpm_set_mt_mask() +{ +} + +bool msm_bus_rpm_is_mem_interleaved(void) +{ + return true; +} + +struct commit_data { + struct msm_bus_node_hw_info *mas_arb; + struct msm_bus_node_hw_info *slv_arb; +}; + +#ifdef CONFIG_DEBUG_FS +void msm_bus_rpm_fill_cdata_buffer(int *curr, char *buf, const int max_size, + void *cdata, int nmasters, int nslaves, int ntslaves) +{ + int c; + struct commit_data *cd = (struct commit_data *)cdata; + + *curr += scnprintf(buf + *curr, max_size - *curr, "\nMas BW:\n"); + for (c = 0; c < nmasters; c++) + *curr += scnprintf(buf + *curr, max_size - *curr, + "%d: %llu\t", cd->mas_arb[c].hw_id, + cd->mas_arb[c].bw); + *curr += scnprintf(buf + *curr, max_size - *curr, "\nSlave BW:\n"); + for (c = 0; c < nslaves; c++) { + *curr += scnprintf(buf + *curr, max_size - *curr, + "%d: %llu\t", cd->slv_arb[c].hw_id, + cd->slv_arb[c].bw); + } +} +#endif + +static int msm_bus_rpm_compare_cdata( + struct msm_bus_fabric_registration *fab_pdata, + struct commit_data *cd1, struct commit_data *cd2) +{ + size_t n; + int ret; + + n = sizeof(struct msm_bus_node_hw_info) * fab_pdata->nmasters * 2; + ret = memcmp(cd1->mas_arb, cd2->mas_arb, n); + if (ret) { + MSM_BUS_DBG("Master Arb Data not equal\n"); + return ret; + } + + n = sizeof(struct msm_bus_node_hw_info) * 
fab_pdata->nslaves * 2; + ret = memcmp(cd1->slv_arb, cd2->slv_arb, n); + if (ret) { + MSM_BUS_DBG("Master Arb Data not equal\n"); + return ret; + } + + return 0; +} + +static int msm_bus_rpm_req(int ctx, uint32_t rsc_type, uint32_t key, + struct msm_bus_node_hw_info *hw_info, bool valid) +{ + struct qcom_msm_bus_req req = { + .key = key, + }; + int ret = 0; + + if (ctx == ACTIVE_CTX) + ctx = QCOM_SMD_RPM_ACTIVE_STATE; + else if (ctx == DUAL_CTX) + ctx = QCOM_SMD_RPM_SLEEP_STATE; + +#if 0 + rpm_req = msm_rpm_create_request(ctx, rsc_type, hw_info->hw_id, 1); + if (rpm_req == NULL) { + MSM_BUS_WARN("RPM: Couldn't create RPM Request\n"); + return -ENXIO; + } +#endif + + if (valid) { + req.value = hw_info->bw; + req.nbytes = sizeof(uint64_t); +#if 0 + ret = msm_rpm_add_kvp_data(rpm_req, key, (const uint8_t *) + &hw_info->bw, (int)(sizeof(uint64_t))); + if (ret) { + MSM_BUS_WARN("RPM: Add KVP failed for RPM Req:%u\n", + rsc_type); + goto free_rpm_request; + } + + MSM_BUS_DBG("Added Key: %d, Val: %llu, size: %zu\n", key, + hw_info->bw, sizeof(uint64_t)); +#endif + } else { + req.value = 0; + req.nbytes = 0; +#if 0 + /* Invalidate RPM requests */ + ret = msm_rpm_add_kvp_data(rpm_req, 0, NULL, 0); + if (ret) { + MSM_BUS_WARN("RPM: Add KVP failed for RPM Req:%u\n", + rsc_type); + goto free_rpm_request; + } +#endif + } + +#if 0 + msg_id = msm_rpm_send_request(rpm_req); + if (!msg_id) { + MSM_BUS_WARN("RPM: No message ID for req\n"); + ret = -ENXIO; + goto free_rpm_request; + } + + ret = msm_rpm_wait_for_ack(msg_id); + if (ret) { + MSM_BUS_WARN("RPM: Ack failed\n"); + goto free_rpm_request; + } + +free_rpm_request: + msm_rpm_free_request(rpm_req); +#endif + ret = qcom_rpm_bus_send_message(ctx, rsc_type, hw_info->hw_id, &req); + + return ret; +} + +static int msm_bus_rpm_commit_arb(struct msm_bus_fabric_registration + *fab_pdata, int ctx, void *rpm_data, + struct commit_data *cd, bool valid) +{ + int i, status = 0, rsc_type, key; + + MSM_BUS_DBG("Context: %d\n", ctx); + 
rsc_type = RPM_BUS_MASTER_REQ; + key = RPM_MASTER_FIELD_BW; + for (i = 0; i < fab_pdata->nmasters; i++) { + if (cd->mas_arb[i].dirty) { + MSM_BUS_DBG("MAS HWID: %d, BW: %llu DIRTY: %d\n", + cd->mas_arb[i].hw_id, + cd->mas_arb[i].bw, + cd->mas_arb[i].dirty); + status = msm_bus_rpm_req(ctx, rsc_type, key, + &cd->mas_arb[i], valid); + if (status) { + MSM_BUS_ERR("RPM: Req fail: mas:%d, bw:%llu\n", + cd->mas_arb[i].hw_id, + cd->mas_arb[i].bw); + break; + } else { + cd->mas_arb[i].dirty = false; + } + } + } + + rsc_type = RPM_BUS_SLAVE_REQ; + key = RPM_SLAVE_FIELD_BW; + for (i = 0; i < fab_pdata->nslaves; i++) { + if (cd->slv_arb[i].dirty) { + MSM_BUS_DBG("SLV HWID: %d, BW: %llu DIRTY: %d\n", + cd->slv_arb[i].hw_id, + cd->slv_arb[i].bw, + cd->slv_arb[i].dirty); + status = msm_bus_rpm_req(ctx, rsc_type, key, + &cd->slv_arb[i], valid); + if (status) { + MSM_BUS_ERR("RPM: Req fail: slv:%d, bw:%llu\n", + cd->slv_arb[i].hw_id, + cd->slv_arb[i].bw); + break; + } else { + cd->slv_arb[i].dirty = false; + } + } + } + + return status; +} + +/** +* msm_bus_remote_hw_commit() - Commit the arbitration data to RPM +* @fabric: Fabric for which the data should be committed +**/ +int msm_bus_remote_hw_commit(struct msm_bus_fabric_registration + *fab_pdata, void *hw_data, void **cdata) +{ + + int ret; + bool valid; + struct commit_data *dual_cd, *act_cd; + void *rpm_data = hw_data; + + MSM_BUS_DBG("\nReached RPM Commit\n"); + dual_cd = (struct commit_data *)cdata[DUAL_CTX]; + act_cd = (struct commit_data *)cdata[ACTIVE_CTX]; + + /* + * If the arb data for active set and sleep set is + * different, commit both sets. + * If the arb data for active set and sleep set is + * the same, invalidate the sleep set. 
+ */ + ret = msm_bus_rpm_compare_cdata(fab_pdata, act_cd, dual_cd); + if (!ret) + /* Invalidate sleep set.*/ + valid = false; + else + valid = true; + + ret = msm_bus_rpm_commit_arb(fab_pdata, DUAL_CTX, rpm_data, + dual_cd, valid); + if (ret) + MSM_BUS_ERR("Error comiting fabric:%d in %d ctx\n", + fab_pdata->id, DUAL_CTX); + + valid = true; + ret = msm_bus_rpm_commit_arb(fab_pdata, ACTIVE_CTX, rpm_data, act_cd, + valid); + if (ret) + MSM_BUS_ERR("Error comiting fabric:%d in %d ctx\n", + fab_pdata->id, ACTIVE_CTX); + + return ret; +} + +int msm_bus_rpm_hw_init(struct msm_bus_fabric_registration *pdata, + struct msm_bus_hw_algorithm *hw_algo) +{ + if (!pdata->ahb) + pdata->rpm_enabled = 1; + return 0; +} diff --git a/drivers/soc/qcom/msm_bus/msm_bus_rules.c b/drivers/soc/qcom/msm_bus/msm_bus_rules.c new file mode 100644 index 000000000000..43422a7fddd9 --- /dev/null +++ b/drivers/soc/qcom/msm_bus/msm_bus_rules.c @@ -0,0 +1,634 @@ +/* Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/list_sort.h> +#include <linux/msm-bus-board.h> +#include <linux/msm_bus_rules.h> +#include <linux/slab.h> +#include <linux/types.h> + +struct node_vote_info { + int id; + u64 ib; + u64 ab; + u64 clk; +}; + +struct rules_def { + int rule_id; + int num_src; + int state; + struct node_vote_info *src_info; + struct bus_rule_type rule_ops; + bool state_change; + struct list_head link; +}; + +struct rule_node_info { + int id; + void *data; + struct raw_notifier_head rule_notify_list; + int cur_rule; + int num_rules; + struct list_head node_rules; + struct list_head link; + struct rule_apply_rcm_info apply; +}; + +DEFINE_MUTEX(msm_bus_rules_lock); +static LIST_HEAD(node_list); +static struct rule_node_info *get_node(u32 id, void *data); + +#define LE(op1, op2) (op1 <= op2) +#define LT(op1, op2) (op1 < op2) +#define GE(op1, op2) (op1 >= op2) +#define GT(op1, op2) (op1 > op2) +#define NB_ID (0x201) + +static struct rule_node_info *get_node(u32 id, void *data) +{ + struct rule_node_info *node_it = NULL; + struct rule_node_info *node_match = NULL; + + list_for_each_entry(node_it, &node_list, link) { + if (node_it->id == id) { + if ((id == NB_ID)) { + if ((node_it->data == data)) { + node_match = node_it; + break; + } + } else { + node_match = node_it; + break; + } + } + } + return node_match; +} + +static struct rule_node_info *gen_node(u32 id, void *data) +{ + struct rule_node_info *node_it = NULL; + struct rule_node_info *node_match = NULL; + + list_for_each_entry(node_it, &node_list, link) { + if (node_it->id == id) { + node_match = node_it; + break; + } + } + + if (!node_match) { + node_match = kzalloc(sizeof(struct rule_node_info), GFP_KERNEL); + if (!node_match) { + pr_err("%s: Cannot allocate memory", __func__); + goto exit_node_match; + } + + node_match->id = id; + node_match->cur_rule = -1; + node_match->num_rules = 0; + node_match->data = data; + list_add_tail(&node_match->link, &node_list); + INIT_LIST_HEAD(&node_match->node_rules); + 
RAW_INIT_NOTIFIER_HEAD(&node_match->rule_notify_list); + pr_debug("Added new node %d to list\n", id); + } +exit_node_match: + return node_match; +} + +static bool do_compare_op(u64 op1, u64 op2, int op) +{ + bool ret = false; + + switch (op) { + case OP_LE: + ret = LE(op1, op2); + break; + case OP_LT: + ret = LT(op1, op2); + break; + case OP_GT: + ret = GT(op1, op2); + break; + case OP_GE: + ret = GE(op1, op2); + break; + case OP_NOOP: + ret = true; + break; + default: + pr_info("Invalid OP %d", op); + break; + } + return ret; +} + +static void update_src_id_vote(struct rule_update_path_info *inp_node, + struct rule_node_info *rule_node) +{ + struct rules_def *rule; + int i; + + list_for_each_entry(rule, &rule_node->node_rules, link) { + for (i = 0; i < rule->num_src; i++) { + if (rule->src_info[i].id == inp_node->id) { + rule->src_info[i].ib = inp_node->ib; + rule->src_info[i].ab = inp_node->ab; + rule->src_info[i].clk = inp_node->clk; + } + } + } +} + +static u64 get_field(struct rules_def *rule, int src_id) +{ + u64 field = 0; + int i; + + for (i = 0; i < rule->num_src; i++) { + switch (rule->rule_ops.src_field) { + case FLD_IB: + field += rule->src_info[i].ib; + break; + case FLD_AB: + field += rule->src_info[i].ab; + break; + case FLD_CLK: + field += rule->src_info[i].clk; + break; + } + } + + return field; +} + +static bool check_rule(struct rules_def *rule, + struct rule_update_path_info *inp) +{ + bool ret = false; + + if (!rule) + return ret; + + switch (rule->rule_ops.op) { + case OP_LE: + case OP_LT: + case OP_GT: + case OP_GE: + { + u64 src_field = get_field(rule, inp->id); + if (!src_field) + ret = false; + else + ret = do_compare_op(src_field, rule->rule_ops.thresh, + rule->rule_ops.op); + break; + } + default: + pr_err("Unsupported op %d", rule->rule_ops.op); + break; + } + return ret; +} + +static void match_rule(struct rule_update_path_info *inp_node, + struct rule_node_info *node) +{ + struct rules_def *rule; + int i; + bool match_found = false; + 
bool relevant_trans = false; + + list_for_each_entry(rule, &node->node_rules, link) { + for (i = 0; i < rule->num_src; i++) { + if (rule->src_info[i].id == inp_node->id) { + relevant_trans = true; + if (check_rule(rule, inp_node)) { + node->cur_rule = rule->rule_id; + if (rule->state == + RULE_STATE_NOT_APPLIED) { + rule->state = + RULE_STATE_APPLIED; + rule->state_change = true; + match_found = true; + } + break; + } + } + } + if (match_found) + break; + } + + if (!relevant_trans) + return; + + if (!match_found) + node->cur_rule = -1; + + list_for_each_entry(rule, &node->node_rules, link) { + if (rule->rule_id != node->cur_rule) { + if (rule->state == RULE_STATE_APPLIED) { + rule->state = RULE_STATE_NOT_APPLIED; + rule->state_change = true; + } + } + } +} + +static void apply_rule(struct rule_node_info *node, + struct list_head *output_list) +{ + struct rules_def *rule; + + list_for_each_entry(rule, &node->node_rules, link) { + if (node->id == NB_ID) { + if (rule->state_change) { + rule->state_change = false; + raw_notifier_call_chain(&node->rule_notify_list, + rule->state, (void *)&rule->rule_ops); + } + } else { + rule->state_change = false; + if ((rule->state == RULE_STATE_APPLIED)) { + node->apply.id = rule->rule_ops.dst_node[0]; + node->apply.throttle = rule->rule_ops.mode; + node->apply.lim_bw = rule->rule_ops.dst_bw; + list_add_tail(&node->apply.link, output_list); + } + } + } + +} + +int msm_rules_update_path(struct list_head *input_list, + struct list_head *output_list) +{ + int ret = 0; + struct rule_update_path_info *inp_node; + struct rule_node_info *node_it = NULL; + + mutex_lock(&msm_bus_rules_lock); + list_for_each_entry(inp_node, input_list, link) { + list_for_each_entry(node_it, &node_list, link) { + update_src_id_vote(inp_node, node_it); + match_rule(inp_node, node_it); + } + } + + list_for_each_entry(node_it, &node_list, link) + apply_rule(node_it, output_list); + + mutex_unlock(&msm_bus_rules_lock); + return ret; +} + +static bool ops_equal(int 
op1, int op2) +{ + bool ret = false; + + switch (op1) { + case OP_GT: + case OP_GE: + case OP_LT: + case OP_LE: + if (abs(op1 - op2) <= 1) + ret = true; + break; + default: + ret = (op1 == op2); + } + + return ret; +} + +static int node_rules_compare(void *priv, struct list_head *a, + struct list_head *b) +{ + struct rules_def *ra = container_of(a, struct rules_def, link); + struct rules_def *rb = container_of(b, struct rules_def, link); + int ret = -1; + int64_t th_diff = 0; + + + if (ra->rule_ops.mode == rb->rule_ops.mode) { + if (ops_equal(ra->rule_ops.op, rb->rule_ops.op)) { + if ((ra->rule_ops.op == OP_LT) || + (ra->rule_ops.op == OP_LE)) { + th_diff = ra->rule_ops.thresh - + rb->rule_ops.thresh; + if (th_diff > 0) + ret = 1; + else + ret = -1; + } else if ((ra->rule_ops.op == OP_GT) || + (ra->rule_ops.op == OP_GE)) { + th_diff = rb->rule_ops.thresh - + ra->rule_ops.thresh; + if (th_diff > 0) + ret = 1; + else + ret = -1; + } + } else + ret = ra->rule_ops.op - rb->rule_ops.op; + } else if ((ra->rule_ops.mode == THROTTLE_OFF) && + (rb->rule_ops.mode == THROTTLE_ON)) { + ret = 1; + } else if ((ra->rule_ops.mode == THROTTLE_ON) && + (rb->rule_ops.mode == THROTTLE_OFF)) { + ret = -1; + } + + return ret; +} + +static void print_rules(struct rule_node_info *node_it) +{ + struct rules_def *node_rule = NULL; + int i; + + if (!node_it) { + pr_err("%s: no node for found", __func__); + return; + } + + pr_info("\n Now printing rules for Node %d cur rule %d\n", + node_it->id, node_it->cur_rule); + list_for_each_entry(node_rule, &node_it->node_rules, link) { + pr_info("\n num Rules %d rule Id %d\n", + node_it->num_rules, node_rule->rule_id); + pr_info("Rule: src_field %d\n", node_rule->rule_ops.src_field); + for (i = 0; i < node_rule->rule_ops.num_src; i++) + pr_info("Rule: src %d\n", + node_rule->rule_ops.src_id[i]); + for (i = 0; i < node_rule->rule_ops.num_dst; i++) + pr_info("Rule: dst %d dst_bw %llu\n", + node_rule->rule_ops.dst_node[i], + node_rule->rule_ops.dst_bw); 
+ pr_info("Rule: thresh %llu op %d mode %d State %d\n", + node_rule->rule_ops.thresh, + node_rule->rule_ops.op, + node_rule->rule_ops.mode, + node_rule->state); + } +} + +void print_all_rules(void) +{ + struct rule_node_info *node_it = NULL; + + list_for_each_entry(node_it, &node_list, link) + print_rules(node_it); +} + +void print_rules_buf(char *buf, int max_buf) +{ + struct rule_node_info *node_it = NULL; + struct rules_def *node_rule = NULL; + int i; + int cnt = 0; + + list_for_each_entry(node_it, &node_list, link) { + cnt += scnprintf(buf + cnt, max_buf - cnt, + "\n Now printing rules for Node %d cur_rule %d\n", + node_it->id, node_it->cur_rule); + list_for_each_entry(node_rule, &node_it->node_rules, link) { + cnt += scnprintf(buf + cnt, max_buf - cnt, + "\nNum Rules:%d ruleId %d STATE:%d change:%d\n", + node_it->num_rules, node_rule->rule_id, + node_rule->state, node_rule->state_change); + cnt += scnprintf(buf + cnt, max_buf - cnt, + "Src_field %d\n", + node_rule->rule_ops.src_field); + for (i = 0; i < node_rule->rule_ops.num_src; i++) + cnt += scnprintf(buf + cnt, max_buf - cnt, + "Src %d Cur Ib %llu Ab %llu\n", + node_rule->rule_ops.src_id[i], + node_rule->src_info[i].ib, + node_rule->src_info[i].ab); + for (i = 0; i < node_rule->rule_ops.num_dst; i++) + cnt += scnprintf(buf + cnt, max_buf - cnt, + "Dst %d dst_bw %llu\n", + node_rule->rule_ops.dst_node[0], + node_rule->rule_ops.dst_bw); + cnt += scnprintf(buf + cnt, max_buf - cnt, + "Thresh %llu op %d mode %d\n", + node_rule->rule_ops.thresh, + node_rule->rule_ops.op, + node_rule->rule_ops.mode); + } + } +} + +static int copy_rule(struct bus_rule_type *src, struct rules_def *node_rule, + struct notifier_block *nb) +{ + int i; + int ret = 0; + + memcpy(&node_rule->rule_ops, src, + sizeof(struct bus_rule_type)); + node_rule->rule_ops.src_id = kzalloc( + (sizeof(int) * node_rule->rule_ops.num_src), + GFP_KERNEL); + if (!node_rule->rule_ops.src_id) { + pr_err("%s:Failed to allocate for src_id", + __func__); + 
return -ENOMEM; + } + memcpy(node_rule->rule_ops.src_id, src->src_id, + sizeof(int) * src->num_src); + + + if (!nb) { + node_rule->rule_ops.dst_node = kzalloc( + (sizeof(int) * node_rule->rule_ops.num_dst), + GFP_KERNEL); + if (!node_rule->rule_ops.dst_node) { + pr_err("%s:Failed to allocate for src_id", + __func__); + return -ENOMEM; + } + memcpy(node_rule->rule_ops.dst_node, src->dst_node, + sizeof(int) * src->num_dst); + } + + node_rule->num_src = src->num_src; + node_rule->src_info = kzalloc( + (sizeof(struct node_vote_info) * node_rule->rule_ops.num_src), + GFP_KERNEL); + if (!node_rule->src_info) { + pr_err("%s:Failed to allocate for src_id", + __func__); + return -ENOMEM; + } + for (i = 0; i < src->num_src; i++) + node_rule->src_info[i].id = src->src_id[i]; + + return ret; +} + +void msm_rule_register(int num_rules, struct bus_rule_type *rule, + struct notifier_block *nb) +{ + struct rule_node_info *node = NULL; + int i, j; + struct rules_def *node_rule = NULL; + int num_dst = 0; + + if (!rule) + return; + + mutex_lock(&msm_bus_rules_lock); + for (i = 0; i < num_rules; i++) { + if (nb) + num_dst = 1; + else + num_dst = rule[i].num_dst; + + for (j = 0; j < num_dst; j++) { + int id = 0; + + if (nb) + id = NB_ID; + else + id = rule[i].dst_node[j]; + + node = gen_node(id, nb); + if (!node) { + pr_info("Error getting rule"); + goto exit_rule_register; + } + node_rule = kzalloc(sizeof(struct rules_def), + GFP_KERNEL); + if (!node_rule) { + pr_err("%s: Failed to allocate for rule", + __func__); + goto exit_rule_register; + } + + if (copy_rule(&rule[i], node_rule, nb)) { + pr_err("Error copying rule"); + goto exit_rule_register; + } + + node_rule->rule_id = node->num_rules++; + if (nb) + node->data = nb; + + list_add_tail(&node_rule->link, &node->node_rules); + } + } + list_sort(NULL, &node->node_rules, node_rules_compare); + + if (nb) + raw_notifier_chain_register(&node->rule_notify_list, nb); +exit_rule_register: + mutex_unlock(&msm_bus_rules_lock); + return; +} + 
+static int comp_rules(struct bus_rule_type *rulea, struct bus_rule_type *ruleb) +{ + int ret = 1; + + if (rulea->num_src == ruleb->num_src) + ret = memcmp(rulea->src_id, ruleb->src_id, + (sizeof(int) * rulea->num_src)); + if (!ret && (rulea->num_dst == ruleb->num_dst)) + ret = memcmp(rulea->dst_node, ruleb->dst_node, + (sizeof(int) * rulea->num_dst)); + if (!ret && (rulea->dst_bw == ruleb->dst_bw) && + (rulea->op == ruleb->op) && (rulea->thresh == ruleb->thresh)) + ret = 0; + + return ret; +} + +void msm_rule_unregister(int num_rules, struct bus_rule_type *rule, + struct notifier_block *nb) +{ + int i; + struct rule_node_info *node = NULL; + struct rule_node_info *node_tmp = NULL; + struct rules_def *node_rule; + struct rules_def *node_rule_tmp; + bool match_found = false; + + if (!rule) + return; + + mutex_lock(&msm_bus_rules_lock); + if (nb) { + node = get_node(NB_ID, nb); + if (!node) { + pr_err("%s: Can't find node", __func__); + goto exit_unregister_rule; + } + + list_for_each_entry_safe(node_rule, node_rule_tmp, + &node->node_rules, link) { + list_del(&node_rule->link); + kfree(node_rule); + node->num_rules--; + } + raw_notifier_chain_unregister(&node->rule_notify_list, nb); + } else { + for (i = 0; i < num_rules; i++) { + match_found = false; + + list_for_each_entry(node, &node_list, link) { + list_for_each_entry_safe(node_rule, + node_rule_tmp, &node->node_rules, link) { + if (comp_rules(&node_rule->rule_ops, + &rule[i]) == 0) { + list_del(&node_rule->link); + kfree(node_rule); + match_found = true; + node->num_rules--; + list_sort(NULL, + &node->node_rules, + node_rules_compare); + break; + } + } + } + } + } + + list_for_each_entry_safe(node, node_tmp, + &node_list, link) { + if (!node->num_rules) { + pr_debug("Deleting Rule node %d", node->id); + list_del(&node->link); + kfree(node); + } + } +exit_unregister_rule: + mutex_unlock(&msm_bus_rules_lock); +} + +bool msm_rule_are_rules_registered(void) +{ + bool ret = false; + + if (list_empty(&node_list)) + 
ret = false; + else + ret = true; + + return ret; +} + diff --git a/drivers/soc/qcom/msm_bus/msm_buspm_coresight.c b/drivers/soc/qcom/msm_bus/msm_buspm_coresight.c new file mode 100644 index 000000000000..cf8216c860df --- /dev/null +++ b/drivers/soc/qcom/msm_bus/msm_buspm_coresight.c @@ -0,0 +1,158 @@ +/* Copyright (c) 2013, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/clk.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/platform_device.h> +#include <linux/err.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/uaccess.h> +#include <linux/miscdevice.h> +#include <linux/of_coresight.h> +#include <linux/coresight.h> +#include <linux/io.h> +#include <linux/of.h> + +struct msmbus_coresight_drvdata { + struct device *dev; + struct coresight_device *csdev; + struct clk *clk; + const char *clk_name; + const char *clknode; +}; + +static int msmbus_coresight_enable(struct coresight_device *csdev) +{ + struct msmbus_coresight_drvdata *drvdata = + dev_get_drvdata(csdev->dev.parent); + + return clk_prepare_enable(drvdata->clk); +} + +static void msmbus_coresight_disable(struct coresight_device *csdev) +{ + struct msmbus_coresight_drvdata *drvdata = + dev_get_drvdata(csdev->dev.parent); + + clk_disable_unprepare(drvdata->clk); +} + +static const struct coresight_ops_source msmbus_coresight_source_ops = { + .enable = msmbus_coresight_enable, + .disable = msmbus_coresight_disable, +}; + +static const struct coresight_ops msmbus_coresight_cs_ops = { + 
.source_ops = &msmbus_coresight_source_ops, +}; + +void msmbus_coresight_remove(struct platform_device *pdev) +{ + struct msmbus_coresight_drvdata *drvdata = platform_get_drvdata(pdev); + + msmbus_coresight_disable(drvdata->csdev); + coresight_unregister(drvdata->csdev); + devm_kfree(&pdev->dev, drvdata); + platform_set_drvdata(pdev, NULL); +} +EXPORT_SYMBOL(msmbus_coresight_remove); + +static int buspm_of_get_clk(struct device_node *of_node, + struct msmbus_coresight_drvdata *drvdata) +{ + if (of_property_read_string(of_node, "qcom,fabclk-dual", + &drvdata->clk_name)) { + pr_err("Error: Unable to find clock from of_node\n"); + return -EINVAL; + } + + if (of_property_read_string(of_node, "label", &drvdata->clknode)) { + pr_err("Error: Unable to find clock-node from of_node\n"); + return -EINVAL; + } + + drvdata->clk = clk_get_sys(drvdata->clknode, drvdata->clk_name); + if (IS_ERR(drvdata->clk)) { + pr_err("Error: clk_get_sys failed for: %s\n", + drvdata->clknode); + return -EINVAL; + } + + return 0; +} + +int msmbus_coresight_init(struct platform_device *pdev) +{ + int ret; + struct device *dev = &pdev->dev; + struct coresight_platform_data *pdata; + struct msmbus_coresight_drvdata *drvdata; + struct coresight_desc *desc; + + if (pdev->dev.of_node) { + pdata = of_get_coresight_platform_data(dev, pdev->dev.of_node); + if (IS_ERR(pdata)) + return PTR_ERR(pdata); + pdev->dev.platform_data = pdata; + } + + drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL); + if (!drvdata) { + pr_err("coresight: Alloc for drvdata failed\n"); + return -ENOMEM; + } + + drvdata->dev = &pdev->dev; + platform_set_drvdata(pdev, drvdata); + ret = buspm_of_get_clk(pdev->dev.of_node, drvdata); + if (ret) { + pr_err("Error getting clocks\n"); + ret = -ENXIO; + goto err1; + } + + desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL); + if (!desc) { + pr_err("coresight: Error allocating memory\n"); + ret = -ENOMEM; + goto err1; + } + + desc->type = CORESIGHT_DEV_TYPE_SOURCE; + 
desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_BUS; + desc->ops = &msmbus_coresight_cs_ops; + desc->pdata = pdev->dev.platform_data; + desc->dev = &pdev->dev; + desc->owner = THIS_MODULE; + drvdata->csdev = coresight_register(desc); + if (IS_ERR(drvdata->csdev)) { + pr_err("coresight: Coresight register failed\n"); + ret = PTR_ERR(drvdata->csdev); + goto err0; + } + + dev_info(dev, "msmbus_coresight initialized\n"); + + return 0; +err0: + devm_kfree(dev, desc); +err1: + devm_kfree(dev, drvdata); + platform_set_drvdata(pdev, NULL); + return ret; +} +EXPORT_SYMBOL(msmbus_coresight_init); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("MSM BusPM CoreSight Driver"); diff --git a/drivers/soc/qcom/msm_bus/msm_buspm_coresight_adhoc.c b/drivers/soc/qcom/msm_bus/msm_buspm_coresight_adhoc.c new file mode 100644 index 000000000000..c154878ed621 --- /dev/null +++ b/drivers/soc/qcom/msm_bus/msm_buspm_coresight_adhoc.c @@ -0,0 +1,189 @@ +/* Copyright (c) 2014 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/clk.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/platform_device.h> +#include <linux/err.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/uaccess.h> +#include <linux/miscdevice.h> +#include <linux/of_coresight.h> +#include <linux/coresight.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/list.h> + +struct msmbus_coresight_adhoc_clock_drvdata { + int id; + struct clk *clk; + struct list_head list; +}; + +struct msmbus_coresight_adhoc_drvdata { + struct device *dev; + struct coresight_device *csdev; + struct coresight_desc *desc; + struct list_head clocks; +}; + +static int msmbus_coresight_enable_adhoc(struct coresight_device *csdev) +{ + struct msmbus_coresight_adhoc_clock_drvdata *clk; + struct msmbus_coresight_adhoc_drvdata *drvdata = + dev_get_drvdata(csdev->dev.parent); + long rate; + + list_for_each_entry(clk, &drvdata->clocks, list) { + if (clk->id == csdev->id) { + rate = clk_round_rate(clk->clk, 1L); + clk_set_rate(clk->clk, rate); + return clk_prepare_enable(clk->clk); + } + } + + return -ENOENT; +} + +static void msmbus_coresight_disable_adhoc(struct coresight_device *csdev) +{ + struct msmbus_coresight_adhoc_clock_drvdata *clk; + struct msmbus_coresight_adhoc_drvdata *drvdata = + dev_get_drvdata(csdev->dev.parent); + + list_for_each_entry(clk, &drvdata->clocks, list) { + if (clk->id == csdev->id) + clk_disable_unprepare(clk->clk); + } +} + +static const struct coresight_ops_source msmbus_coresight_adhoc_source_ops = { + .enable = msmbus_coresight_enable_adhoc, + .disable = msmbus_coresight_disable_adhoc, +}; + +static const struct coresight_ops msmbus_coresight_cs_ops = { + .source_ops = &msmbus_coresight_adhoc_source_ops, +}; + +void msmbus_coresight_remove_adhoc(struct platform_device *pdev) +{ + struct msmbus_coresight_adhoc_clock_drvdata *clk, *next_clk; + struct msmbus_coresight_adhoc_drvdata *drvdata = + platform_get_drvdata(pdev); + + 
msmbus_coresight_disable_adhoc(drvdata->csdev); + coresight_unregister(drvdata->csdev); + list_for_each_entry_safe(clk, next_clk, &drvdata->clocks, list) { + list_del(&clk->list); + devm_kfree(&pdev->dev, clk); + } + devm_kfree(&pdev->dev, drvdata->desc); + devm_kfree(&pdev->dev, drvdata); + platform_set_drvdata(pdev, NULL); +} +EXPORT_SYMBOL(msmbus_coresight_remove_adhoc); + +static int buspm_of_get_clk_adhoc(struct device_node *of_node, + struct msmbus_coresight_adhoc_drvdata *drvdata, int id) +{ + struct msmbus_coresight_adhoc_clock_drvdata *clk; + clk = devm_kzalloc(drvdata->dev, sizeof(*clk), GFP_KERNEL); + + if (!clk) + return -ENOMEM; + + clk->id = id; + + clk->clk = of_clk_get_by_name(of_node, "bus_clk"); + if (IS_ERR(clk->clk)) { + pr_err("Error: unable to get clock for coresight node %d\n", + id); + goto err; + } + + list_add(&clk->list, &drvdata->clocks); + return 0; + +err: + devm_kfree(drvdata->dev, clk); + return -EINVAL; +} + +int msmbus_coresight_init_adhoc(struct platform_device *pdev, + struct device_node *of_node) +{ + int ret; + struct device *dev = &pdev->dev; + struct coresight_platform_data *pdata; + struct msmbus_coresight_adhoc_drvdata *drvdata; + struct coresight_desc *desc; + + pdata = of_get_coresight_platform_data(dev, of_node); + if (IS_ERR(pdata)) + return PTR_ERR(pdata); + + drvdata = platform_get_drvdata(pdev); + if (IS_ERR_OR_NULL(drvdata)) { + drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL); + if (!drvdata) { + pr_err("coresight: Alloc for drvdata failed\n"); + return -ENOMEM; + } + INIT_LIST_HEAD(&drvdata->clocks); + drvdata->dev = &pdev->dev; + platform_set_drvdata(pdev, drvdata); + } + ret = buspm_of_get_clk_adhoc(of_node, drvdata, pdata->id); + if (ret) { + pr_err("Error getting clocks\n"); + ret = -ENXIO; + goto err1; + } + + desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL); + if (!desc) { + pr_err("coresight: Error allocating memory\n"); + ret = -ENOMEM; + goto err1; + } + + desc->type = 
CORESIGHT_DEV_TYPE_SOURCE; + desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_BUS; + desc->ops = &msmbus_coresight_cs_ops; + desc->pdata = pdata; + desc->dev = &pdev->dev; + desc->owner = THIS_MODULE; + drvdata->desc = desc; + drvdata->csdev = coresight_register(desc); + if (IS_ERR(drvdata->csdev)) { + pr_err("coresight: Coresight register failed\n"); + ret = PTR_ERR(drvdata->csdev); + goto err0; + } + + dev_info(dev, "msmbus_coresight initialized\n"); + + return 0; +err0: + devm_kfree(dev, desc); +err1: + devm_kfree(dev, drvdata); + platform_set_drvdata(pdev, NULL); + return ret; +} +EXPORT_SYMBOL(msmbus_coresight_init_adhoc); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("MSM BusPM Adhoc CoreSight Driver"); diff --git a/drivers/soc/qcom/msm_bus/qcom_rpm_msm_bus.c b/drivers/soc/qcom/msm_bus/qcom_rpm_msm_bus.c new file mode 100644 index 000000000000..cc786252223e --- /dev/null +++ b/drivers/soc/qcom/msm_bus/qcom_rpm_msm_bus.c @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2015, The Linux foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License rev 2 and + * only rev 2 as published by the free Software foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or fITNESS fOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/err.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/msm-bus.h> +#include <linux/soc/qcom/smd-rpm.h> + +struct qcom_rpm_msm_bus_info { + struct qcom_smd_rpm *rpm; +}; + +static struct qcom_rpm_msm_bus_info rpm_bus_info; + +int qcom_rpm_bus_send_message(int ctx, int rsc_type, int id, + struct qcom_msm_bus_req *req) +{ + return qcom_rpm_smd_write(rpm_bus_info.rpm, ctx, rsc_type, id, req, + sizeof(*req)); +} +EXPORT_SYMBOL(qcom_rpm_bus_send_message); + +static int rpm_msm_bus_probe(struct platform_device *pdev) +{ + rpm_bus_info.rpm = dev_get_drvdata(pdev->dev.parent); + if (!rpm_bus_info.rpm) { + dev_err(&pdev->dev, "unable to retrieve handle to rpm\n"); + return -ENODEV; + } + + return 0; +} + +static const struct of_device_id rpm_msm_bus_dt_match[] = { + { .compatible = "qcom,rpm-msm-bus", }, + { }, +}; + +MODULE_DEVICE_TABLE(of, rpm_msm_bus_dt_match); + +static struct platform_driver rpm_msm_bus_driver = { + .driver = { + .name = "rpm-msm-bus", + .of_match_table = rpm_msm_bus_dt_match, + }, + .probe = rpm_msm_bus_probe, +}; + +module_platform_driver(rpm_msm_bus_driver); + +MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>"); +MODULE_DESCRIPTION("QCOM RPM msm bus driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c index 6609d7e0edb0..0dcf1bf33126 100644 --- a/drivers/soc/qcom/smd-rpm.c +++ b/drivers/soc/qcom/smd-rpm.c @@ -19,7 +19,7 @@ #include <linux/interrupt.h> #include <linux/slab.h> -#include <linux/soc/qcom/smd.h> +#include <linux/rpmsg.h> #include <linux/soc/qcom/smd-rpm.h> #define RPM_REQUEST_TIMEOUT (5 * HZ) @@ -32,7 +32,7 @@ * @ack_status: result of the rpm request */ struct qcom_smd_rpm { - struct qcom_smd_channel *rpm_channel; + struct rpmsg_endpoint *rpm_channel; struct device *dev; struct completion ack; @@ -133,7 +133,7 @@ int qcom_rpm_smd_write(struct 
qcom_smd_rpm *rpm, pkt->req.data_len = cpu_to_le32(count); memcpy(pkt->payload, buf, count); - ret = qcom_smd_send(rpm->rpm_channel, pkt, size); + ret = rpmsg_send(rpm->rpm_channel, pkt, size); if (ret) goto out; @@ -150,14 +150,16 @@ out: } EXPORT_SYMBOL(qcom_rpm_smd_write); -static int qcom_smd_rpm_callback(struct qcom_smd_channel *channel, - const void *data, - size_t count) +static int qcom_smd_rpm_callback(struct rpmsg_device *rpdev, + void *data, + int count, + void *priv, + u32 addr) { const struct qcom_rpm_header *hdr = data; size_t hdr_length = le32_to_cpu(hdr->length); const struct qcom_rpm_message *msg; - struct qcom_smd_rpm *rpm = qcom_smd_get_drvdata(channel); + struct qcom_smd_rpm *rpm = dev_get_drvdata(&rpdev->dev); const u8 *buf = data + sizeof(struct qcom_rpm_header); const u8 *end = buf + hdr_length; char msgbuf[32]; @@ -196,29 +198,27 @@ static int qcom_smd_rpm_callback(struct qcom_smd_channel *channel, return 0; } -static int qcom_smd_rpm_probe(struct qcom_smd_device *sdev) +static int qcom_smd_rpm_probe(struct rpmsg_device *rpdev) { struct qcom_smd_rpm *rpm; - rpm = devm_kzalloc(&sdev->dev, sizeof(*rpm), GFP_KERNEL); + rpm = devm_kzalloc(&rpdev->dev, sizeof(*rpm), GFP_KERNEL); if (!rpm) return -ENOMEM; mutex_init(&rpm->lock); init_completion(&rpm->ack); - rpm->dev = &sdev->dev; - rpm->rpm_channel = sdev->channel; - qcom_smd_set_drvdata(sdev->channel, rpm); + rpm->dev = &rpdev->dev; + rpm->rpm_channel = rpdev->ept; + dev_set_drvdata(&rpdev->dev, rpm); - dev_set_drvdata(&sdev->dev, rpm); - - return of_platform_populate(sdev->dev.of_node, NULL, NULL, &sdev->dev); + return of_platform_populate(rpdev->dev.of_node, NULL, NULL, &rpdev->dev); } -static void qcom_smd_rpm_remove(struct qcom_smd_device *sdev) +static void qcom_smd_rpm_remove(struct rpmsg_device *rpdev) { - of_platform_depopulate(&sdev->dev); + of_platform_depopulate(&rpdev->dev); } static const struct of_device_id qcom_smd_rpm_of_match[] = { @@ -229,26 +229,25 @@ static const struct 
of_device_id qcom_smd_rpm_of_match[] = { }; MODULE_DEVICE_TABLE(of, qcom_smd_rpm_of_match); -static struct qcom_smd_driver qcom_smd_rpm_driver = { +static struct rpmsg_driver qcom_smd_rpm_driver = { .probe = qcom_smd_rpm_probe, .remove = qcom_smd_rpm_remove, .callback = qcom_smd_rpm_callback, - .driver = { + .drv = { .name = "qcom_smd_rpm", - .owner = THIS_MODULE, .of_match_table = qcom_smd_rpm_of_match, }, }; static int __init qcom_smd_rpm_init(void) { - return qcom_smd_driver_register(&qcom_smd_rpm_driver); + return register_rpmsg_driver(&qcom_smd_rpm_driver); } arch_initcall(qcom_smd_rpm_init); static void __exit qcom_smd_rpm_exit(void) { - qcom_smd_driver_unregister(&qcom_smd_rpm_driver); + unregister_rpmsg_driver(&qcom_smd_rpm_driver); } module_exit(qcom_smd_rpm_exit); diff --git a/drivers/soc/qcom/smd.c b/drivers/soc/qcom/smd.c deleted file mode 100644 index 322034ab9d37..000000000000 --- a/drivers/soc/qcom/smd.c +++ /dev/null @@ -1,1560 +0,0 @@ -/* - * Copyright (c) 2015, Sony Mobile Communications AB. - * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#include <linux/interrupt.h> -#include <linux/io.h> -#include <linux/mfd/syscon.h> -#include <linux/module.h> -#include <linux/of_irq.h> -#include <linux/of_platform.h> -#include <linux/platform_device.h> -#include <linux/regmap.h> -#include <linux/sched.h> -#include <linux/slab.h> -#include <linux/soc/qcom/smd.h> -#include <linux/soc/qcom/smem.h> -#include <linux/wait.h> - -/* - * The Qualcomm Shared Memory communication solution provides point-to-point - * channels for clients to send and receive streaming or packet based data. - * - * Each channel consists of a control item (channel info) and a ring buffer - * pair. The channel info carry information related to channel state, flow - * control and the offsets within the ring buffer. - * - * All allocated channels are listed in an allocation table, identifying the - * pair of items by name, type and remote processor. - * - * Upon creating a new channel the remote processor allocates channel info and - * ring buffer items from the smem heap and populate the allocation table. An - * interrupt is sent to the other end of the channel and a scan for new - * channels should be done. A channel never goes away, it will only change - * state. - * - * The remote processor signals it intent for bring up the communication - * channel by setting the state of its end of the channel to "opening" and - * sends out an interrupt. We detect this change and register a smd device to - * consume the channel. Upon finding a consumer we finish the handshake and the - * channel is up. - * - * Upon closing a channel, the remote processor will update the state of its - * end of the channel and signal us, we will then unregister any attached - * device and close our end of the channel. - * - * Devices attached to a channel can use the qcom_smd_send function to push - * data to the channel, this is done by copying the data into the tx ring - * buffer, updating the pointers in the channel info and signaling the remote - * processor. 
- * - * The remote processor does the equivalent when it transfer data and upon - * receiving the interrupt we check the channel info for new data and delivers - * this to the attached device. If the device is not ready to receive the data - * we leave it in the ring buffer for now. - */ - -struct smd_channel_info; -struct smd_channel_info_pair; -struct smd_channel_info_word; -struct smd_channel_info_word_pair; - -#define SMD_ALLOC_TBL_COUNT 2 -#define SMD_ALLOC_TBL_SIZE 64 - -/* - * This lists the various smem heap items relevant for the allocation table and - * smd channel entries. - */ -static const struct { - unsigned alloc_tbl_id; - unsigned info_base_id; - unsigned fifo_base_id; -} smem_items[SMD_ALLOC_TBL_COUNT] = { - { - .alloc_tbl_id = 13, - .info_base_id = 14, - .fifo_base_id = 338 - }, - { - .alloc_tbl_id = 266, - .info_base_id = 138, - .fifo_base_id = 202, - }, -}; - -/** - * struct qcom_smd_edge - representing a remote processor - * @dev: device for this edge - * @of_node: of_node handle for information related to this edge - * @edge_id: identifier of this edge - * @remote_pid: identifier of remote processor - * @irq: interrupt for signals on this edge - * @ipc_regmap: regmap handle holding the outgoing ipc register - * @ipc_offset: offset within @ipc_regmap of the register for ipc - * @ipc_bit: bit in the register at @ipc_offset of @ipc_regmap - * @channels: list of all channels detected on this edge - * @channels_lock: guard for modifications of @channels - * @allocated: array of bitmaps representing already allocated channels - * @smem_available: last available amount of smem triggering a channel scan - * @scan_work: work item for discovering new channels - * @state_work: work item for edge state changes - */ -struct qcom_smd_edge { - struct device dev; - - struct device_node *of_node; - unsigned edge_id; - unsigned remote_pid; - - int irq; - - struct regmap *ipc_regmap; - int ipc_offset; - int ipc_bit; - - struct list_head channels; - spinlock_t 
channels_lock; - - DECLARE_BITMAP(allocated[SMD_ALLOC_TBL_COUNT], SMD_ALLOC_TBL_SIZE); - - unsigned smem_available; - - wait_queue_head_t new_channel_event; - - struct work_struct scan_work; - struct work_struct state_work; -}; - -#define to_smd_edge(d) container_of(d, struct qcom_smd_edge, dev) - -/* - * SMD channel states. - */ -enum smd_channel_state { - SMD_CHANNEL_CLOSED, - SMD_CHANNEL_OPENING, - SMD_CHANNEL_OPENED, - SMD_CHANNEL_FLUSHING, - SMD_CHANNEL_CLOSING, - SMD_CHANNEL_RESET, - SMD_CHANNEL_RESET_OPENING -}; - -/** - * struct qcom_smd_channel - smd channel struct - * @edge: qcom_smd_edge this channel is living on - * @qsdev: reference to a associated smd client device - * @name: name of the channel - * @state: local state of the channel - * @remote_state: remote state of the channel - * @info: byte aligned outgoing/incoming channel info - * @info_word: word aligned outgoing/incoming channel info - * @tx_lock: lock to make writes to the channel mutually exclusive - * @fblockread_event: wakeup event tied to tx fBLOCKREADINTR - * @tx_fifo: pointer to the outgoing ring buffer - * @rx_fifo: pointer to the incoming ring buffer - * @fifo_size: size of each ring buffer - * @bounce_buffer: bounce buffer for reading wrapped packets - * @cb: callback function registered for this channel - * @recv_lock: guard for rx info modifications and cb pointer - * @pkt_size: size of the currently handled packet - * @list: lite entry for @channels in qcom_smd_edge - */ -struct qcom_smd_channel { - struct qcom_smd_edge *edge; - - struct qcom_smd_device *qsdev; - - char *name; - enum smd_channel_state state; - enum smd_channel_state remote_state; - - struct smd_channel_info_pair *info; - struct smd_channel_info_word_pair *info_word; - - struct mutex tx_lock; - wait_queue_head_t fblockread_event; - - void *tx_fifo; - void *rx_fifo; - int fifo_size; - - void *bounce_buffer; - qcom_smd_cb_t cb; - - spinlock_t recv_lock; - - int pkt_size; - - void *drvdata; - - struct list_head list; 
-}; - -/* - * Format of the smd_info smem items, for byte aligned channels. - */ -struct smd_channel_info { - __le32 state; - u8 fDSR; - u8 fCTS; - u8 fCD; - u8 fRI; - u8 fHEAD; - u8 fTAIL; - u8 fSTATE; - u8 fBLOCKREADINTR; - __le32 tail; - __le32 head; -}; - -struct smd_channel_info_pair { - struct smd_channel_info tx; - struct smd_channel_info rx; -}; - -/* - * Format of the smd_info smem items, for word aligned channels. - */ -struct smd_channel_info_word { - __le32 state; - __le32 fDSR; - __le32 fCTS; - __le32 fCD; - __le32 fRI; - __le32 fHEAD; - __le32 fTAIL; - __le32 fSTATE; - __le32 fBLOCKREADINTR; - __le32 tail; - __le32 head; -}; - -struct smd_channel_info_word_pair { - struct smd_channel_info_word tx; - struct smd_channel_info_word rx; -}; - -#define GET_RX_CHANNEL_FLAG(channel, param) \ - ({ \ - BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u8)); \ - channel->info_word ? \ - le32_to_cpu(channel->info_word->rx.param) : \ - channel->info->rx.param; \ - }) - -#define GET_RX_CHANNEL_INFO(channel, param) \ - ({ \ - BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u32)); \ - le32_to_cpu(channel->info_word ? \ - channel->info_word->rx.param : \ - channel->info->rx.param); \ - }) - -#define SET_RX_CHANNEL_FLAG(channel, param, value) \ - ({ \ - BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u8)); \ - if (channel->info_word) \ - channel->info_word->rx.param = cpu_to_le32(value); \ - else \ - channel->info->rx.param = value; \ - }) - -#define SET_RX_CHANNEL_INFO(channel, param, value) \ - ({ \ - BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u32)); \ - if (channel->info_word) \ - channel->info_word->rx.param = cpu_to_le32(value); \ - else \ - channel->info->rx.param = cpu_to_le32(value); \ - }) - -#define GET_TX_CHANNEL_FLAG(channel, param) \ - ({ \ - BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u8)); \ - channel->info_word ? 
\ - le32_to_cpu(channel->info_word->tx.param) : \ - channel->info->tx.param; \ - }) - -#define GET_TX_CHANNEL_INFO(channel, param) \ - ({ \ - BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u32)); \ - le32_to_cpu(channel->info_word ? \ - channel->info_word->tx.param : \ - channel->info->tx.param); \ - }) - -#define SET_TX_CHANNEL_FLAG(channel, param, value) \ - ({ \ - BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u8)); \ - if (channel->info_word) \ - channel->info_word->tx.param = cpu_to_le32(value); \ - else \ - channel->info->tx.param = value; \ - }) - -#define SET_TX_CHANNEL_INFO(channel, param, value) \ - ({ \ - BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u32)); \ - if (channel->info_word) \ - channel->info_word->tx.param = cpu_to_le32(value); \ - else \ - channel->info->tx.param = cpu_to_le32(value); \ - }) - -/** - * struct qcom_smd_alloc_entry - channel allocation entry - * @name: channel name - * @cid: channel index - * @flags: channel flags and edge id - * @ref_count: reference count of the channel - */ -struct qcom_smd_alloc_entry { - u8 name[20]; - __le32 cid; - __le32 flags; - __le32 ref_count; -} __packed; - -#define SMD_CHANNEL_FLAGS_EDGE_MASK 0xff -#define SMD_CHANNEL_FLAGS_STREAM BIT(8) -#define SMD_CHANNEL_FLAGS_PACKET BIT(9) - -/* - * Each smd packet contains a 20 byte header, with the first 4 being the length - * of the packet. - */ -#define SMD_PACKET_HEADER_LEN 20 - -/* - * Signal the remote processor associated with 'channel'. 
- */ -static void qcom_smd_signal_channel(struct qcom_smd_channel *channel) -{ - struct qcom_smd_edge *edge = channel->edge; - - regmap_write(edge->ipc_regmap, edge->ipc_offset, BIT(edge->ipc_bit)); -} - -/* - * Initialize the tx channel info - */ -static void qcom_smd_channel_reset(struct qcom_smd_channel *channel) -{ - SET_TX_CHANNEL_INFO(channel, state, SMD_CHANNEL_CLOSED); - SET_TX_CHANNEL_FLAG(channel, fDSR, 0); - SET_TX_CHANNEL_FLAG(channel, fCTS, 0); - SET_TX_CHANNEL_FLAG(channel, fCD, 0); - SET_TX_CHANNEL_FLAG(channel, fRI, 0); - SET_TX_CHANNEL_FLAG(channel, fHEAD, 0); - SET_TX_CHANNEL_FLAG(channel, fTAIL, 0); - SET_TX_CHANNEL_FLAG(channel, fSTATE, 1); - SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 1); - SET_TX_CHANNEL_INFO(channel, head, 0); - SET_RX_CHANNEL_INFO(channel, tail, 0); - - qcom_smd_signal_channel(channel); - - channel->state = SMD_CHANNEL_CLOSED; - channel->pkt_size = 0; -} - -/* - * Set the callback for a channel, with appropriate locking - */ -static void qcom_smd_channel_set_callback(struct qcom_smd_channel *channel, - qcom_smd_cb_t cb) -{ - unsigned long flags; - - spin_lock_irqsave(&channel->recv_lock, flags); - channel->cb = cb; - spin_unlock_irqrestore(&channel->recv_lock, flags); -}; - -/* - * Calculate the amount of data available in the rx fifo - */ -static size_t qcom_smd_channel_get_rx_avail(struct qcom_smd_channel *channel) -{ - unsigned head; - unsigned tail; - - head = GET_RX_CHANNEL_INFO(channel, head); - tail = GET_RX_CHANNEL_INFO(channel, tail); - - return (head - tail) & (channel->fifo_size - 1); -} - -/* - * Set tx channel state and inform the remote processor - */ -static void qcom_smd_channel_set_state(struct qcom_smd_channel *channel, - int state) -{ - struct qcom_smd_edge *edge = channel->edge; - bool is_open = state == SMD_CHANNEL_OPENED; - - if (channel->state == state) - return; - - dev_dbg(&edge->dev, "set_state(%s, %d)\n", channel->name, state); - - SET_TX_CHANNEL_FLAG(channel, fDSR, is_open); - 
SET_TX_CHANNEL_FLAG(channel, fCTS, is_open); - SET_TX_CHANNEL_FLAG(channel, fCD, is_open); - - SET_TX_CHANNEL_INFO(channel, state, state); - SET_TX_CHANNEL_FLAG(channel, fSTATE, 1); - - channel->state = state; - qcom_smd_signal_channel(channel); -} - -/* - * Copy count bytes of data using 32bit accesses, if that's required. - */ -static void smd_copy_to_fifo(void __iomem *dst, - const void *src, - size_t count, - bool word_aligned) -{ - if (word_aligned) { - __iowrite32_copy(dst, src, count / sizeof(u32)); - } else { - memcpy_toio(dst, src, count); - } -} - -/* - * Copy count bytes of data using 32bit accesses, if that is required. - */ -static void smd_copy_from_fifo(void *dst, - const void __iomem *src, - size_t count, - bool word_aligned) -{ - if (word_aligned) { - __ioread32_copy(dst, src, count / sizeof(u32)); - } else { - memcpy_fromio(dst, src, count); - } -} - -/* - * Read count bytes of data from the rx fifo into buf, but don't advance the - * tail. - */ -static size_t qcom_smd_channel_peek(struct qcom_smd_channel *channel, - void *buf, size_t count) -{ - bool word_aligned; - unsigned tail; - size_t len; - - word_aligned = channel->info_word; - tail = GET_RX_CHANNEL_INFO(channel, tail); - - len = min_t(size_t, count, channel->fifo_size - tail); - if (len) { - smd_copy_from_fifo(buf, - channel->rx_fifo + tail, - len, - word_aligned); - } - - if (len != count) { - smd_copy_from_fifo(buf + len, - channel->rx_fifo, - count - len, - word_aligned); - } - - return count; -} - -/* - * Advance the rx tail by count bytes. 
- */ -static void qcom_smd_channel_advance(struct qcom_smd_channel *channel, - size_t count) -{ - unsigned tail; - - tail = GET_RX_CHANNEL_INFO(channel, tail); - tail += count; - tail &= (channel->fifo_size - 1); - SET_RX_CHANNEL_INFO(channel, tail, tail); -} - -/* - * Read out a single packet from the rx fifo and deliver it to the device - */ -static int qcom_smd_channel_recv_single(struct qcom_smd_channel *channel) -{ - unsigned tail; - size_t len; - void *ptr; - int ret; - - if (!channel->cb) - return 0; - - tail = GET_RX_CHANNEL_INFO(channel, tail); - - /* Use bounce buffer if the data wraps */ - if (tail + channel->pkt_size >= channel->fifo_size) { - ptr = channel->bounce_buffer; - len = qcom_smd_channel_peek(channel, ptr, channel->pkt_size); - } else { - ptr = channel->rx_fifo + tail; - len = channel->pkt_size; - } - - ret = channel->cb(channel, ptr, len); - if (ret < 0) - return ret; - - /* Only forward the tail if the client consumed the data */ - qcom_smd_channel_advance(channel, len); - - channel->pkt_size = 0; - - return 0; -} - -/* - * Per channel interrupt handling - */ -static bool qcom_smd_channel_intr(struct qcom_smd_channel *channel) -{ - bool need_state_scan = false; - int remote_state; - __le32 pktlen; - int avail; - int ret; - - /* Handle state changes */ - remote_state = GET_RX_CHANNEL_INFO(channel, state); - if (remote_state != channel->remote_state) { - channel->remote_state = remote_state; - need_state_scan = true; - } - /* Indicate that we have seen any state change */ - SET_RX_CHANNEL_FLAG(channel, fSTATE, 0); - - /* Signal waiting qcom_smd_send() about the interrupt */ - if (!GET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR)) - wake_up_interruptible(&channel->fblockread_event); - - /* Don't consume any data until we've opened the channel */ - if (channel->state != SMD_CHANNEL_OPENED) - goto out; - - /* Indicate that we've seen the new data */ - SET_RX_CHANNEL_FLAG(channel, fHEAD, 0); - - /* Consume data */ - for (;;) { - avail = 
qcom_smd_channel_get_rx_avail(channel); - - if (!channel->pkt_size && avail >= SMD_PACKET_HEADER_LEN) { - qcom_smd_channel_peek(channel, &pktlen, sizeof(pktlen)); - qcom_smd_channel_advance(channel, SMD_PACKET_HEADER_LEN); - channel->pkt_size = le32_to_cpu(pktlen); - } else if (channel->pkt_size && avail >= channel->pkt_size) { - ret = qcom_smd_channel_recv_single(channel); - if (ret) - break; - } else { - break; - } - } - - /* Indicate that we have seen and updated tail */ - SET_RX_CHANNEL_FLAG(channel, fTAIL, 1); - - /* Signal the remote that we've consumed the data (if requested) */ - if (!GET_RX_CHANNEL_FLAG(channel, fBLOCKREADINTR)) { - /* Ensure ordering of channel info updates */ - wmb(); - - qcom_smd_signal_channel(channel); - } - -out: - return need_state_scan; -} - -/* - * The edge interrupts are triggered by the remote processor on state changes, - * channel info updates or when new channels are created. - */ -static irqreturn_t qcom_smd_edge_intr(int irq, void *data) -{ - struct qcom_smd_edge *edge = data; - struct qcom_smd_channel *channel; - unsigned available; - bool kick_scanner = false; - bool kick_state = false; - - /* - * Handle state changes or data on each of the channels on this edge - */ - spin_lock(&edge->channels_lock); - list_for_each_entry(channel, &edge->channels, list) { - spin_lock(&channel->recv_lock); - kick_state |= qcom_smd_channel_intr(channel); - spin_unlock(&channel->recv_lock); - } - spin_unlock(&edge->channels_lock); - - /* - * Creating a new channel requires allocating an smem entry, so we only - * have to scan if the amount of available space in smem have changed - * since last scan. 
- */ - available = qcom_smem_get_free_space(edge->remote_pid); - if (available != edge->smem_available) { - edge->smem_available = available; - kick_scanner = true; - } - - if (kick_scanner) - schedule_work(&edge->scan_work); - if (kick_state) - schedule_work(&edge->state_work); - - return IRQ_HANDLED; -} - -/* - * Delivers any outstanding packets in the rx fifo, can be used after probe of - * the clients to deliver any packets that wasn't delivered before the client - * was setup. - */ -static void qcom_smd_channel_resume(struct qcom_smd_channel *channel) -{ - unsigned long flags; - - spin_lock_irqsave(&channel->recv_lock, flags); - qcom_smd_channel_intr(channel); - spin_unlock_irqrestore(&channel->recv_lock, flags); -} - -/* - * Calculate how much space is available in the tx fifo. - */ -static size_t qcom_smd_get_tx_avail(struct qcom_smd_channel *channel) -{ - unsigned head; - unsigned tail; - unsigned mask = channel->fifo_size - 1; - - head = GET_TX_CHANNEL_INFO(channel, head); - tail = GET_TX_CHANNEL_INFO(channel, tail); - - return mask - ((head - tail) & mask); -} - -/* - * Write count bytes of data into channel, possibly wrapping in the ring buffer - */ -static int qcom_smd_write_fifo(struct qcom_smd_channel *channel, - const void *data, - size_t count) -{ - bool word_aligned; - unsigned head; - size_t len; - - word_aligned = channel->info_word; - head = GET_TX_CHANNEL_INFO(channel, head); - - len = min_t(size_t, count, channel->fifo_size - head); - if (len) { - smd_copy_to_fifo(channel->tx_fifo + head, - data, - len, - word_aligned); - } - - if (len != count) { - smd_copy_to_fifo(channel->tx_fifo, - data + len, - count - len, - word_aligned); - } - - head += count; - head &= (channel->fifo_size - 1); - SET_TX_CHANNEL_INFO(channel, head, head); - - return count; -} - -/** - * qcom_smd_send - write data to smd channel - * @channel: channel handle - * @data: buffer of data to write - * @len: number of bytes to write - * - * This is a blocking write of len 
bytes into the channel's tx ring buffer and - * signal the remote end. It will sleep until there is enough space available - * in the tx buffer, utilizing the fBLOCKREADINTR signaling mechanism to avoid - * polling. - */ -int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len) -{ - __le32 hdr[5] = { cpu_to_le32(len), }; - int tlen = sizeof(hdr) + len; - int ret; - - /* Word aligned channels only accept word size aligned data */ - if (channel->info_word && len % 4) - return -EINVAL; - - /* Reject packets that are too big */ - if (tlen >= channel->fifo_size) - return -EINVAL; - - ret = mutex_lock_interruptible(&channel->tx_lock); - if (ret) - return ret; - - while (qcom_smd_get_tx_avail(channel) < tlen) { - if (channel->state != SMD_CHANNEL_OPENED) { - ret = -EPIPE; - goto out; - } - - SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 0); - - ret = wait_event_interruptible(channel->fblockread_event, - qcom_smd_get_tx_avail(channel) >= tlen || - channel->state != SMD_CHANNEL_OPENED); - if (ret) - goto out; - - SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 1); - } - - SET_TX_CHANNEL_FLAG(channel, fTAIL, 0); - - qcom_smd_write_fifo(channel, hdr, sizeof(hdr)); - qcom_smd_write_fifo(channel, data, len); - - SET_TX_CHANNEL_FLAG(channel, fHEAD, 1); - - /* Ensure ordering of channel info updates */ - wmb(); - - qcom_smd_signal_channel(channel); - -out: - mutex_unlock(&channel->tx_lock); - - return ret; -} -EXPORT_SYMBOL(qcom_smd_send); - -static struct qcom_smd_device *to_smd_device(struct device *dev) -{ - return container_of(dev, struct qcom_smd_device, dev); -} - -static struct qcom_smd_driver *to_smd_driver(struct device *dev) -{ - struct qcom_smd_device *qsdev = to_smd_device(dev); - - return container_of(qsdev->dev.driver, struct qcom_smd_driver, driver); -} - -static int qcom_smd_dev_match(struct device *dev, struct device_driver *drv) -{ - struct qcom_smd_device *qsdev = to_smd_device(dev); - struct qcom_smd_driver *qsdrv = container_of(drv, struct 
qcom_smd_driver, driver); - const struct qcom_smd_id *match = qsdrv->smd_match_table; - const char *name = qsdev->channel->name; - - if (match) { - while (match->name[0]) { - if (!strcmp(match->name, name)) - return 1; - match++; - } - } - - return of_driver_match_device(dev, drv); -} - -/* - * Helper for opening a channel - */ -static int qcom_smd_channel_open(struct qcom_smd_channel *channel, - qcom_smd_cb_t cb) -{ - size_t bb_size; - - /* - * Packets are maximum 4k, but reduce if the fifo is smaller - */ - bb_size = min(channel->fifo_size, SZ_4K); - channel->bounce_buffer = kmalloc(bb_size, GFP_KERNEL); - if (!channel->bounce_buffer) - return -ENOMEM; - - qcom_smd_channel_set_callback(channel, cb); - qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENING); - qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENED); - - return 0; -} - -/* - * Helper for closing and resetting a channel - */ -static void qcom_smd_channel_close(struct qcom_smd_channel *channel) -{ - qcom_smd_channel_set_callback(channel, NULL); - - kfree(channel->bounce_buffer); - channel->bounce_buffer = NULL; - - qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED); - qcom_smd_channel_reset(channel); -} - -/* - * Probe the smd client. - * - * The remote side have indicated that it want the channel to be opened, so - * complete the state handshake and probe our client driver. - */ -static int qcom_smd_dev_probe(struct device *dev) -{ - struct qcom_smd_device *qsdev = to_smd_device(dev); - struct qcom_smd_driver *qsdrv = to_smd_driver(dev); - struct qcom_smd_channel *channel = qsdev->channel; - int ret; - - ret = qcom_smd_channel_open(channel, qsdrv->callback); - if (ret) - return ret; - - ret = qsdrv->probe(qsdev); - if (ret) - goto err; - - qcom_smd_channel_resume(channel); - - return 0; - -err: - dev_err(&qsdev->dev, "probe failed\n"); - - qcom_smd_channel_close(channel); - return ret; -} - -/* - * Remove the smd client. 
- * - * The channel is going away, for some reason, so remove the smd client and - * reset the channel state. - */ -static int qcom_smd_dev_remove(struct device *dev) -{ - struct qcom_smd_device *qsdev = to_smd_device(dev); - struct qcom_smd_driver *qsdrv = to_smd_driver(dev); - struct qcom_smd_channel *channel = qsdev->channel; - - qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSING); - - /* - * Make sure we don't race with the code receiving data. - */ - qcom_smd_channel_set_callback(channel, NULL); - - /* Wake up any sleepers in qcom_smd_send() */ - wake_up_interruptible(&channel->fblockread_event); - - /* - * We expect that the client might block in remove() waiting for any - * outstanding calls to qcom_smd_send() to wake up and finish. - */ - if (qsdrv->remove) - qsdrv->remove(qsdev); - - /* The client is now gone, close the primary channel */ - qcom_smd_channel_close(channel); - channel->qsdev = NULL; - - return 0; -} - -static struct bus_type qcom_smd_bus = { - .name = "qcom_smd", - .match = qcom_smd_dev_match, - .probe = qcom_smd_dev_probe, - .remove = qcom_smd_dev_remove, -}; - -/* - * Release function for the qcom_smd_device object. - */ -static void qcom_smd_release_device(struct device *dev) -{ - struct qcom_smd_device *qsdev = to_smd_device(dev); - - kfree(qsdev); -} - -/* - * Finds the device_node for the smd child interested in this channel. - */ -static struct device_node *qcom_smd_match_channel(struct device_node *edge_node, - const char *channel) -{ - struct device_node *child; - const char *name; - const char *key; - int ret; - - for_each_available_child_of_node(edge_node, child) { - key = "qcom,smd-channels"; - ret = of_property_read_string(child, key, &name); - if (ret) - continue; - - if (strcmp(name, channel) == 0) - return child; - } - - return NULL; -} - -/* - * Create a smd client device for channel that is being opened. 
- */ -static int qcom_smd_create_device(struct qcom_smd_channel *channel) -{ - struct qcom_smd_device *qsdev; - struct qcom_smd_edge *edge = channel->edge; - struct device_node *node; - int ret; - - if (channel->qsdev) - return -EEXIST; - - dev_dbg(&edge->dev, "registering '%s'\n", channel->name); - - qsdev = kzalloc(sizeof(*qsdev), GFP_KERNEL); - if (!qsdev) - return -ENOMEM; - - node = qcom_smd_match_channel(edge->of_node, channel->name); - dev_set_name(&qsdev->dev, "%s.%s", - edge->of_node->name, - node ? node->name : channel->name); - - qsdev->dev.parent = &edge->dev; - qsdev->dev.bus = &qcom_smd_bus; - qsdev->dev.release = qcom_smd_release_device; - qsdev->dev.of_node = node; - - qsdev->channel = channel; - - channel->qsdev = qsdev; - - ret = device_register(&qsdev->dev); - if (ret) { - dev_err(&edge->dev, "device_register failed: %d\n", ret); - put_device(&qsdev->dev); - } - - return ret; -} - -/* - * Destroy a smd client device for a channel that's going away. - */ -static void qcom_smd_destroy_device(struct qcom_smd_channel *channel) -{ - struct device *dev; - - BUG_ON(!channel->qsdev); - - dev = &channel->qsdev->dev; - - device_unregister(dev); - of_node_put(dev->of_node); - put_device(dev); -} - -/** - * qcom_smd_driver_register - register a smd driver - * @qsdrv: qcom_smd_driver struct - */ -int qcom_smd_driver_register(struct qcom_smd_driver *qsdrv) -{ - qsdrv->driver.bus = &qcom_smd_bus; - return driver_register(&qsdrv->driver); -} -EXPORT_SYMBOL(qcom_smd_driver_register); - -void *qcom_smd_get_drvdata(struct qcom_smd_channel *channel) -{ - return channel->drvdata; -} -EXPORT_SYMBOL(qcom_smd_get_drvdata); - -void qcom_smd_set_drvdata(struct qcom_smd_channel *channel, void *data) -{ - channel->drvdata = data; -} -EXPORT_SYMBOL(qcom_smd_set_drvdata); - -/** - * qcom_smd_driver_unregister - unregister a smd driver - * @qsdrv: qcom_smd_driver struct - */ -void qcom_smd_driver_unregister(struct qcom_smd_driver *qsdrv) -{ - driver_unregister(&qsdrv->driver); 
-} -EXPORT_SYMBOL(qcom_smd_driver_unregister); - -static struct qcom_smd_channel * -qcom_smd_find_channel(struct qcom_smd_edge *edge, const char *name) -{ - struct qcom_smd_channel *channel; - struct qcom_smd_channel *ret = NULL; - unsigned long flags; - unsigned state; - - spin_lock_irqsave(&edge->channels_lock, flags); - list_for_each_entry(channel, &edge->channels, list) { - if (strcmp(channel->name, name)) - continue; - - state = GET_RX_CHANNEL_INFO(channel, state); - if (state != SMD_CHANNEL_OPENING && - state != SMD_CHANNEL_OPENED) - continue; - - ret = channel; - break; - } - spin_unlock_irqrestore(&edge->channels_lock, flags); - - return ret; -} - -/** - * qcom_smd_open_channel() - claim additional channels on the same edge - * @sdev: smd_device handle - * @name: channel name - * @cb: callback method to use for incoming data - * - * Returns a channel handle on success, or -EPROBE_DEFER if the channel isn't - * ready. - * - * Any channels returned must be closed with a call to qcom_smd_close_channel() - */ -struct qcom_smd_channel *qcom_smd_open_channel(struct qcom_smd_channel *parent, - const char *name, - qcom_smd_cb_t cb) -{ - struct qcom_smd_channel *channel; - struct qcom_smd_device *sdev = parent->qsdev; - struct qcom_smd_edge *edge = parent->edge; - int ret; - - /* Wait up to HZ for the channel to appear */ - ret = wait_event_interruptible_timeout(edge->new_channel_event, - (channel = qcom_smd_find_channel(edge, name)) != NULL, - HZ); - if (!ret) - return ERR_PTR(-ETIMEDOUT); - - if (channel->state != SMD_CHANNEL_CLOSED) { - dev_err(&sdev->dev, "channel %s is busy\n", channel->name); - return ERR_PTR(-EBUSY); - } - - channel->qsdev = sdev; - ret = qcom_smd_channel_open(channel, cb); - if (ret) { - channel->qsdev = NULL; - return ERR_PTR(ret); - } - - return channel; -} -EXPORT_SYMBOL(qcom_smd_open_channel); - -/** - * qcom_smd_close_channel() - close an additionally opened channel - * @channel: channel handle, returned by qcom_smd_open_channel() - */ 
-void qcom_smd_close_channel(struct qcom_smd_channel *channel) -{ - qcom_smd_channel_close(channel); - channel->qsdev = NULL; -} -EXPORT_SYMBOL(qcom_smd_close_channel); - -/* - * Allocate the qcom_smd_channel object for a newly found smd channel, - * retrieving and validating the smem items involved. - */ -static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *edge, - unsigned smem_info_item, - unsigned smem_fifo_item, - char *name) -{ - struct qcom_smd_channel *channel; - size_t fifo_size; - size_t info_size; - void *fifo_base; - void *info; - int ret; - - channel = devm_kzalloc(&edge->dev, sizeof(*channel), GFP_KERNEL); - if (!channel) - return ERR_PTR(-ENOMEM); - - channel->edge = edge; - channel->name = devm_kstrdup(&edge->dev, name, GFP_KERNEL); - if (!channel->name) - return ERR_PTR(-ENOMEM); - - mutex_init(&channel->tx_lock); - spin_lock_init(&channel->recv_lock); - init_waitqueue_head(&channel->fblockread_event); - - info = qcom_smem_get(edge->remote_pid, smem_info_item, &info_size); - if (IS_ERR(info)) { - ret = PTR_ERR(info); - goto free_name_and_channel; - } - - /* - * Use the size of the item to figure out which channel info struct to - * use. 
- */ - if (info_size == 2 * sizeof(struct smd_channel_info_word)) { - channel->info_word = info; - } else if (info_size == 2 * sizeof(struct smd_channel_info)) { - channel->info = info; - } else { - dev_err(&edge->dev, - "channel info of size %zu not supported\n", info_size); - ret = -EINVAL; - goto free_name_and_channel; - } - - fifo_base = qcom_smem_get(edge->remote_pid, smem_fifo_item, &fifo_size); - if (IS_ERR(fifo_base)) { - ret = PTR_ERR(fifo_base); - goto free_name_and_channel; - } - - /* The channel consist of a rx and tx fifo of equal size */ - fifo_size /= 2; - - dev_dbg(&edge->dev, "new channel '%s' info-size: %zu fifo-size: %zu\n", - name, info_size, fifo_size); - - channel->tx_fifo = fifo_base; - channel->rx_fifo = fifo_base + fifo_size; - channel->fifo_size = fifo_size; - - qcom_smd_channel_reset(channel); - - return channel; - -free_name_and_channel: - devm_kfree(&edge->dev, channel->name); - devm_kfree(&edge->dev, channel); - - return ERR_PTR(ret); -} - -/* - * Scans the allocation table for any newly allocated channels, calls - * qcom_smd_create_channel() to create representations of these and add - * them to the edge's list of channels. 
- */ -static void qcom_channel_scan_worker(struct work_struct *work) -{ - struct qcom_smd_edge *edge = container_of(work, struct qcom_smd_edge, scan_work); - struct qcom_smd_alloc_entry *alloc_tbl; - struct qcom_smd_alloc_entry *entry; - struct qcom_smd_channel *channel; - unsigned long flags; - unsigned fifo_id; - unsigned info_id; - int tbl; - int i; - u32 eflags, cid; - - for (tbl = 0; tbl < SMD_ALLOC_TBL_COUNT; tbl++) { - alloc_tbl = qcom_smem_get(edge->remote_pid, - smem_items[tbl].alloc_tbl_id, NULL); - if (IS_ERR(alloc_tbl)) - continue; - - for (i = 0; i < SMD_ALLOC_TBL_SIZE; i++) { - entry = &alloc_tbl[i]; - eflags = le32_to_cpu(entry->flags); - if (test_bit(i, edge->allocated[tbl])) - continue; - - if (entry->ref_count == 0) - continue; - - if (!entry->name[0]) - continue; - - if (!(eflags & SMD_CHANNEL_FLAGS_PACKET)) - continue; - - if ((eflags & SMD_CHANNEL_FLAGS_EDGE_MASK) != edge->edge_id) - continue; - - cid = le32_to_cpu(entry->cid); - info_id = smem_items[tbl].info_base_id + cid; - fifo_id = smem_items[tbl].fifo_base_id + cid; - - channel = qcom_smd_create_channel(edge, info_id, fifo_id, entry->name); - if (IS_ERR(channel)) - continue; - - spin_lock_irqsave(&edge->channels_lock, flags); - list_add(&channel->list, &edge->channels); - spin_unlock_irqrestore(&edge->channels_lock, flags); - - dev_dbg(&edge->dev, "new channel found: '%s'\n", channel->name); - set_bit(i, edge->allocated[tbl]); - - wake_up_interruptible(&edge->new_channel_event); - } - } - - schedule_work(&edge->state_work); -} - -/* - * This per edge worker scans smem for any new channels and register these. It - * then scans all registered channels for state changes that should be handled - * by creating or destroying smd client devices for the registered channels. 
- * - * LOCKING: edge->channels_lock only needs to cover the list operations, as the - * worker is killed before any channels are deallocated - */ -static void qcom_channel_state_worker(struct work_struct *work) -{ - struct qcom_smd_channel *channel; - struct qcom_smd_edge *edge = container_of(work, - struct qcom_smd_edge, - state_work); - unsigned remote_state; - unsigned long flags; - - /* - * Register a device for any closed channel where the remote processor - * is showing interest in opening the channel. - */ - spin_lock_irqsave(&edge->channels_lock, flags); - list_for_each_entry(channel, &edge->channels, list) { - if (channel->state != SMD_CHANNEL_CLOSED) - continue; - - remote_state = GET_RX_CHANNEL_INFO(channel, state); - if (remote_state != SMD_CHANNEL_OPENING && - remote_state != SMD_CHANNEL_OPENED) - continue; - - spin_unlock_irqrestore(&edge->channels_lock, flags); - qcom_smd_create_device(channel); - spin_lock_irqsave(&edge->channels_lock, flags); - } - - /* - * Unregister the device for any channel that is opened where the - * remote processor is closing the channel. - */ - list_for_each_entry(channel, &edge->channels, list) { - if (channel->state != SMD_CHANNEL_OPENING && - channel->state != SMD_CHANNEL_OPENED) - continue; - - remote_state = GET_RX_CHANNEL_INFO(channel, state); - if (remote_state == SMD_CHANNEL_OPENING || - remote_state == SMD_CHANNEL_OPENED) - continue; - - spin_unlock_irqrestore(&edge->channels_lock, flags); - qcom_smd_destroy_device(channel); - spin_lock_irqsave(&edge->channels_lock, flags); - } - spin_unlock_irqrestore(&edge->channels_lock, flags); -} - -/* - * Parses an of_node describing an edge. 
- */ -static int qcom_smd_parse_edge(struct device *dev, - struct device_node *node, - struct qcom_smd_edge *edge) -{ - struct device_node *syscon_np; - const char *key; - int irq; - int ret; - - INIT_LIST_HEAD(&edge->channels); - spin_lock_init(&edge->channels_lock); - - INIT_WORK(&edge->scan_work, qcom_channel_scan_worker); - INIT_WORK(&edge->state_work, qcom_channel_state_worker); - - edge->of_node = of_node_get(node); - - key = "qcom,smd-edge"; - ret = of_property_read_u32(node, key, &edge->edge_id); - if (ret) { - dev_err(dev, "edge missing %s property\n", key); - return -EINVAL; - } - - edge->remote_pid = QCOM_SMEM_HOST_ANY; - key = "qcom,remote-pid"; - of_property_read_u32(node, key, &edge->remote_pid); - - syscon_np = of_parse_phandle(node, "qcom,ipc", 0); - if (!syscon_np) { - dev_err(dev, "no qcom,ipc node\n"); - return -ENODEV; - } - - edge->ipc_regmap = syscon_node_to_regmap(syscon_np); - if (IS_ERR(edge->ipc_regmap)) - return PTR_ERR(edge->ipc_regmap); - - key = "qcom,ipc"; - ret = of_property_read_u32_index(node, key, 1, &edge->ipc_offset); - if (ret < 0) { - dev_err(dev, "no offset in %s\n", key); - return -EINVAL; - } - - ret = of_property_read_u32_index(node, key, 2, &edge->ipc_bit); - if (ret < 0) { - dev_err(dev, "no bit in %s\n", key); - return -EINVAL; - } - - irq = irq_of_parse_and_map(node, 0); - if (irq < 0) { - dev_err(dev, "required smd interrupt missing\n"); - return -EINVAL; - } - - ret = devm_request_irq(dev, irq, - qcom_smd_edge_intr, IRQF_TRIGGER_RISING, - node->name, edge); - if (ret) { - dev_err(dev, "failed to request smd irq\n"); - return ret; - } - - edge->irq = irq; - - return 0; -} - -/* - * Release function for an edge. - * Reset the state of each associated channel and free the edge context. 
- */ -static void qcom_smd_edge_release(struct device *dev) -{ - struct qcom_smd_channel *channel; - struct qcom_smd_edge *edge = to_smd_edge(dev); - - list_for_each_entry(channel, &edge->channels, list) { - SET_RX_CHANNEL_INFO(channel, state, SMD_CHANNEL_CLOSED); - SET_RX_CHANNEL_INFO(channel, head, 0); - SET_RX_CHANNEL_INFO(channel, tail, 0); - } - - kfree(edge); -} - -/** - * qcom_smd_register_edge() - register an edge based on an device_node - * @parent: parent device for the edge - * @node: device_node describing the edge - * - * Returns an edge reference, or negative ERR_PTR() on failure. - */ -struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent, - struct device_node *node) -{ - struct qcom_smd_edge *edge; - int ret; - - edge = kzalloc(sizeof(*edge), GFP_KERNEL); - if (!edge) - return ERR_PTR(-ENOMEM); - - init_waitqueue_head(&edge->new_channel_event); - - edge->dev.parent = parent; - edge->dev.release = qcom_smd_edge_release; - dev_set_name(&edge->dev, "%s:%s", dev_name(parent), node->name); - ret = device_register(&edge->dev); - if (ret) { - pr_err("failed to register smd edge\n"); - return ERR_PTR(ret); - } - - ret = qcom_smd_parse_edge(&edge->dev, node, edge); - if (ret) { - dev_err(&edge->dev, "failed to parse smd edge\n"); - goto unregister_dev; - } - - schedule_work(&edge->scan_work); - - return edge; - -unregister_dev: - put_device(&edge->dev); - return ERR_PTR(ret); -} -EXPORT_SYMBOL(qcom_smd_register_edge); - -static int qcom_smd_remove_device(struct device *dev, void *data) -{ - device_unregister(dev); - of_node_put(dev->of_node); - put_device(dev); - - return 0; -} - -/** - * qcom_smd_unregister_edge() - release an edge and its children - * @edge: edge reference acquired from qcom_smd_register_edge - */ -int qcom_smd_unregister_edge(struct qcom_smd_edge *edge) -{ - int ret; - - disable_irq(edge->irq); - cancel_work_sync(&edge->scan_work); - cancel_work_sync(&edge->state_work); - - ret = device_for_each_child(&edge->dev, NULL, 
qcom_smd_remove_device); - if (ret) - dev_warn(&edge->dev, "can't remove smd device: %d\n", ret); - - device_unregister(&edge->dev); - - return 0; -} -EXPORT_SYMBOL(qcom_smd_unregister_edge); - -static int qcom_smd_probe(struct platform_device *pdev) -{ - struct device_node *node; - void *p; - - /* Wait for smem */ - p = qcom_smem_get(QCOM_SMEM_HOST_ANY, smem_items[0].alloc_tbl_id, NULL); - if (PTR_ERR(p) == -EPROBE_DEFER) - return PTR_ERR(p); - - for_each_available_child_of_node(pdev->dev.of_node, node) - qcom_smd_register_edge(&pdev->dev, node); - - return 0; -} - -static int qcom_smd_remove_edge(struct device *dev, void *data) -{ - struct qcom_smd_edge *edge = to_smd_edge(dev); - - return qcom_smd_unregister_edge(edge); -} - -/* - * Shut down all smd clients by making sure that each edge stops processing - * events and scanning for new channels, then call destroy on the devices. - */ -static int qcom_smd_remove(struct platform_device *pdev) -{ - int ret; - - ret = device_for_each_child(&pdev->dev, NULL, qcom_smd_remove_edge); - if (ret) - dev_warn(&pdev->dev, "can't remove smd device: %d\n", ret); - - return ret; -} - -static const struct of_device_id qcom_smd_of_match[] = { - { .compatible = "qcom,smd" }, - {} -}; -MODULE_DEVICE_TABLE(of, qcom_smd_of_match); - -static struct platform_driver qcom_smd_driver = { - .probe = qcom_smd_probe, - .remove = qcom_smd_remove, - .driver = { - .name = "qcom-smd", - .of_match_table = qcom_smd_of_match, - }, -}; - -static int __init qcom_smd_init(void) -{ - int ret; - - ret = bus_register(&qcom_smd_bus); - if (ret) { - pr_err("failed to register smd bus: %d\n", ret); - return ret; - } - - return platform_driver_register(&qcom_smd_driver); -} -postcore_initcall(qcom_smd_init); - -static void __exit qcom_smd_exit(void) -{ - platform_driver_unregister(&qcom_smd_driver); - bus_unregister(&qcom_smd_bus); -} -module_exit(qcom_smd_exit); - -MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>"); 
-MODULE_DESCRIPTION("Qualcomm Shared Memory Driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/qcom/wcnss_ctrl.c b/drivers/soc/qcom/wcnss_ctrl.c index 520aedd29965..b9069184df19 100644 --- a/drivers/soc/qcom/wcnss_ctrl.c +++ b/drivers/soc/qcom/wcnss_ctrl.c @@ -14,10 +14,10 @@ #include <linux/firmware.h> #include <linux/module.h> #include <linux/slab.h> -#include <linux/soc/qcom/smd.h> #include <linux/io.h> #include <linux/of_platform.h> #include <linux/platform_device.h> +#include <linux/rpmsg.h> #include <linux/soc/qcom/wcnss_ctrl.h> #define WCNSS_REQUEST_TIMEOUT (5 * HZ) @@ -40,7 +40,7 @@ */ struct wcnss_ctrl { struct device *dev; - struct qcom_smd_channel *channel; + struct rpmsg_endpoint *channel; struct completion ack; struct completion cbc; @@ -122,11 +122,13 @@ struct wcnss_download_nv_resp { * * Handles any incoming packets from the remote WCNSS_CTRL service. */ -static int wcnss_ctrl_smd_callback(struct qcom_smd_channel *channel, - const void *data, - size_t count) +static int wcnss_ctrl_smd_callback(struct rpmsg_device *rpdev, + void *data, + int count, + void *priv, + u32 addr) { - struct wcnss_ctrl *wcnss = qcom_smd_get_drvdata(channel); + struct wcnss_ctrl *wcnss = dev_get_drvdata(&rpdev->dev); const struct wcnss_download_nv_resp *nvresp; const struct wcnss_version_resp *version; const struct wcnss_msg_hdr *hdr = data; @@ -180,7 +182,7 @@ static int wcnss_request_version(struct wcnss_ctrl *wcnss) msg.type = WCNSS_VERSION_REQ; msg.len = sizeof(msg); - ret = qcom_smd_send(wcnss->channel, &msg, sizeof(msg)); + ret = rpmsg_send(wcnss->channel, &msg, sizeof(msg)); if (ret < 0) return ret; @@ -238,7 +240,7 @@ static int wcnss_download_nv(struct wcnss_ctrl *wcnss, bool *expect_cbc) memcpy(req->fragment, data, req->frag_size); - ret = qcom_smd_send(wcnss->channel, req, req->hdr.len); + ret = rpmsg_send(wcnss->channel, req, req->hdr.len); if (ret < 0) { dev_err(wcnss->dev, "failed to send smd packet\n"); goto release_fw; @@ -274,11 +276,16 @@ 
free_req: * @name: SMD channel name * @cb: callback to handle incoming data on the channel */ -struct qcom_smd_channel *qcom_wcnss_open_channel(void *wcnss, const char *name, qcom_smd_cb_t cb) +struct rpmsg_endpoint *qcom_wcnss_open_channel(void *wcnss, const char *name, rpmsg_rx_cb_t cb, void *priv) { + struct rpmsg_channel_info chinfo; struct wcnss_ctrl *_wcnss = wcnss; - return qcom_smd_open_channel(_wcnss->channel, name, cb); + strncpy(chinfo.name, name, sizeof(chinfo.name)); + chinfo.src = RPMSG_ADDR_ANY; + chinfo.dst = RPMSG_ADDR_ANY; + + return rpmsg_create_ept(_wcnss->channel->rpdev, cb, priv, chinfo); } EXPORT_SYMBOL(qcom_wcnss_open_channel); @@ -306,35 +313,34 @@ static void wcnss_async_probe(struct work_struct *work) of_platform_populate(wcnss->dev->of_node, NULL, NULL, wcnss->dev); } -static int wcnss_ctrl_probe(struct qcom_smd_device *sdev) +static int wcnss_ctrl_probe(struct rpmsg_device *rpdev) { struct wcnss_ctrl *wcnss; - wcnss = devm_kzalloc(&sdev->dev, sizeof(*wcnss), GFP_KERNEL); + wcnss = devm_kzalloc(&rpdev->dev, sizeof(*wcnss), GFP_KERNEL); if (!wcnss) return -ENOMEM; - wcnss->dev = &sdev->dev; - wcnss->channel = sdev->channel; + wcnss->dev = &rpdev->dev; + wcnss->channel = rpdev->ept; init_completion(&wcnss->ack); init_completion(&wcnss->cbc); INIT_WORK(&wcnss->probe_work, wcnss_async_probe); - qcom_smd_set_drvdata(sdev->channel, wcnss); - dev_set_drvdata(&sdev->dev, wcnss); + dev_set_drvdata(&rpdev->dev, wcnss); schedule_work(&wcnss->probe_work); return 0; } -static void wcnss_ctrl_remove(struct qcom_smd_device *sdev) +static void wcnss_ctrl_remove(struct rpmsg_device *rpdev) { - struct wcnss_ctrl *wcnss = qcom_smd_get_drvdata(sdev->channel); + struct wcnss_ctrl *wcnss = dev_get_drvdata(&rpdev->dev); cancel_work_sync(&wcnss->probe_work); - of_platform_depopulate(&sdev->dev); + of_platform_depopulate(&rpdev->dev); } static const struct of_device_id wcnss_ctrl_of_match[] = { @@ -342,18 +348,18 @@ static const struct of_device_id 
wcnss_ctrl_of_match[] = { {} }; -static struct qcom_smd_driver wcnss_ctrl_driver = { +static struct rpmsg_driver wcnss_ctrl_driver = { .probe = wcnss_ctrl_probe, .remove = wcnss_ctrl_remove, .callback = wcnss_ctrl_smd_callback, - .driver = { + .drv = { .name = "qcom_wcnss_ctrl", .owner = THIS_MODULE, .of_match_table = wcnss_ctrl_of_match, }, }; -module_qcom_smd_driver(wcnss_ctrl_driver); +module_rpmsg_driver(wcnss_ctrl_driver); MODULE_DESCRIPTION("Qualcomm WCNSS control driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c index 6e0d614a8075..41d5edfaf0b1 100644 --- a/drivers/usb/chipidea/core.c +++ b/drivers/usb/chipidea/core.c @@ -801,9 +801,6 @@ struct platform_device *ci_hdrc_add_device(struct device *dev, } pdev->dev.parent = dev; - pdev->dev.dma_mask = dev->dma_mask; - pdev->dev.dma_parms = dev->dma_parms; - dma_set_coherent_mask(&pdev->dev, dev->coherent_dma_mask); ret = platform_device_add_resources(pdev, res, nres); if (ret) diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c index 111b0e0b8698..3218b49d04fe 100644 --- a/drivers/usb/chipidea/host.c +++ b/drivers/usb/chipidea/host.c @@ -116,7 +116,8 @@ static int host_start(struct ci_hdrc *ci) if (usb_disabled()) return -ENODEV; - hcd = usb_create_hcd(&ci_ehci_hc_driver, ci->dev, dev_name(ci->dev)); + hcd = __usb_create_hcd(&ci_ehci_hc_driver, ci->dev->parent, + ci->dev, dev_name(ci->dev), NULL); if (!hcd) return -ENOMEM; diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index 6a15b7250e9c..1d6ab13a85bb 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c @@ -423,7 +423,8 @@ static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq) hwreq->req.status = -EALREADY; - ret = usb_gadget_map_request(&ci->gadget, &hwreq->req, hwep->dir); + ret = usb_gadget_map_request_by_dev(ci->dev->parent, + &hwreq->req, hwep->dir); if (ret) return ret; @@ -603,7 +604,8 @@ static int 
_hardware_dequeue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq) list_del_init(&node->td); } - usb_gadget_unmap_request(&hwep->ci->gadget, &hwreq->req, hwep->dir); + usb_gadget_unmap_request_by_dev(hwep->ci->dev->parent, + &hwreq->req, hwep->dir); hwreq->req.actual += actual; @@ -1902,13 +1904,13 @@ static int udc_start(struct ci_hdrc *ci) INIT_LIST_HEAD(&ci->gadget.ep_list); /* alloc resources */ - ci->qh_pool = dma_pool_create("ci_hw_qh", dev, + ci->qh_pool = dma_pool_create("ci_hw_qh", dev->parent, sizeof(struct ci_hw_qh), 64, CI_HDRC_PAGE_SIZE); if (ci->qh_pool == NULL) return -ENOMEM; - ci->td_pool = dma_pool_create("ci_hw_td", dev, + ci->td_pool = dma_pool_create("ci_hw_td", dev->parent, sizeof(struct ci_hw_td), 64, CI_HDRC_PAGE_SIZE); if (ci->td_pool == NULL) { diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c index 98e39f91723a..a6cd44a711cf 100644 --- a/drivers/usb/core/buffer.c +++ b/drivers/usb/core/buffer.c @@ -63,7 +63,7 @@ int hcd_buffer_create(struct usb_hcd *hcd) int i, size; if (!IS_ENABLED(CONFIG_HAS_DMA) || - (!hcd->self.controller->dma_mask && + (!is_device_dma_capable(hcd->self.sysdev) && !(hcd->driver->flags & HCD_LOCAL_MEM))) return 0; @@ -72,7 +72,7 @@ int hcd_buffer_create(struct usb_hcd *hcd) if (!size) continue; snprintf(name, sizeof(name), "buffer-%d", size); - hcd->pool[i] = dma_pool_create(name, hcd->self.controller, + hcd->pool[i] = dma_pool_create(name, hcd->self.sysdev, size, size, 0); if (!hcd->pool[i]) { hcd_buffer_destroy(hcd); @@ -127,7 +127,7 @@ void *hcd_buffer_alloc( /* some USB hosts just use PIO */ if (!IS_ENABLED(CONFIG_HAS_DMA) || - (!bus->controller->dma_mask && + (!is_device_dma_capable(bus->sysdev) && !(hcd->driver->flags & HCD_LOCAL_MEM))) { *dma = ~(dma_addr_t) 0; return kmalloc(size, mem_flags); @@ -137,7 +137,7 @@ void *hcd_buffer_alloc( if (size <= pool_max[i]) return dma_pool_alloc(hcd->pool[i], mem_flags, dma); } - return dma_alloc_coherent(hcd->self.controller, size, dma, mem_flags); + return 
dma_alloc_coherent(hcd->self.sysdev, size, dma, mem_flags); } void hcd_buffer_free( @@ -154,7 +154,7 @@ void hcd_buffer_free( return; if (!IS_ENABLED(CONFIG_HAS_DMA) || - (!bus->controller->dma_mask && + (!is_device_dma_capable(bus->sysdev) && !(hcd->driver->flags & HCD_LOCAL_MEM))) { kfree(addr); return; @@ -166,5 +166,5 @@ void hcd_buffer_free( return; } } - dma_free_coherent(hcd->self.controller, size, addr, dma); + dma_free_coherent(hcd->self.sysdev, size, addr, dma); } diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 882fc4e08284..313ae0863c3a 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -1076,6 +1076,7 @@ static void usb_deregister_bus (struct usb_bus *bus) static int register_root_hub(struct usb_hcd *hcd) { struct device *parent_dev = hcd->self.controller; + struct device *sysdev = hcd->self.sysdev; struct usb_device *usb_dev = hcd->self.root_hub; const int devnum = 1; int retval; @@ -1122,7 +1123,7 @@ static int register_root_hub(struct usb_hcd *hcd) /* Did the HC die before the root hub was registered? */ if (HCD_DEAD(hcd)) usb_hc_died (hcd); /* This time clean up */ - usb_dev->dev.of_node = parent_dev->of_node; + usb_dev->dev.of_node = sysdev->of_node; } mutex_unlock(&usb_bus_idr_lock); @@ -1468,19 +1469,19 @@ void usb_hcd_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb) dir = usb_urb_dir_in(urb) ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE; if (IS_ENABLED(CONFIG_HAS_DMA) && (urb->transfer_flags & URB_DMA_MAP_SG)) - dma_unmap_sg(hcd->self.controller, + dma_unmap_sg(hcd->self.sysdev, urb->sg, urb->num_sgs, dir); else if (IS_ENABLED(CONFIG_HAS_DMA) && (urb->transfer_flags & URB_DMA_MAP_PAGE)) - dma_unmap_page(hcd->self.controller, + dma_unmap_page(hcd->self.sysdev, urb->transfer_dma, urb->transfer_buffer_length, dir); else if (IS_ENABLED(CONFIG_HAS_DMA) && (urb->transfer_flags & URB_DMA_MAP_SINGLE)) - dma_unmap_single(hcd->self.controller, + dma_unmap_single(hcd->self.sysdev, urb->transfer_dma, urb->transfer_buffer_length, dir); @@ -1523,11 +1524,11 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, return ret; if (IS_ENABLED(CONFIG_HAS_DMA) && hcd->self.uses_dma) { urb->setup_dma = dma_map_single( - hcd->self.controller, + hcd->self.sysdev, urb->setup_packet, sizeof(struct usb_ctrlrequest), DMA_TO_DEVICE); - if (dma_mapping_error(hcd->self.controller, + if (dma_mapping_error(hcd->self.sysdev, urb->setup_dma)) return -EAGAIN; urb->transfer_flags |= URB_SETUP_MAP_SINGLE; @@ -1558,7 +1559,7 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, } n = dma_map_sg( - hcd->self.controller, + hcd->self.sysdev, urb->sg, urb->num_sgs, dir); @@ -1573,12 +1574,12 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, } else if (urb->sg) { struct scatterlist *sg = urb->sg; urb->transfer_dma = dma_map_page( - hcd->self.controller, + hcd->self.sysdev, sg_page(sg), sg->offset, urb->transfer_buffer_length, dir); - if (dma_mapping_error(hcd->self.controller, + if (dma_mapping_error(hcd->self.sysdev, urb->transfer_dma)) ret = -EAGAIN; else @@ -1588,11 +1589,11 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, ret = -EAGAIN; } else { urb->transfer_dma = dma_map_single( - hcd->self.controller, + hcd->self.sysdev, urb->transfer_buffer, urb->transfer_buffer_length, dir); - if (dma_mapping_error(hcd->self.controller, + if 
(dma_mapping_error(hcd->self.sysdev, urb->transfer_dma)) ret = -EAGAIN; else @@ -2500,24 +2501,8 @@ static void init_giveback_urb_bh(struct giveback_urb_bh *bh) tasklet_init(&bh->bh, usb_giveback_urb_bh, (unsigned long)bh); } -/** - * usb_create_shared_hcd - create and initialize an HCD structure - * @driver: HC driver that will use this hcd - * @dev: device for this HC, stored in hcd->self.controller - * @bus_name: value to store in hcd->self.bus_name - * @primary_hcd: a pointer to the usb_hcd structure that is sharing the - * PCI device. Only allocate certain resources for the primary HCD - * Context: !in_interrupt() - * - * Allocate a struct usb_hcd, with extra space at the end for the - * HC driver's private data. Initialize the generic members of the - * hcd structure. - * - * Return: On success, a pointer to the created and initialized HCD structure. - * On failure (e.g. if memory is unavailable), %NULL. - */ -struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver, - struct device *dev, const char *bus_name, +struct usb_hcd *__usb_create_hcd(const struct hc_driver *driver, + struct device *sysdev, struct device *dev, const char *bus_name, struct usb_hcd *primary_hcd) { struct usb_hcd *hcd; @@ -2559,8 +2544,9 @@ struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver, usb_bus_init(&hcd->self); hcd->self.controller = dev; + hcd->self.sysdev = sysdev; hcd->self.bus_name = bus_name; - hcd->self.uses_dma = (dev->dma_mask != NULL); + hcd->self.uses_dma = (sysdev->dma_mask != NULL); init_timer(&hcd->rh_timer); hcd->rh_timer.function = rh_timer_func; @@ -2575,6 +2561,30 @@ struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver, "USB Host Controller"; return hcd; } +EXPORT_SYMBOL_GPL(__usb_create_hcd); + +/** + * usb_create_shared_hcd - create and initialize an HCD structure + * @driver: HC driver that will use this hcd + * @dev: device for this HC, stored in hcd->self.controller + * @bus_name: value to store in 
hcd->self.bus_name + * @primary_hcd: a pointer to the usb_hcd structure that is sharing the + * PCI device. Only allocate certain resources for the primary HCD + * Context: !in_interrupt() + * + * Allocate a struct usb_hcd, with extra space at the end for the + * HC driver's private data. Initialize the generic members of the + * hcd structure. + * + * Return: On success, a pointer to the created and initialized HCD structure. + * On failure (e.g. if memory is unavailable), %NULL. + */ +struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver, + struct device *dev, const char *bus_name, + struct usb_hcd *primary_hcd) +{ + return __usb_create_hcd(driver, dev, dev, bus_name, primary_hcd); +} EXPORT_SYMBOL_GPL(usb_create_shared_hcd); /** @@ -2594,7 +2604,7 @@ EXPORT_SYMBOL_GPL(usb_create_shared_hcd); struct usb_hcd *usb_create_hcd(const struct hc_driver *driver, struct device *dev, const char *bus_name) { - return usb_create_shared_hcd(driver, dev, bus_name, NULL); + return __usb_create_hcd(driver, dev, dev, bus_name, NULL); } EXPORT_SYMBOL_GPL(usb_create_hcd); @@ -2721,7 +2731,7 @@ int usb_add_hcd(struct usb_hcd *hcd, struct usb_device *rhdev; if (IS_ENABLED(CONFIG_USB_PHY) && !hcd->usb_phy) { - struct usb_phy *phy = usb_get_phy_dev(hcd->self.controller, 0); + struct usb_phy *phy = usb_get_phy_dev(hcd->self.sysdev, 0); if (IS_ERR(phy)) { retval = PTR_ERR(phy); @@ -2739,7 +2749,7 @@ int usb_add_hcd(struct usb_hcd *hcd, } if (IS_ENABLED(CONFIG_GENERIC_PHY) && !hcd->phy) { - struct phy *phy = phy_get(hcd->self.controller, "usb"); + struct phy *phy = phy_get(hcd->self.sysdev, "usb"); if (IS_ERR(phy)) { retval = PTR_ERR(phy); @@ -2787,7 +2797,7 @@ int usb_add_hcd(struct usb_hcd *hcd, */ retval = hcd_buffer_create(hcd); if (retval != 0) { - dev_dbg(hcd->self.controller, "pool alloc failed\n"); + dev_dbg(hcd->self.sysdev, "pool alloc failed\n"); goto err_create_buf; } @@ -2797,7 +2807,7 @@ int usb_add_hcd(struct usb_hcd *hcd, rhdev = usb_alloc_dev(NULL, 
&hcd->self, 0); if (rhdev == NULL) { - dev_err(hcd->self.controller, "unable to allocate root hub\n"); + dev_err(hcd->self.sysdev, "unable to allocate root hub\n"); retval = -ENOMEM; goto err_allocate_root_hub; } diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c index eaf1c3b06f02..e1ab778e774a 100644 --- a/drivers/usb/core/usb.c +++ b/drivers/usb/core/usb.c @@ -452,9 +452,9 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent, * Note: calling dma_set_mask() on a USB device would set the * mask for the entire HCD, so don't do that. */ - dev->dev.dma_mask = bus->controller->dma_mask; - dev->dev.dma_pfn_offset = bus->controller->dma_pfn_offset; - set_dev_node(&dev->dev, dev_to_node(bus->controller)); + dev->dev.dma_mask = bus->sysdev->dma_mask; + dev->dev.dma_pfn_offset = bus->sysdev->dma_pfn_offset; + set_dev_node(&dev->dev, dev_to_node(bus->sysdev)); dev->state = USB_STATE_ATTACHED; dev->lpm_disable_count = 1; atomic_set(&dev->urbnum, 0); @@ -802,7 +802,7 @@ struct urb *usb_buffer_map(struct urb *urb) if (!urb || !urb->dev || !(bus = urb->dev->bus) - || !(controller = bus->controller)) + || !(controller = bus->sysdev)) return NULL; if (controller->dma_mask) { @@ -840,7 +840,7 @@ void usb_buffer_dmasync(struct urb *urb) || !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) || !urb->dev || !(bus = urb->dev->bus) - || !(controller = bus->controller)) + || !(controller = bus->sysdev)) return; if (controller->dma_mask) { @@ -874,7 +874,7 @@ void usb_buffer_unmap(struct urb *urb) || !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) || !urb->dev || !(bus = urb->dev->bus) - || !(controller = bus->controller)) + || !(controller = bus->sysdev)) return; if (controller->dma_mask) { @@ -924,7 +924,7 @@ int usb_buffer_map_sg(const struct usb_device *dev, int is_in, if (!dev || !(bus = dev->bus) - || !(controller = bus->controller) + || !(controller = bus->sysdev) || !controller->dma_mask) return -EINVAL; @@ -960,7 +960,7 @@ void usb_buffer_dmasync_sg(const 
struct usb_device *dev, int is_in, if (!dev || !(bus = dev->bus) - || !(controller = bus->controller) + || !(controller = bus->sysdev) || !controller->dma_mask) return; @@ -988,7 +988,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in, if (!dev || !(bus = dev->bus) - || !(controller = bus->controller) + || !(controller = bus->sysdev) || !controller->dma_mask) return; diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 0c4f9c67c221..0e26dfe626ae 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -135,6 +135,7 @@ struct dma_buf_attachment; #define DRM_UT_PRIME 0x08 #define DRM_UT_ATOMIC 0x10 #define DRM_UT_VBL 0x20 +#define DRM_UT_STATE 0x40 extern __printf(6, 7) void drm_dev_printk(const struct device *dev, const char *level, @@ -306,6 +307,27 @@ void drm_printk(const char *level, unsigned int category, #define DRM_DEBUG_PRIME_RATELIMITED(fmt, args...) \ DRM_DEV_DEBUG_PRIME_RATELIMITED(NULL, fmt, ##args) +/* Format strings and argument splitters to simplify printing + * various "complex" objects + */ +#define DRM_MODE_FMT "%d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x" +#define DRM_MODE_ARG(m) \ + (m)->base.id, (m)->name, (m)->vrefresh, (m)->clock, \ + (m)->hdisplay, (m)->hsync_start, (m)->hsync_end, (m)->htotal, \ + (m)->vdisplay, (m)->vsync_start, (m)->vsync_end, (m)->vtotal, \ + (m)->type, (m)->flags + +#define DRM_RECT_FMT "%dx%d%+d%+d" +#define DRM_RECT_ARG(r) drm_rect_width(r), drm_rect_height(r), (r)->x1, (r)->y1 + +/* for rect's in fixed-point format: */ +#define DRM_RECT_FP_FMT "%d.%06ux%d.%06u%+d.%06u%+d.%06u" +#define DRM_RECT_FP_ARG(r) \ + drm_rect_width(r) >> 16, ((drm_rect_width(r) & 0xffff) * 15625) >> 10, \ + drm_rect_height(r) >> 16, ((drm_rect_height(r) & 0xffff) * 15625) >> 10, \ + (r)->x1 >> 16, (((r)->x1 & 0xffff) * 15625) >> 10, \ + (r)->y1 >> 16, (((r)->y1 & 0xffff) * 15625) >> 10 + /*@}*/ /***********************************************************************/ diff --git a/include/drm/drm_atomic.h 
b/include/drm/drm_atomic.h index 9701f2dfb784..9e760f07c0e9 100644 --- a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h @@ -335,6 +335,13 @@ int __must_check drm_atomic_check_only(struct drm_atomic_state *state); int __must_check drm_atomic_commit(struct drm_atomic_state *state); int __must_check drm_atomic_nonblocking_commit(struct drm_atomic_state *state); +void drm_state_dump(struct drm_device *dev, struct drm_printer *p); + +#ifdef CONFIG_DEBUG_FS +struct drm_minor; +int drm_atomic_debugfs_init(struct drm_minor *minor); +#endif + #define for_each_connector_in_state(__state, connector, connector_state, __i) \ for ((__i) = 0; \ (__i) < (__state)->num_connector && \ diff --git a/include/drm/drm_blend.h b/include/drm/drm_blend.h index 36baa175de99..fd351924e1c5 100644 --- a/include/drm/drm_blend.h +++ b/include/drm/drm_blend.h @@ -47,8 +47,16 @@ struct drm_atomic_state; #define DRM_REFLECT_Y BIT(5) #define DRM_REFLECT_MASK (DRM_REFLECT_X | DRM_REFLECT_Y) +static inline bool drm_rotation_90_or_270(unsigned int rotation) +{ + return rotation & (DRM_ROTATE_90 | DRM_ROTATE_270); +} + struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev, unsigned int supported_rotations); +int drm_plane_create_rotation_property(struct drm_plane *plane, + unsigned int rotation, + unsigned int supported_rotations); unsigned int drm_rotation_simplify(unsigned int rotation, unsigned int supported_rotations); diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index d8bb8d151825..78778badd4e5 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -37,6 +37,7 @@ struct drm_crtc; struct drm_encoder; struct drm_property; struct drm_property_blob; +struct drm_printer; struct edid; enum drm_connector_force { @@ -485,6 +486,18 @@ struct drm_connector_funcs { const struct drm_connector_state *state, struct drm_property *property, uint64_t *val); + + /** + * @atomic_print_state: + * + * If driver subclasses struct 
&drm_connector_state, it should implement + * this optional hook for printing additional driver specific state. + * + * Do not call this directly, use drm_atomic_connector_print_state() + * instead. + */ + void (*atomic_print_state)(struct drm_printer *p, + const struct drm_connector_state *state); }; /* mode specified on the command line */ diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 0aa292526567..1b7985e19759 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -52,8 +52,9 @@ struct drm_device; struct drm_mode_set; struct drm_file; struct drm_clip_rect; +struct drm_printer; struct device_node; -struct fence; +struct dma_fence; struct edid; static inline int64_t U642I64(uint64_t val) @@ -564,6 +565,18 @@ struct drm_crtc_funcs { * before data structures are torndown. */ void (*early_unregister)(struct drm_crtc *crtc); + + /** + * @atomic_print_state: + * + * If driver subclasses struct &drm_crtc_state, it should implement + * this optional hook for printing additional driver specific state. + * + * Do not call this directly, use drm_atomic_crtc_print_state() + * instead. 
+ */ + void (*atomic_print_state)(struct drm_printer *p, + const struct drm_crtc_state *state); }; /** diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h index c5576fbcb909..d918ce45ec2c 100644 --- a/include/drm/drm_modeset_lock.h +++ b/include/drm/drm_modeset_lock.h @@ -82,8 +82,6 @@ struct drm_modeset_lock { struct list_head head; }; -extern struct ww_class crtc_ww_class; - void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx, uint32_t flags); void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx); @@ -91,15 +89,7 @@ void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx); void drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx); int drm_modeset_backoff_interruptible(struct drm_modeset_acquire_ctx *ctx); -/** - * drm_modeset_lock_init - initialize lock - * @lock: lock to init - */ -static inline void drm_modeset_lock_init(struct drm_modeset_lock *lock) -{ - ww_mutex_init(&lock->mutex, &crtc_ww_class); - INIT_LIST_HEAD(&lock->head); -} +void drm_modeset_lock_init(struct drm_modeset_lock *lock); /** * drm_modeset_lock_fini - cleanup lock diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h index 3fd87b386ed7..d6b4c5587bbe 100644 --- a/include/drm/drm_of.h +++ b/include/drm/drm_of.h @@ -4,6 +4,7 @@ #include <linux/of_graph.h> struct component_master_ops; +struct component_match; struct device; struct drm_device; struct drm_encoder; @@ -12,6 +13,10 @@ struct device_node; #ifdef CONFIG_OF extern uint32_t drm_of_find_possible_crtcs(struct drm_device *dev, struct device_node *port); +extern void drm_of_component_match_add(struct device *master, + struct component_match **matchptr, + int (*compare)(struct device *, void *), + struct device_node *node); extern int drm_of_component_probe(struct device *dev, int (*compare_of)(struct device *, void *), const struct component_master_ops *m_ops); @@ -25,6 +30,13 @@ static inline uint32_t drm_of_find_possible_crtcs(struct drm_device *dev, return 0; } +static 
void drm_of_component_match_add(struct device *master, + struct component_match **matchptr, + int (*compare)(struct device *, void *), + struct device_node *node) +{ +} + static inline int drm_of_component_probe(struct device *dev, int (*compare_of)(struct device *, void *), diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h index 8b4dc62470ff..aea7f1b2ba82 100644 --- a/include/drm/drm_plane.h +++ b/include/drm/drm_plane.h @@ -28,6 +28,7 @@ #include <drm/drm_mode_object.h> struct drm_crtc; +struct drm_printer; /** * struct drm_plane_state - mutable plane state @@ -94,6 +95,29 @@ struct drm_plane_state { struct drm_atomic_state *state; }; +static inline struct drm_rect +drm_plane_state_src(const struct drm_plane_state *state) +{ + struct drm_rect src = { + .x1 = state->src_x, + .y1 = state->src_y, + .x2 = state->src_x + state->src_w, + .y2 = state->src_y + state->src_h, + }; + return src; +} + +static inline struct drm_rect +drm_plane_state_dest(const struct drm_plane_state *state) +{ + struct drm_rect dest = { + .x1 = state->crtc_x, + .y1 = state->crtc_y, + .x2 = state->crtc_x + state->crtc_w, + .y2 = state->crtc_y + state->crtc_h, + }; + return dest; +} /** * struct drm_plane_funcs - driver plane control functions @@ -323,6 +347,18 @@ struct drm_plane_funcs { * before data structures are torndown. */ void (*early_unregister)(struct drm_plane *plane); + + /** + * @atomic_print_state: + * + * If driver subclasses struct &drm_plane_state, it should implement + * this optional hook for printing additional driver specific state. + * + * Do not call this directly, use drm_atomic_plane_print_state() + * instead. 
+ */ + void (*atomic_print_state)(struct drm_printer *p, + const struct drm_plane_state *state); }; /** @@ -392,6 +428,7 @@ enum drm_plane_type { * @type: type of plane (overlay, primary, cursor) * @state: current atomic state for this plane * @zpos_property: zpos property for this plane + * @rotation_property: rotation property for this plane * @helper_private: mid-layer private data */ struct drm_plane { @@ -438,6 +475,7 @@ struct drm_plane { struct drm_plane_state *state; struct drm_property *zpos_property; + struct drm_property *rotation_property; }; #define obj_to_plane(x) container_of(x, struct drm_plane, base) diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h new file mode 100644 index 000000000000..475ffe3730e9 --- /dev/null +++ b/include/drm/drm_print.h @@ -0,0 +1,117 @@ +/* + * Copyright (C) 2016 Red Hat + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: + * Rob Clark <robdclark@gmail.com> + */ + +#ifndef DRM_PRINT_H_ +#define DRM_PRINT_H_ + +#include <linux/seq_file.h> +#include <linux/device.h> + +/** + * DOC: print + * + * A simple wrapper for dev_printk(), seq_printf(), etc. Allows same + * debug code to be used for both debugfs and printk logging. + * + * For example:: + * + * void log_some_info(struct drm_printer *p) + * { + * drm_printf(p, "foo=%d\n", foo); + * drm_printf(p, "bar=%d\n", bar); + * } + * + * #ifdef CONFIG_DEBUG_FS + * void debugfs_show(struct seq_file *f) + * { + * struct drm_printer p = drm_seq_file_printer(f); + * log_some_info(&p); + * } + * #endif + * + * void some_other_function(...) + * { + * struct drm_printer p = drm_info_printer(drm->dev); + * log_some_info(&p); + * } + */ + +/** + * struct drm_printer - drm output "stream" + * @printfn: actual output fxn + * @arg: output fxn specific data + * + * Do not use struct members directly. Use drm_printer_seq_file(), + * drm_printer_info(), etc to initialize. And drm_printf() for output. 
+ */ +struct drm_printer { + void (*printfn)(struct drm_printer *p, struct va_format *vaf); + void *arg; +}; + +void __drm_printfn_seq_file(struct drm_printer *p, struct va_format *vaf); +void __drm_printfn_info(struct drm_printer *p, struct va_format *vaf); + +/** + * drm_printf - print to a &drm_printer stream + * @p: the &drm_printer + * @f: format string + */ +void drm_printf(struct drm_printer *p, const char *f, ...); + + +/** + * drm_seq_file_printer - construct a &drm_printer that outputs to &seq_file + * @f: the struct &seq_file to output to + * + * RETURNS: + * The &drm_printer object + */ +static inline struct drm_printer drm_seq_file_printer(struct seq_file *f) +{ + struct drm_printer p = { + .printfn = __drm_printfn_seq_file, + .arg = f, + }; + return p; +} + +/** + * drm_info_printer - construct a &drm_printer that outputs to dev_printk() + * @dev: the struct &device pointer + * + * RETURNS: + * The &drm_printer object + */ +static inline struct drm_printer drm_info_printer(struct device *dev) +{ + struct drm_printer p = { + .printfn = __drm_printfn_info, + .arg = dev, + }; + return p; +} + +#endif /* DRM_PRINT_H_ */ diff --git a/include/dt-bindings/clock/qcom,gcc-msm8960.h b/include/dt-bindings/clock/qcom,gcc-msm8960.h index 7d20eedfee98..e02742fc81cc 100644 --- a/include/dt-bindings/clock/qcom,gcc-msm8960.h +++ b/include/dt-bindings/clock/qcom,gcc-msm8960.h @@ -319,5 +319,7 @@ #define CE3_SRC 303 #define CE3_CORE_CLK 304 #define CE3_H_CLK 305 +#define PLL16 306 +#define PLL17 307 #endif diff --git a/include/dt-bindings/clock/qcom,rpmcc.h b/include/dt-bindings/clock/qcom,rpmcc.h new file mode 100644 index 000000000000..5924cdb71336 --- /dev/null +++ b/include/dt-bindings/clock/qcom,rpmcc.h @@ -0,0 +1,69 @@ +/* + * Copyright 2015 Linaro Limited + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those 
terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_CLK_MSM_RPMCC_H +#define _DT_BINDINGS_CLK_MSM_RPMCC_H + +/* apq8064 */ +#define RPM_PXO_CLK 0 +#define RPM_PXO_A_CLK 1 +#define RPM_CXO_CLK 2 +#define RPM_CXO_A_CLK 3 +#define RPM_APPS_FABRIC_CLK 4 +#define RPM_APPS_FABRIC_A_CLK 5 +#define RPM_CFPB_CLK 6 +#define RPM_CFPB_A_CLK 7 +#define RPM_QDSS_CLK 8 +#define RPM_QDSS_A_CLK 9 +#define RPM_DAYTONA_FABRIC_CLK 10 +#define RPM_DAYTONA_FABRIC_A_CLK 11 +#define RPM_EBI1_CLK 12 +#define RPM_EBI1_A_CLK 13 +#define RPM_MM_FABRIC_CLK 14 +#define RPM_MM_FABRIC_A_CLK 15 +#define RPM_MMFPB_CLK 16 +#define RPM_MMFPB_A_CLK 17 +#define RPM_SYS_FABRIC_CLK 18 +#define RPM_SYS_FABRIC_A_CLK 19 +#define RPM_SFPB_CLK 20 +#define RPM_SFPB_A_CLK 21 + +/* msm8916 */ +#define RPM_SMD_XO_CLK_SRC 0 +#define RPM_SMD_XO_A_CLK_SRC 1 +#define RPM_SMD_PCNOC_CLK 2 +#define RPM_SMD_PCNOC_A_CLK 3 +#define RPM_SMD_SNOC_CLK 4 +#define RPM_SMD_SNOC_A_CLK 5 +#define RPM_SMD_BIMC_CLK 6 +#define RPM_SMD_BIMC_A_CLK 7 +#define RPM_SMD_QDSS_CLK 8 +#define RPM_SMD_QDSS_A_CLK 9 +#define RPM_SMD_BB_CLK1 10 +#define RPM_SMD_BB_CLK1_A 11 +#define RPM_SMD_BB_CLK2 12 +#define RPM_SMD_BB_CLK2_A 13 +#define RPM_SMD_RF_CLK1 14 +#define RPM_SMD_RF_CLK1_A 15 +#define RPM_SMD_RF_CLK2 16 +#define RPM_SMD_RF_CLK2_A 17 +#define RPM_SMD_BB_CLK1_PIN 18 +#define RPM_SMD_BB_CLK1_A_PIN 19 +#define RPM_SMD_BB_CLK2_PIN 20 +#define RPM_SMD_BB_CLK2_A_PIN 21 +#define RPM_SMD_RF_CLK1_PIN 22 +#define RPM_SMD_RF_CLK1_A_PIN 23 +#define RPM_SMD_RF_CLK2_PIN 24 +#define RPM_SMD_RF_CLK2_A_PIN 25 + +#endif diff --git a/include/dt-bindings/soc/msm-bus-ids.h b/include/dt-bindings/soc/msm-bus-ids.h new file mode 100644 index 000000000000..9ae56db1d235 --- /dev/null +++ b/include/dt-bindings/soc/msm-bus-ids.h 
@@ -0,0 +1,661 @@ +/* Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __MSM_BUS_IDS_H +#define __MSM_BUS_IDS_H + +/* Topology related enums */ +#define MSM_BUS_FAB_DEFAULT 0 +#define MSM_BUS_FAB_APPSS 0 +#define MSM_BUS_FAB_SYSTEM 1024 +#define MSM_BUS_FAB_MMSS 2048 +#define MSM_BUS_FAB_SYSTEM_FPB 3072 +#define MSM_BUS_FAB_CPSS_FPB 4096 + +#define MSM_BUS_FAB_BIMC 0 +#define MSM_BUS_FAB_SYS_NOC 1024 +#define MSM_BUS_FAB_MMSS_NOC 2048 +#define MSM_BUS_FAB_OCMEM_NOC 3072 +#define MSM_BUS_FAB_PERIPH_NOC 4096 +#define MSM_BUS_FAB_CONFIG_NOC 5120 +#define MSM_BUS_FAB_OCMEM_VNOC 6144 + +#define MSM_BUS_MASTER_FIRST 1 +#define MSM_BUS_MASTER_AMPSS_M0 1 +#define MSM_BUS_MASTER_AMPSS_M1 2 +#define MSM_BUS_APPSS_MASTER_FAB_MMSS 3 +#define MSM_BUS_APPSS_MASTER_FAB_SYSTEM 4 +#define MSM_BUS_SYSTEM_MASTER_FAB_APPSS 5 +#define MSM_BUS_MASTER_SPS 6 +#define MSM_BUS_MASTER_ADM_PORT0 7 +#define MSM_BUS_MASTER_ADM_PORT1 8 +#define MSM_BUS_SYSTEM_MASTER_ADM1_PORT0 9 +#define MSM_BUS_MASTER_ADM1_PORT1 10 +#define MSM_BUS_MASTER_LPASS_PROC 11 +#define MSM_BUS_MASTER_MSS_PROCI 12 +#define MSM_BUS_MASTER_MSS_PROCD 13 +#define MSM_BUS_MASTER_MSS_MDM_PORT0 14 +#define MSM_BUS_MASTER_LPASS 15 +#define MSM_BUS_SYSTEM_MASTER_CPSS_FPB 16 +#define MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB 17 +#define MSM_BUS_SYSTEM_MASTER_MMSS_FPB 18 +#define MSM_BUS_MASTER_ADM1_CI 19 +#define MSM_BUS_MASTER_ADM0_CI 20 +#define MSM_BUS_MASTER_MSS_MDM_PORT1 21 +#define MSM_BUS_MASTER_MDP_PORT0 22 +#define MSM_BUS_MASTER_MDP_PORT1 23 
+#define MSM_BUS_MMSS_MASTER_ADM1_PORT0 24 +#define MSM_BUS_MASTER_ROTATOR 25 +#define MSM_BUS_MASTER_GRAPHICS_3D 26 +#define MSM_BUS_MASTER_JPEG_DEC 27 +#define MSM_BUS_MASTER_GRAPHICS_2D_CORE0 28 +#define MSM_BUS_MASTER_VFE 29 +#define MSM_BUS_MASTER_VPE 30 +#define MSM_BUS_MASTER_JPEG_ENC 31 +#define MSM_BUS_MASTER_GRAPHICS_2D_CORE1 32 +#define MSM_BUS_MMSS_MASTER_APPS_FAB 33 +#define MSM_BUS_MASTER_HD_CODEC_PORT0 34 +#define MSM_BUS_MASTER_HD_CODEC_PORT1 35 +#define MSM_BUS_MASTER_SPDM 36 +#define MSM_BUS_MASTER_RPM 37 +#define MSM_BUS_MASTER_MSS 38 +#define MSM_BUS_MASTER_RIVA 39 +#define MSM_BUS_SYSTEM_MASTER_UNUSED_6 40 +#define MSM_BUS_MASTER_MSS_SW_PROC 41 +#define MSM_BUS_MASTER_MSS_FW_PROC 42 +#define MSM_BUS_MMSS_MASTER_UNUSED_2 43 +#define MSM_BUS_MASTER_GSS_NAV 44 +#define MSM_BUS_MASTER_PCIE 45 +#define MSM_BUS_MASTER_SATA 46 +#define MSM_BUS_MASTER_CRYPTO 47 +#define MSM_BUS_MASTER_VIDEO_CAP 48 +#define MSM_BUS_MASTER_GRAPHICS_3D_PORT1 49 +#define MSM_BUS_MASTER_VIDEO_ENC 50 +#define MSM_BUS_MASTER_VIDEO_DEC 51 +#define MSM_BUS_MASTER_LPASS_AHB 52 +#define MSM_BUS_MASTER_QDSS_BAM 53 +#define MSM_BUS_MASTER_SNOC_CFG 54 +#define MSM_BUS_MASTER_CRYPTO_CORE0 55 +#define MSM_BUS_MASTER_CRYPTO_CORE1 56 +#define MSM_BUS_MASTER_MSS_NAV 57 +#define MSM_BUS_MASTER_OCMEM_DMA 58 +#define MSM_BUS_MASTER_WCSS 59 +#define MSM_BUS_MASTER_QDSS_ETR 60 +#define MSM_BUS_MASTER_USB3 61 +#define MSM_BUS_MASTER_JPEG 62 +#define MSM_BUS_MASTER_VIDEO_P0 63 +#define MSM_BUS_MASTER_VIDEO_P1 64 +#define MSM_BUS_MASTER_MSS_PROC 65 +#define MSM_BUS_MASTER_JPEG_OCMEM 66 +#define MSM_BUS_MASTER_MDP_OCMEM 67 +#define MSM_BUS_MASTER_VIDEO_P0_OCMEM 68 +#define MSM_BUS_MASTER_VIDEO_P1_OCMEM 69 +#define MSM_BUS_MASTER_VFE_OCMEM 70 +#define MSM_BUS_MASTER_CNOC_ONOC_CFG 71 +#define MSM_BUS_MASTER_RPM_INST 72 +#define MSM_BUS_MASTER_RPM_DATA 73 +#define MSM_BUS_MASTER_RPM_SYS 74 +#define MSM_BUS_MASTER_DEHR 75 +#define MSM_BUS_MASTER_QDSS_DAP 76 +#define MSM_BUS_MASTER_TIC 77 +#define 
MSM_BUS_MASTER_SDCC_1 78 +#define MSM_BUS_MASTER_SDCC_3 79 +#define MSM_BUS_MASTER_SDCC_4 80 +#define MSM_BUS_MASTER_SDCC_2 81 +#define MSM_BUS_MASTER_TSIF 82 +#define MSM_BUS_MASTER_BAM_DMA 83 +#define MSM_BUS_MASTER_BLSP_2 84 +#define MSM_BUS_MASTER_USB_HSIC 85 +#define MSM_BUS_MASTER_BLSP_1 86 +#define MSM_BUS_MASTER_USB_HS 87 +#define MSM_BUS_MASTER_PNOC_CFG 88 +#define MSM_BUS_MASTER_V_OCMEM_GFX3D 89 +#define MSM_BUS_MASTER_IPA 90 +#define MSM_BUS_MASTER_QPIC 91 +#define MSM_BUS_MASTER_MDPE 92 +#define MSM_BUS_MASTER_USB_HS2 93 +#define MSM_BUS_MASTER_VPU 94 +#define MSM_BUS_MASTER_UFS 95 +#define MSM_BUS_MASTER_BCAST 96 +#define MSM_BUS_MASTER_CRYPTO_CORE2 97 +#define MSM_BUS_MASTER_EMAC 98 +#define MSM_BUS_MASTER_VPU_1 99 +#define MSM_BUS_MASTER_PCIE_1 100 +#define MSM_BUS_MASTER_USB3_1 101 +#define MSM_BUS_MASTER_CNOC_MNOC_MMSS_CFG 102 +#define MSM_BUS_MASTER_CNOC_MNOC_CFG 103 +#define MSM_BUS_MASTER_TCU_0 104 +#define MSM_BUS_MASTER_TCU_1 105 +#define MSM_BUS_MASTER_CPP 106 +#define MSM_BUS_MASTER_AUDIO 107 +#define MSM_BUS_MASTER_LAST 108 + +#define MSM_BUS_SYSTEM_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB +#define MSM_BUS_CPSS_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_CPSS_FPB + +#define MSM_BUS_SNOC_MM_INT_0 10000 +#define MSM_BUS_SNOC_MM_INT_1 10001 +#define MSM_BUS_SNOC_MM_INT_2 10002 +#define MSM_BUS_SNOC_MM_INT_BIMC 10003 +#define MSM_BUS_SNOC_INT_0 10004 +#define MSM_BUS_SNOC_INT_1 10005 +#define MSM_BUS_SNOC_INT_BIMC 10006 +#define MSM_BUS_SNOC_BIMC_0_MAS 10007 +#define MSM_BUS_SNOC_BIMC_1_MAS 10008 +#define MSM_BUS_SNOC_QDSS_INT 10009 +#define MSM_BUS_PNOC_SNOC_MAS 10010 +#define MSM_BUS_PNOC_SNOC_SLV 10011 +#define MSM_BUS_PNOC_INT_0 10012 +#define MSM_BUS_PNOC_INT_1 10013 +#define MSM_BUS_PNOC_M_0 10014 +#define MSM_BUS_PNOC_M_1 10015 +#define MSM_BUS_BIMC_SNOC_MAS 10016 +#define MSM_BUS_BIMC_SNOC_SLV 10017 +#define MSM_BUS_PNOC_SLV_0 10018 +#define MSM_BUS_PNOC_SLV_1 10019 +#define MSM_BUS_PNOC_SLV_2 10020 +#define MSM_BUS_PNOC_SLV_3 
10021 +#define MSM_BUS_PNOC_SLV_4 10022 +#define MSM_BUS_PNOC_SLV_8 10023 +#define MSM_BUS_PNOC_SLV_9 10024 +#define MSM_BUS_SNOC_BIMC_0_SLV 10025 +#define MSM_BUS_SNOC_BIMC_1_SLV 10026 +#define MSM_BUS_MNOC_BIMC_MAS 10027 +#define MSM_BUS_MNOC_BIMC_SLV 10028 +#define MSM_BUS_BIMC_MNOC_MAS 10029 +#define MSM_BUS_BIMC_MNOC_SLV 10030 +#define MSM_BUS_SNOC_BIMC_MAS 10031 +#define MSM_BUS_SNOC_BIMC_SLV 10032 +#define MSM_BUS_CNOC_SNOC_MAS 10033 +#define MSM_BUS_CNOC_SNOC_SLV 10034 +#define MSM_BUS_SNOC_CNOC_MAS 10035 +#define MSM_BUS_SNOC_CNOC_SLV 10036 +#define MSM_BUS_OVNOC_SNOC_MAS 10037 +#define MSM_BUS_OVNOC_SNOC_SLV 10038 +#define MSM_BUS_SNOC_OVNOC_MAS 10039 +#define MSM_BUS_SNOC_OVNOC_SLV 10040 +#define MSM_BUS_SNOC_PNOC_MAS 10041 +#define MSM_BUS_SNOC_PNOC_SLV 10042 +#define MSM_BUS_BIMC_INT_APPS_EBI 10043 +#define MSM_BUS_BIMC_INT_APPS_SNOC 10044 +#define MSM_BUS_SNOC_BIMC_2_MAS 10045 +#define MSM_BUS_SNOC_BIMC_2_SLV 10046 +#define MSM_BUS_PNOC_SLV_5 10047 +#define MSM_BUS_PNOC_SLV_7 10048 +#define MSM_BUS_PNOC_INT_2 10049 +#define MSM_BUS_PNOC_INT_3 10050 +#define MSM_BUS_PNOC_INT_4 10051 +#define MSM_BUS_PNOC_INT_5 10052 +#define MSM_BUS_PNOC_INT_6 10053 +#define MSM_BUS_PNOC_INT_7 10054 +#define MSM_BUS_INT_LAST 10055 + +#define MSM_BUS_INT_TEST_ID 20000 +#define MSM_BUS_INT_TEST_LAST 20050 + +#define MSM_BUS_SLAVE_FIRST 512 +#define MSM_BUS_SLAVE_EBI_CH0 512 +#define MSM_BUS_SLAVE_EBI_CH1 513 +#define MSM_BUS_SLAVE_AMPSS_L2 514 +#define MSM_BUS_APPSS_SLAVE_FAB_MMSS 515 +#define MSM_BUS_APPSS_SLAVE_FAB_SYSTEM 516 +#define MSM_BUS_SYSTEM_SLAVE_FAB_APPS 517 +#define MSM_BUS_SLAVE_SPS 518 +#define MSM_BUS_SLAVE_SYSTEM_IMEM 519 +#define MSM_BUS_SLAVE_AMPSS 520 +#define MSM_BUS_SLAVE_MSS 521 +#define MSM_BUS_SLAVE_LPASS 522 +#define MSM_BUS_SYSTEM_SLAVE_CPSS_FPB 523 +#define MSM_BUS_SYSTEM_SLAVE_SYSTEM_FPB 524 +#define MSM_BUS_SYSTEM_SLAVE_MMSS_FPB 525 +#define MSM_BUS_SLAVE_CORESIGHT 526 +#define MSM_BUS_SLAVE_RIVA 527 +#define MSM_BUS_SLAVE_SMI 528 +#define 
MSM_BUS_MMSS_SLAVE_FAB_APPS 529 +#define MSM_BUS_MMSS_SLAVE_FAB_APPS_1 530 +#define MSM_BUS_SLAVE_MM_IMEM 531 +#define MSM_BUS_SLAVE_CRYPTO 532 +#define MSM_BUS_SLAVE_SPDM 533 +#define MSM_BUS_SLAVE_RPM 534 +#define MSM_BUS_SLAVE_RPM_MSG_RAM 535 +#define MSM_BUS_SLAVE_MPM 536 +#define MSM_BUS_SLAVE_PMIC1_SSBI1_A 537 +#define MSM_BUS_SLAVE_PMIC1_SSBI1_B 538 +#define MSM_BUS_SLAVE_PMIC1_SSBI1_C 539 +#define MSM_BUS_SLAVE_PMIC2_SSBI2_A 540 +#define MSM_BUS_SLAVE_PMIC2_SSBI2_B 541 +#define MSM_BUS_SLAVE_GSBI1_UART 542 +#define MSM_BUS_SLAVE_GSBI2_UART 543 +#define MSM_BUS_SLAVE_GSBI3_UART 544 +#define MSM_BUS_SLAVE_GSBI4_UART 545 +#define MSM_BUS_SLAVE_GSBI5_UART 546 +#define MSM_BUS_SLAVE_GSBI6_UART 547 +#define MSM_BUS_SLAVE_GSBI7_UART 548 +#define MSM_BUS_SLAVE_GSBI8_UART 549 +#define MSM_BUS_SLAVE_GSBI9_UART 550 +#define MSM_BUS_SLAVE_GSBI10_UART 551 +#define MSM_BUS_SLAVE_GSBI11_UART 552 +#define MSM_BUS_SLAVE_GSBI12_UART 553 +#define MSM_BUS_SLAVE_GSBI1_QUP 554 +#define MSM_BUS_SLAVE_GSBI2_QUP 555 +#define MSM_BUS_SLAVE_GSBI3_QUP 556 +#define MSM_BUS_SLAVE_GSBI4_QUP 557 +#define MSM_BUS_SLAVE_GSBI5_QUP 558 +#define MSM_BUS_SLAVE_GSBI6_QUP 559 +#define MSM_BUS_SLAVE_GSBI7_QUP 560 +#define MSM_BUS_SLAVE_GSBI8_QUP 561 +#define MSM_BUS_SLAVE_GSBI9_QUP 562 +#define MSM_BUS_SLAVE_GSBI10_QUP 563 +#define MSM_BUS_SLAVE_GSBI11_QUP 564 +#define MSM_BUS_SLAVE_GSBI12_QUP 565 +#define MSM_BUS_SLAVE_EBI2_NAND 566 +#define MSM_BUS_SLAVE_EBI2_CS0 567 +#define MSM_BUS_SLAVE_EBI2_CS1 568 +#define MSM_BUS_SLAVE_EBI2_CS2 569 +#define MSM_BUS_SLAVE_EBI2_CS3 570 +#define MSM_BUS_SLAVE_EBI2_CS4 571 +#define MSM_BUS_SLAVE_EBI2_CS5 572 +#define MSM_BUS_SLAVE_USB_FS1 573 +#define MSM_BUS_SLAVE_USB_FS2 574 +#define MSM_BUS_SLAVE_TSIF 575 +#define MSM_BUS_SLAVE_MSM_TSSC 576 +#define MSM_BUS_SLAVE_MSM_PDM 577 +#define MSM_BUS_SLAVE_MSM_DIMEM 578 +#define MSM_BUS_SLAVE_MSM_TCSR 579 +#define MSM_BUS_SLAVE_MSM_PRNG 580 +#define MSM_BUS_SLAVE_GSS 581 +#define MSM_BUS_SLAVE_SATA 582 +#define 
MSM_BUS_SLAVE_USB3 583 +#define MSM_BUS_SLAVE_WCSS 584 +#define MSM_BUS_SLAVE_OCIMEM 585 +#define MSM_BUS_SLAVE_SNOC_OCMEM 586 +#define MSM_BUS_SLAVE_SERVICE_SNOC 587 +#define MSM_BUS_SLAVE_QDSS_STM 588 +#define MSM_BUS_SLAVE_CAMERA_CFG 589 +#define MSM_BUS_SLAVE_DISPLAY_CFG 590 +#define MSM_BUS_SLAVE_OCMEM_CFG 591 +#define MSM_BUS_SLAVE_CPR_CFG 592 +#define MSM_BUS_SLAVE_CPR_XPU_CFG 593 +#define MSM_BUS_SLAVE_MISC_CFG 594 +#define MSM_BUS_SLAVE_MISC_XPU_CFG 595 +#define MSM_BUS_SLAVE_VENUS_CFG 596 +#define MSM_BUS_SLAVE_MISC_VENUS_CFG 597 +#define MSM_BUS_SLAVE_GRAPHICS_3D_CFG 598 +#define MSM_BUS_SLAVE_MMSS_CLK_CFG 599 +#define MSM_BUS_SLAVE_MMSS_CLK_XPU_CFG 600 +#define MSM_BUS_SLAVE_MNOC_MPU_CFG 601 +#define MSM_BUS_SLAVE_ONOC_MPU_CFG 602 +#define MSM_BUS_SLAVE_SERVICE_MNOC 603 +#define MSM_BUS_SLAVE_OCMEM 604 +#define MSM_BUS_SLAVE_SERVICE_ONOC 605 +#define MSM_BUS_SLAVE_SDCC_1 606 +#define MSM_BUS_SLAVE_SDCC_3 607 +#define MSM_BUS_SLAVE_SDCC_2 608 +#define MSM_BUS_SLAVE_SDCC_4 609 +#define MSM_BUS_SLAVE_BAM_DMA 610 +#define MSM_BUS_SLAVE_BLSP_2 611 +#define MSM_BUS_SLAVE_USB_HSIC 612 +#define MSM_BUS_SLAVE_BLSP_1 613 +#define MSM_BUS_SLAVE_USB_HS 614 +#define MSM_BUS_SLAVE_PDM 615 +#define MSM_BUS_SLAVE_PERIPH_APU_CFG 616 +#define MSM_BUS_SLAVE_PNOC_MPU_CFG 617 +#define MSM_BUS_SLAVE_PRNG 618 +#define MSM_BUS_SLAVE_SERVICE_PNOC 619 +#define MSM_BUS_SLAVE_CLK_CTL 620 +#define MSM_BUS_SLAVE_CNOC_MSS 621 +#define MSM_BUS_SLAVE_SECURITY 622 +#define MSM_BUS_SLAVE_TCSR 623 +#define MSM_BUS_SLAVE_TLMM 624 +#define MSM_BUS_SLAVE_CRYPTO_0_CFG 625 +#define MSM_BUS_SLAVE_CRYPTO_1_CFG 626 +#define MSM_BUS_SLAVE_IMEM_CFG 627 +#define MSM_BUS_SLAVE_MESSAGE_RAM 628 +#define MSM_BUS_SLAVE_BIMC_CFG 629 +#define MSM_BUS_SLAVE_BOOT_ROM 630 +#define MSM_BUS_SLAVE_CNOC_MNOC_MMSS_CFG 631 +#define MSM_BUS_SLAVE_PMIC_ARB 632 +#define MSM_BUS_SLAVE_SPDM_WRAPPER 633 +#define MSM_BUS_SLAVE_DEHR_CFG 634 +#define MSM_BUS_SLAVE_QDSS_CFG 635 +#define MSM_BUS_SLAVE_RBCPR_CFG 636 +#define 
MSM_BUS_SLAVE_RBCPR_QDSS_APU_CFG 637 +#define MSM_BUS_SLAVE_SNOC_MPU_CFG 638 +#define MSM_BUS_SLAVE_CNOC_ONOC_CFG 639 +#define MSM_BUS_SLAVE_CNOC_MNOC_CFG 640 +#define MSM_BUS_SLAVE_PNOC_CFG 641 +#define MSM_BUS_SLAVE_SNOC_CFG 642 +#define MSM_BUS_SLAVE_EBI1_DLL_CFG 643 +#define MSM_BUS_SLAVE_PHY_APU_CFG 644 +#define MSM_BUS_SLAVE_EBI1_PHY_CFG 645 +#define MSM_BUS_SLAVE_SERVICE_CNOC 646 +#define MSM_BUS_SLAVE_IPS_CFG 647 +#define MSM_BUS_SLAVE_QPIC 648 +#define MSM_BUS_SLAVE_DSI_CFG 649 +#define MSM_BUS_SLAVE_UFS_CFG 650 +#define MSM_BUS_SLAVE_RBCPR_CX_CFG 651 +#define MSM_BUS_SLAVE_RBCPR_MX_CFG 652 +#define MSM_BUS_SLAVE_PCIE_CFG 653 +#define MSM_BUS_SLAVE_USB_PHYS_CFG 654 +#define MSM_BUS_SLAVE_VIDEO_CAP_CFG 655 +#define MSM_BUS_SLAVE_AVSYNC_CFG 656 +#define MSM_BUS_SLAVE_CRYPTO_2_CFG 657 +#define MSM_BUS_SLAVE_VPU_CFG 658 +#define MSM_BUS_SLAVE_BCAST_CFG 659 +#define MSM_BUS_SLAVE_KLM_CFG 660 +#define MSM_BUS_SLAVE_GENI_IR_CFG 661 +#define MSM_BUS_SLAVE_OCMEM_GFX 662 +#define MSM_BUS_SLAVE_CATS_128 663 +#define MSM_BUS_SLAVE_OCMEM_64 664 +#define MSM_BUS_SLAVE_PCIE_0 665 +#define MSM_BUS_SLAVE_PCIE_1 666 +#define MSM_BUS_SLAVE_PCIE_0_CFG 667 +#define MSM_BUS_SLAVE_PCIE_1_CFG 668 +#define MSM_BUS_SLAVE_SRVC_MNOC 669 +#define MSM_BUS_SLAVE_USB_HS2 670 +#define MSM_BUS_SLAVE_AUDIO 671 +#define MSM_BUS_SLAVE_TCU 672 +#define MSM_BUS_SLAVE_APPSS 673 +#define MSM_BUS_SLAVE_PCIE_PARF 674 +#define MSM_BUS_SLAVE_USB3_PHY_CFG 675 +#define MSM_BUS_SLAVE_IPA_CFG 676 +#define MSM_BUS_SLAVE_LAST 677 + +#define MSM_BUS_SYSTEM_FPB_SLAVE_SYSTEM MSM_BUS_SYSTEM_SLAVE_SYSTEM_FPB +#define MSM_BUS_CPSS_FPB_SLAVE_SYSTEM MSM_BUS_SYSTEM_SLAVE_CPSS_FPB + +/* + * ID's used in RPM messages + */ +#define ICBID_MASTER_APPSS_PROC 0 +#define ICBID_MASTER_MSS_PROC 1 +#define ICBID_MASTER_MNOC_BIMC 2 +#define ICBID_MASTER_SNOC_BIMC 3 +#define ICBID_MASTER_SNOC_BIMC_0 ICBID_MASTER_SNOC_BIMC +#define ICBID_MASTER_CNOC_MNOC_MMSS_CFG 4 +#define ICBID_MASTER_CNOC_MNOC_CFG 5 +#define 
ICBID_MASTER_GFX3D 6 +#define ICBID_MASTER_JPEG 7 +#define ICBID_MASTER_MDP 8 +#define ICBID_MASTER_MDP0 ICBID_MASTER_MDP +#define ICBID_MASTER_MDPS ICBID_MASTER_MDP +#define ICBID_MASTER_VIDEO 9 +#define ICBID_MASTER_VIDEO_P0 ICBID_MASTER_VIDEO +#define ICBID_MASTER_VIDEO_P1 10 +#define ICBID_MASTER_VFE 11 +#define ICBID_MASTER_CNOC_ONOC_CFG 12 +#define ICBID_MASTER_JPEG_OCMEM 13 +#define ICBID_MASTER_MDP_OCMEM 14 +#define ICBID_MASTER_VIDEO_P0_OCMEM 15 +#define ICBID_MASTER_VIDEO_P1_OCMEM 16 +#define ICBID_MASTER_VFE_OCMEM 17 +#define ICBID_MASTER_LPASS_AHB 18 +#define ICBID_MASTER_QDSS_BAM 19 +#define ICBID_MASTER_SNOC_CFG 20 +#define ICBID_MASTER_BIMC_SNOC 21 +#define ICBID_MASTER_CNOC_SNOC 22 +#define ICBID_MASTER_CRYPTO 23 +#define ICBID_MASTER_CRYPTO_CORE0 ICBID_MASTER_CRYPTO +#define ICBID_MASTER_CRYPTO_CORE1 24 +#define ICBID_MASTER_LPASS_PROC 25 +#define ICBID_MASTER_MSS 26 +#define ICBID_MASTER_MSS_NAV 27 +#define ICBID_MASTER_OCMEM_DMA 28 +#define ICBID_MASTER_PNOC_SNOC 29 +#define ICBID_MASTER_WCSS 30 +#define ICBID_MASTER_QDSS_ETR 31 +#define ICBID_MASTER_USB3 32 +#define ICBID_MASTER_USB3_0 ICBID_MASTER_USB3 +#define ICBID_MASTER_SDCC_1 33 +#define ICBID_MASTER_SDCC_3 34 +#define ICBID_MASTER_SDCC_2 35 +#define ICBID_MASTER_SDCC_4 36 +#define ICBID_MASTER_TSIF 37 +#define ICBID_MASTER_BAM_DMA 38 +#define ICBID_MASTER_BLSP_2 39 +#define ICBID_MASTER_USB_HSIC 40 +#define ICBID_MASTER_BLSP_1 41 +#define ICBID_MASTER_USB_HS 42 +#define ICBID_MASTER_USB_HS1 ICBID_MASTER_USB_HS +#define ICBID_MASTER_PNOC_CFG 43 +#define ICBID_MASTER_SNOC_PNOC 44 +#define ICBID_MASTER_RPM_INST 45 +#define ICBID_MASTER_RPM_DATA 46 +#define ICBID_MASTER_RPM_SYS 47 +#define ICBID_MASTER_DEHR 48 +#define ICBID_MASTER_QDSS_DAP 49 +#define ICBID_MASTER_SPDM 50 +#define ICBID_MASTER_TIC 51 +#define ICBID_MASTER_SNOC_CNOC 52 +#define ICBID_MASTER_GFX3D_OCMEM 53 +#define ICBID_MASTER_GFX3D_GMEM ICBID_MASTER_GFX3D_OCMEM +#define ICBID_MASTER_OVIRT_SNOC 54 +#define 
ICBID_MASTER_SNOC_OVIRT 55 +#define ICBID_MASTER_SNOC_GVIRT ICBID_MASTER_SNOC_OVIRT +#define ICBID_MASTER_ONOC_OVIRT 56 +#define ICBID_MASTER_USB_HS2 57 +#define ICBID_MASTER_QPIC 58 +#define ICBID_MASTER_IPA 59 +#define ICBID_MASTER_DSI 60 +#define ICBID_MASTER_MDP1 61 +#define ICBID_MASTER_MDPE ICBID_MASTER_MDP1 +#define ICBID_MASTER_VPU_PROC 62 +#define ICBID_MASTER_VPU 63 +#define ICBID_MASTER_VPU0 ICBID_MASTER_VPU +#define ICBID_MASTER_CRYPTO_CORE2 64 +#define ICBID_MASTER_PCIE_0 65 +#define ICBID_MASTER_PCIE_1 66 +#define ICBID_MASTER_SATA 67 +#define ICBID_MASTER_UFS 68 +#define ICBID_MASTER_USB3_1 69 +#define ICBID_MASTER_VIDEO_OCMEM 70 +#define ICBID_MASTER_VPU1 71 +#define ICBID_MASTER_VCAP 72 +#define ICBID_MASTER_EMAC 73 +#define ICBID_MASTER_BCAST 74 +#define ICBID_MASTER_MMSS_PROC 75 +#define ICBID_MASTER_SNOC_BIMC_1 76 +#define ICBID_MASTER_SNOC_PCNOC 77 +#define ICBID_MASTER_AUDIO 78 +#define ICBID_MASTER_MM_INT_0 79 +#define ICBID_MASTER_MM_INT_1 80 +#define ICBID_MASTER_MM_INT_2 81 +#define ICBID_MASTER_MM_INT_BIMC 82 +#define ICBID_MASTER_MSS_INT 83 +#define ICBID_MASTER_PCNOC_CFG 84 +#define ICBID_MASTER_PCNOC_INT_0 85 +#define ICBID_MASTER_PCNOC_INT_1 86 +#define ICBID_MASTER_PCNOC_M_0 87 +#define ICBID_MASTER_PCNOC_M_1 88 +#define ICBID_MASTER_PCNOC_S_0 89 +#define ICBID_MASTER_PCNOC_S_1 90 +#define ICBID_MASTER_PCNOC_S_2 91 +#define ICBID_MASTER_PCNOC_S_3 92 +#define ICBID_MASTER_PCNOC_S_4 93 +#define ICBID_MASTER_PCNOC_S_6 94 +#define ICBID_MASTER_PCNOC_S_7 95 +#define ICBID_MASTER_PCNOC_S_8 96 +#define ICBID_MASTER_PCNOC_S_9 97 +#define ICBID_MASTER_QDSS_INT 98 +#define ICBID_MASTER_SNOC_INT_0 99 +#define ICBID_MASTER_SNOC_INT_1 100 +#define ICBID_MASTER_SNOC_INT_BIMC 101 +#define ICBID_MASTER_TCU_0 102 +#define ICBID_MASTER_TCU_1 103 +#define ICBID_MASTER_BIMC_INT_0 104 +#define ICBID_MASTER_BIMC_INT_1 105 +#define ICBID_MASTER_CAMERA 106 +#define ICBID_MASTER_RICA 107 +#define ICBID_MASTER_PCNOC_S_5 129 +#define ICBID_MASTER_PCNOC_INT_2 
124 +#define ICBID_MASTER_PCNOC_INT_3 125 +#define ICBID_MASTER_PCNOC_INT_4 126 +#define ICBID_MASTER_PCNOC_INT_5 127 +#define ICBID_MASTER_PCNOC_INT_6 128 + +#define ICBID_SLAVE_EBI1 0 +#define ICBID_SLAVE_APPSS_L2 1 +#define ICBID_SLAVE_BIMC_SNOC 2 +#define ICBID_SLAVE_CAMERA_CFG 3 +#define ICBID_SLAVE_DISPLAY_CFG 4 +#define ICBID_SLAVE_OCMEM_CFG 5 +#define ICBID_SLAVE_CPR_CFG 6 +#define ICBID_SLAVE_CPR_XPU_CFG 7 +#define ICBID_SLAVE_MISC_CFG 8 +#define ICBID_SLAVE_MISC_XPU_CFG 9 +#define ICBID_SLAVE_VENUS_CFG 10 +#define ICBID_SLAVE_GFX3D_CFG 11 +#define ICBID_SLAVE_MMSS_CLK_CFG 12 +#define ICBID_SLAVE_MMSS_CLK_XPU_CFG 13 +#define ICBID_SLAVE_MNOC_MPU_CFG 14 +#define ICBID_SLAVE_ONOC_MPU_CFG 15 +#define ICBID_SLAVE_MNOC_BIMC 16 +#define ICBID_SLAVE_SERVICE_MNOC 17 +#define ICBID_SLAVE_OCMEM 18 +#define ICBID_SLAVE_GMEM ICBID_SLAVE_OCMEM +#define ICBID_SLAVE_SERVICE_ONOC 19 +#define ICBID_SLAVE_APPSS 20 +#define ICBID_SLAVE_LPASS 21 +#define ICBID_SLAVE_USB3 22 +#define ICBID_SLAVE_USB3_0 ICBID_SLAVE_USB3 +#define ICBID_SLAVE_WCSS 23 +#define ICBID_SLAVE_SNOC_BIMC 24 +#define ICBID_SLAVE_SNOC_BIMC_0 ICBID_SLAVE_SNOC_BIMC +#define ICBID_SLAVE_SNOC_CNOC 25 +#define ICBID_SLAVE_IMEM 26 +#define ICBID_SLAVE_OCIMEM ICBID_SLAVE_IMEM +#define ICBID_SLAVE_SNOC_OVIRT 27 +#define ICBID_SLAVE_SNOC_GVIRT ICBID_SLAVE_SNOC_OVIRT +#define ICBID_SLAVE_SNOC_PNOC 28 +#define ICBID_SLAVE_SNOC_PCNOC ICBID_SLAVE_SNOC_PNOC +#define ICBID_SLAVE_SERVICE_SNOC 29 +#define ICBID_SLAVE_QDSS_STM 30 +#define ICBID_SLAVE_SDCC_1 31 +#define ICBID_SLAVE_SDCC_3 32 +#define ICBID_SLAVE_SDCC_2 33 +#define ICBID_SLAVE_SDCC_4 34 +#define ICBID_SLAVE_TSIF 35 +#define ICBID_SLAVE_BAM_DMA 36 +#define ICBID_SLAVE_BLSP_2 37 +#define ICBID_SLAVE_USB_HSIC 38 +#define ICBID_SLAVE_BLSP_1 39 +#define ICBID_SLAVE_USB_HS 40 +#define ICBID_SLAVE_USB_HS1 ICBID_SLAVE_USB_HS +#define ICBID_SLAVE_PDM 41 +#define ICBID_SLAVE_PERIPH_APU_CFG 42 +#define ICBID_SLAVE_PNOC_MPU_CFG 43 +#define ICBID_SLAVE_PRNG 44 +#define 
ICBID_SLAVE_PNOC_SNOC 45 +#define ICBID_SLAVE_PCNOC_SNOC ICBID_SLAVE_PNOC_SNOC +#define ICBID_SLAVE_SERVICE_PNOC 46 +#define ICBID_SLAVE_CLK_CTL 47 +#define ICBID_SLAVE_CNOC_MSS 48 +#define ICBID_SLAVE_PCNOC_MSS ICBID_SLAVE_CNOC_MSS +#define ICBID_SLAVE_SECURITY 49 +#define ICBID_SLAVE_TCSR 50 +#define ICBID_SLAVE_TLMM 51 +#define ICBID_SLAVE_CRYPTO_0_CFG 52 +#define ICBID_SLAVE_CRYPTO_1_CFG 53 +#define ICBID_SLAVE_IMEM_CFG 54 +#define ICBID_SLAVE_MESSAGE_RAM 55 +#define ICBID_SLAVE_BIMC_CFG 56 +#define ICBID_SLAVE_BOOT_ROM 57 +#define ICBID_SLAVE_CNOC_MNOC_MMSS_CFG 58 +#define ICBID_SLAVE_PMIC_ARB 59 +#define ICBID_SLAVE_SPDM_WRAPPER 60 +#define ICBID_SLAVE_DEHR_CFG 61 +#define ICBID_SLAVE_MPM 62 +#define ICBID_SLAVE_QDSS_CFG 63 +#define ICBID_SLAVE_RBCPR_CFG 64 +#define ICBID_SLAVE_RBCPR_CX_CFG ICBID_SLAVE_RBCPR_CFG +#define ICBID_SLAVE_RBCPR_QDSS_APU_CFG 65 +#define ICBID_SLAVE_CNOC_MNOC_CFG 66 +#define ICBID_SLAVE_SNOC_MPU_CFG 67 +#define ICBID_SLAVE_CNOC_ONOC_CFG 68 +#define ICBID_SLAVE_PNOC_CFG 69 +#define ICBID_SLAVE_SNOC_CFG 70 +#define ICBID_SLAVE_EBI1_DLL_CFG 71 +#define ICBID_SLAVE_PHY_APU_CFG 72 +#define ICBID_SLAVE_EBI1_PHY_CFG 73 +#define ICBID_SLAVE_RPM 74 +#define ICBID_SLAVE_CNOC_SNOC 75 +#define ICBID_SLAVE_SERVICE_CNOC 76 +#define ICBID_SLAVE_OVIRT_SNOC 77 +#define ICBID_SLAVE_OVIRT_OCMEM 78 +#define ICBID_SLAVE_USB_HS2 79 +#define ICBID_SLAVE_QPIC 80 +#define ICBID_SLAVE_IPS_CFG 81 +#define ICBID_SLAVE_DSI_CFG 82 +#define ICBID_SLAVE_USB3_1 83 +#define ICBID_SLAVE_PCIE_0 84 +#define ICBID_SLAVE_PCIE_1 85 +#define ICBID_SLAVE_PSS_SMMU_CFG 86 +#define ICBID_SLAVE_CRYPTO_2_CFG 87 +#define ICBID_SLAVE_PCIE_0_CFG 88 +#define ICBID_SLAVE_PCIE_1_CFG 89 +#define ICBID_SLAVE_SATA_CFG 90 +#define ICBID_SLAVE_SPSS_GENI_IR 91 +#define ICBID_SLAVE_UFS_CFG 92 +#define ICBID_SLAVE_AVSYNC_CFG 93 +#define ICBID_SLAVE_VPU_CFG 94 +#define ICBID_SLAVE_USB_PHY_CFG 95 +#define ICBID_SLAVE_RBCPR_MX_CFG 96 +#define ICBID_SLAVE_PCIE_PARF 97 +#define ICBID_SLAVE_VCAP_CFG 
98 +#define ICBID_SLAVE_EMAC_CFG 99 +#define ICBID_SLAVE_BCAST_CFG 100 +#define ICBID_SLAVE_KLM_CFG 101 +#define ICBID_SLAVE_DISPLAY_PWM 102 +#define ICBID_SLAVE_GENI 103 +#define ICBID_SLAVE_SNOC_BIMC_1 104 +#define ICBID_SLAVE_AUDIO 105 +#define ICBID_SLAVE_CATS_0 106 +#define ICBID_SLAVE_CATS_1 107 +#define ICBID_SLAVE_MM_INT_0 108 +#define ICBID_SLAVE_MM_INT_1 109 +#define ICBID_SLAVE_MM_INT_2 110 +#define ICBID_SLAVE_MM_INT_BIMC 111 +#define ICBID_SLAVE_MMU_MODEM_XPU_CFG 112 +#define ICBID_SLAVE_MSS_INT 113 +#define ICBID_SLAVE_PCNOC_INT_0 114 +#define ICBID_SLAVE_PCNOC_INT_1 115 +#define ICBID_SLAVE_PCNOC_M_0 116 +#define ICBID_SLAVE_PCNOC_M_1 117 +#define ICBID_SLAVE_PCNOC_S_0 118 +#define ICBID_SLAVE_PCNOC_S_1 119 +#define ICBID_SLAVE_PCNOC_S_2 120 +#define ICBID_SLAVE_PCNOC_S_3 121 +#define ICBID_SLAVE_PCNOC_S_4 122 +#define ICBID_SLAVE_PCNOC_S_6 123 +#define ICBID_SLAVE_PCNOC_S_7 124 +#define ICBID_SLAVE_PCNOC_S_8 125 +#define ICBID_SLAVE_PCNOC_S_9 126 +#define ICBID_SLAVE_PRNG_XPU_CFG 127 +#define ICBID_SLAVE_QDSS_INT 128 +#define ICBID_SLAVE_RPM_XPU_CFG 129 +#define ICBID_SLAVE_SNOC_INT_0 130 +#define ICBID_SLAVE_SNOC_INT_1 131 +#define ICBID_SLAVE_SNOC_INT_BIMC 132 +#define ICBID_SLAVE_TCU 133 +#define ICBID_SLAVE_BIMC_INT_0 134 +#define ICBID_SLAVE_BIMC_INT_1 135 +#define ICBID_SLAVE_RICA_CFG 136 +#define ICBID_SLAVE_PCNOC_S_5 189 +#define ICBID_SLAVE_PCNOC_INT_2 184 +#define ICBID_SLAVE_PCNOC_INT_3 185 +#define ICBID_SLAVE_PCNOC_INT_4 186 +#define ICBID_SLAVE_PCNOC_INT_5 187 +#define ICBID_SLAVE_PCNOC_INT_6 188 +#define ICBID_SLAVE_USB3_PHY_CFG 182 +#define ICBID_SLAVE_IPA_CFG 183 + +#endif diff --git a/include/dt-bindings/soc/msm-bus-rule-ops.h b/include/dt-bindings/soc/msm-bus-rule-ops.h new file mode 100644 index 000000000000..5ef9c6d77ff1 --- /dev/null +++ b/include/dt-bindings/soc/msm-bus-rule-ops.h @@ -0,0 +1,32 @@ +/* Copyright (c) 2014, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __MSM_BUS_RULE_OPS_H +#define __MSM_BUS_RULE_OPS_H + +#define FLD_IB 0 +#define FLD_AB 1 +#define FLD_CLK 2 + +#define OP_LE 0 +#define OP_LT 1 +#define OP_GE 2 +#define OP_GT 3 +#define OP_NOOP 4 + +#define RULE_STATE_NOT_APPLIED 0 +#define RULE_STATE_APPLIED 1 + +#define THROTTLE_ON 0 +#define THROTTLE_OFF 1 + +#endif diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index a428aec36ace..31fb4eb4294a 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -206,6 +206,8 @@ struct clk_ops { struct clk_rate_request *req); int (*set_parent)(struct clk_hw *hw, u8 index); u8 (*get_parent)(struct clk_hw *hw); + struct clk_hw *(*get_safe_parent)(struct clk_hw *hw, + unsigned long *safe_freq); int (*set_rate)(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate); int (*set_rate_and_parent)(struct clk_hw *hw, @@ -466,7 +468,7 @@ void clk_hw_unregister_divider(struct clk_hw *hw); struct clk_mux { struct clk_hw hw; void __iomem *reg; - u32 *table; + unsigned int *table; u32 mask; u8 shift; u8 flags; @@ -484,6 +486,11 @@ struct clk_mux { extern const struct clk_ops clk_mux_ops; extern const struct clk_ops clk_mux_ro_ops; +unsigned int clk_mux_get_parent(struct clk_hw *hw, unsigned int val, + unsigned int *table, unsigned long flags); +unsigned int clk_mux_reindex(u8 index, unsigned int *table, + unsigned long flags); + struct clk *clk_register_mux(struct device *dev, const char *name, const char * const *parent_names, u8 num_parents, unsigned 
long flags, @@ -499,12 +506,12 @@ struct clk *clk_register_mux_table(struct device *dev, const char *name, const char * const *parent_names, u8 num_parents, unsigned long flags, void __iomem *reg, u8 shift, u32 mask, - u8 clk_mux_flags, u32 *table, spinlock_t *lock); + u8 clk_mux_flags, unsigned int *table, spinlock_t *lock); struct clk_hw *clk_hw_register_mux_table(struct device *dev, const char *name, const char * const *parent_names, u8 num_parents, unsigned long flags, void __iomem *reg, u8 shift, u32 mask, - u8 clk_mux_flags, u32 *table, spinlock_t *lock); + u8 clk_mux_flags, unsigned int *table, spinlock_t *lock); void clk_unregister_mux(struct clk *clk); void clk_hw_unregister_mux(struct clk_hw *hw); diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 32dc0cbd51ca..9c0a4b5e32ba 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -73,6 +73,8 @@ struct cpufreq_policy { unsigned int cpu; /* cpu managing this policy, must be online */ struct clk *clk; + struct clk *l2_clk; /* L2 clock */ + unsigned int l2_rate[3]; /* L2 bus clock rate thresholds */ struct cpufreq_cpuinfo cpuinfo;/* see above */ unsigned int min; /* in kHz */ diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 08528afdf58b..6f3e6cafe6ae 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -702,6 +702,9 @@ dma_mark_declared_memory_occupied(struct device *dev, } #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */ +int dma_configure(struct device *dev); +void dma_deconfigure(struct device *dev); + /* * Managed DMA API */ diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 436dc21318af..f2960e4de344 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -351,6 +351,9 @@ int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, const struct iommu_ops *ops); void iommu_fwspec_free(struct device *dev); int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids); +void 
iommu_register_instance(struct fwnode_handle *fwnode, + const struct iommu_ops *ops); +const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode); #else /* CONFIG_IOMMU_API */ @@ -580,6 +583,17 @@ static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids, return -ENODEV; } +static inline void iommu_register_instance(struct fwnode_handle *fwnode, + const struct iommu_ops *ops) +{ +} + +static inline +const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode) +{ + return NULL; +} + #endif /* CONFIG_IOMMU_API */ #endif /* __LINUX_IOMMU_H */ diff --git a/include/linux/msm-bus-board.h b/include/linux/msm-bus-board.h new file mode 100644 index 000000000000..d2be55269438 --- /dev/null +++ b/include/linux/msm-bus-board.h @@ -0,0 +1,198 @@ +/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __ASM_ARCH_MSM_BUS_BOARD_H +#define __ASM_ARCH_MSM_BUS_BOARD_H + +#include <linux/types.h> +#include <linux/input.h> + +enum context { + DUAL_CTX, + ACTIVE_CTX, + NUM_CTX +}; + +struct msm_bus_fabric_registration { + unsigned int id; + const char *name; + struct msm_bus_node_info *info; + unsigned int len; + int ahb; + const char *fabclk[NUM_CTX]; + const char *iface_clk; + unsigned int offset; + unsigned int haltid; + unsigned int rpm_enabled; + unsigned int nmasters; + unsigned int nslaves; + unsigned int ntieredslaves; + bool il_flag; + const struct msm_bus_board_algorithm *board_algo; + int hw_sel; + void *hw_data; + uint32_t qos_freq; + uint32_t qos_baseoffset; + u64 nr_lim_thresh; + uint32_t eff_fact; + uint32_t qos_delta; + bool virt; +}; + +struct msm_bus_device_node_registration { + struct msm_bus_node_device_type *info; + unsigned int num_devices; + bool virt; +}; + +enum msm_bus_bw_tier_type { + MSM_BUS_BW_TIER1 = 1, + MSM_BUS_BW_TIER2, + MSM_BUS_BW_COUNT, + MSM_BUS_BW_SIZE = 0x7FFFFFFF, +}; + +struct msm_bus_halt_vector { + uint32_t haltval; + uint32_t haltmask; +}; + +extern struct msm_bus_fabric_registration msm_bus_apps_fabric_pdata; +extern struct msm_bus_fabric_registration msm_bus_sys_fabric_pdata; +extern struct msm_bus_fabric_registration msm_bus_mm_fabric_pdata; +extern struct msm_bus_fabric_registration msm_bus_sys_fpb_pdata; +extern struct msm_bus_fabric_registration msm_bus_cpss_fpb_pdata; +extern struct msm_bus_fabric_registration msm_bus_def_fab_pdata; + +extern struct msm_bus_fabric_registration msm_bus_8960_apps_fabric_pdata; +extern struct msm_bus_fabric_registration msm_bus_8960_sys_fabric_pdata; +extern struct msm_bus_fabric_registration msm_bus_8960_mm_fabric_pdata; +extern struct msm_bus_fabric_registration msm_bus_8960_sg_mm_fabric_pdata; +extern struct msm_bus_fabric_registration msm_bus_8960_sys_fpb_pdata; +extern struct msm_bus_fabric_registration msm_bus_8960_cpss_fpb_pdata; + +extern struct 
msm_bus_fabric_registration msm_bus_8064_apps_fabric_pdata; +extern struct msm_bus_fabric_registration msm_bus_8064_sys_fabric_pdata; +extern struct msm_bus_fabric_registration msm_bus_8064_mm_fabric_pdata; +extern struct msm_bus_fabric_registration msm_bus_8064_sys_fpb_pdata; +extern struct msm_bus_fabric_registration msm_bus_8064_cpss_fpb_pdata; + +extern struct msm_bus_fabric_registration msm_bus_9615_sys_fabric_pdata; +extern struct msm_bus_fabric_registration msm_bus_9615_def_fab_pdata; + +extern struct msm_bus_fabric_registration msm_bus_8930_apps_fabric_pdata; +extern struct msm_bus_fabric_registration msm_bus_8930_sys_fabric_pdata; +extern struct msm_bus_fabric_registration msm_bus_8930_mm_fabric_pdata; +extern struct msm_bus_fabric_registration msm_bus_8930_sys_fpb_pdata; +extern struct msm_bus_fabric_registration msm_bus_8930_cpss_fpb_pdata; + +extern struct msm_bus_fabric_registration msm_bus_8974_sys_noc_pdata; +extern struct msm_bus_fabric_registration msm_bus_8974_mmss_noc_pdata; +extern struct msm_bus_fabric_registration msm_bus_8974_bimc_pdata; +extern struct msm_bus_fabric_registration msm_bus_8974_ocmem_noc_pdata; +extern struct msm_bus_fabric_registration msm_bus_8974_periph_noc_pdata; +extern struct msm_bus_fabric_registration msm_bus_8974_config_noc_pdata; +extern struct msm_bus_fabric_registration msm_bus_8974_ocmem_vnoc_pdata; + +extern struct msm_bus_fabric_registration msm_bus_9625_sys_noc_pdata; +extern struct msm_bus_fabric_registration msm_bus_9625_bimc_pdata; +extern struct msm_bus_fabric_registration msm_bus_9625_periph_noc_pdata; +extern struct msm_bus_fabric_registration msm_bus_9625_config_noc_pdata; + +extern int msm_bus_device_match_adhoc(struct device *dev, void *id); + +void msm_bus_rpm_set_mt_mask(void); +int msm_bus_board_rpm_get_il_ids(uint16_t *id); +int msm_bus_board_get_iid(int id); + +#define NFAB_MSM8226 6 +#define NFAB_MSM8610 5 + +/* + * These macros specify the convention followed for allocating + * ids to fabrics, 
masters and slaves for 8x60. + * + * A node can be identified as a master/slave/fabric by using + * these ids. + */ +#define FABRIC_ID_KEY 1024 +#define SLAVE_ID_KEY ((FABRIC_ID_KEY) >> 1) +#define MAX_FAB_KEY 7168 /* OR(All fabric ids) */ +#define INT_NODE_START 10000 + +#define GET_FABID(id) ((id) & MAX_FAB_KEY) + +#define NODE_ID(id) ((id) & (FABRIC_ID_KEY - 1)) +#define IS_SLAVE(id) ((NODE_ID(id)) >= SLAVE_ID_KEY ? 1 : 0) +#define CHECK_ID(iid, id) (((iid & id) != id) ? -ENXIO : iid) + +/* + * The following macros are used to format the data for port halt + * and unhalt requests. + */ +#define MSM_BUS_CLK_HALT 0x1 +#define MSM_BUS_CLK_HALT_MASK 0x1 +#define MSM_BUS_CLK_HALT_FIELDSIZE 0x1 +#define MSM_BUS_CLK_UNHALT 0x0 + +#define MSM_BUS_MASTER_SHIFT(master, fieldsize) \ + ((master) * (fieldsize)) + +#define MSM_BUS_SET_BITFIELD(word, fieldmask, fieldvalue) \ + { \ + (word) &= ~(fieldmask); \ + (word) |= (fieldvalue); \ + } + + +#define MSM_BUS_MASTER_HALT(u32haltmask, u32haltval, master) \ + MSM_BUS_SET_BITFIELD(u32haltmask, \ + MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\ + MSM_BUS_CLK_HALT_FIELDSIZE), \ + MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\ + MSM_BUS_CLK_HALT_FIELDSIZE))\ + MSM_BUS_SET_BITFIELD(u32haltval, \ + MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\ + MSM_BUS_CLK_HALT_FIELDSIZE), \ + MSM_BUS_CLK_HALT<<MSM_BUS_MASTER_SHIFT((master),\ + MSM_BUS_CLK_HALT_FIELDSIZE))\ + +#define MSM_BUS_MASTER_UNHALT(u32haltmask, u32haltval, master) \ + MSM_BUS_SET_BITFIELD(u32haltmask, \ + MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\ + MSM_BUS_CLK_HALT_FIELDSIZE), \ + MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\ + MSM_BUS_CLK_HALT_FIELDSIZE))\ + MSM_BUS_SET_BITFIELD(u32haltval, \ + MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\ + MSM_BUS_CLK_HALT_FIELDSIZE), \ + MSM_BUS_CLK_UNHALT<<MSM_BUS_MASTER_SHIFT((master),\ + MSM_BUS_CLK_HALT_FIELDSIZE))\ + +#define RPM_BUS_SLAVE_REQ 0x766c7362 +#define 
RPM_BUS_MASTER_REQ 0x73616d62 + +enum msm_bus_rpm_slave_field_type { + RPM_SLAVE_FIELD_BW = 0x00007762, +}; + +enum msm_bus_rpm_mas_field_type { + RPM_MASTER_FIELD_BW = 0x00007762, + RPM_MASTER_FIELD_BW_T0 = 0x30747762, + RPM_MASTER_FIELD_BW_T1 = 0x31747762, + RPM_MASTER_FIELD_BW_T2 = 0x32747762, +}; + +#include <dt-bindings/soc/msm-bus-ids.h> + + +#endif /*__ASM_ARCH_MSM_BUS_BOARD_H */ diff --git a/include/linux/msm-bus.h b/include/linux/msm-bus.h new file mode 100644 index 000000000000..eccd2ae6b4e6 --- /dev/null +++ b/include/linux/msm-bus.h @@ -0,0 +1,200 @@ +/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _ARCH_ARM_MACH_MSM_BUS_H +#define _ARCH_ARM_MACH_MSM_BUS_H + +#include <linux/types.h> +#include <linux/input.h> +#include <linux/platform_device.h> + +/* + * Macros for clients to convert their data to ib and ab + * Ws : Time window over which to transfer the data in SECONDS + * Bs : Size of the data block in bytes + * Per : Recurrence period + * Tb : Throughput bandwidth to prevent stalling + * R : Ratio of actual bandwidth used to Tb + * Ib : Instantaneous bandwidth + * Ab : Arbitrated bandwidth + * + * IB_RECURRBLOCK and AB_RECURRBLOCK: + * These are used if the requirement is to transfer a + * recurring block of data over a known time window. + * + * IB_THROUGHPUTBW and AB_THROUGHPUTBW: + * These are used for CPU style masters. Here the requirement + * is to have minimum throughput bandwidth available to avoid + * stalling. 
+ */ +#define IB_RECURRBLOCK(Ws, Bs) ((Ws) == 0 ? 0 : ((Bs)/(Ws))) +#define AB_RECURRBLOCK(Ws, Per) ((Ws) == 0 ? 0 : ((Bs)/(Per))) +#define IB_THROUGHPUTBW(Tb) (Tb) +#define AB_THROUGHPUTBW(Tb, R) ((Tb) * (R)) + +struct qcom_msm_bus_req { + u32 key; + u32 nbytes; + u64 value; +}; + +struct msm_bus_vectors { + int src; /* Master */ + int dst; /* Slave */ + uint64_t ab; /* Arbitrated bandwidth */ + uint64_t ib; /* Instantaneous bandwidth */ +}; + +struct msm_bus_paths { + int num_paths; + struct msm_bus_vectors *vectors; +}; + +struct msm_bus_scale_pdata { + struct msm_bus_paths *usecase; + int num_usecases; + const char *name; + /* + * If the active_only flag is set to 1, the BW request is applied + * only when at least one CPU is active (powered on). If the flag + * is set to 0, then the BW request is always applied irrespective + * of the CPU state. + */ + unsigned int active_only; +}; + +struct msm_bus_client_handle { + char *name; + int mas; + int slv; + int first_hop; + u64 cur_ib; + u64 cur_ab; + bool active_only; +}; + +int qcom_rpm_bus_send_message(int ctx, int type, int id, + struct qcom_msm_bus_req *req); + +/* Scaling APIs */ + +/* + * This function returns a handle to the client. This should be used to + * call msm_bus_scale_client_update_request. 
+ * The function returns 0 if bus driver is unable to register a client + */ + +#if (defined(CONFIG_MSM_BUS_SCALING) || defined(CONFIG_BUS_TOPOLOGY_ADHOC)) +int __init msm_bus_fabric_init_driver(void); +uint32_t msm_bus_scale_register_client(struct msm_bus_scale_pdata *pdata); +int msm_bus_scale_client_update_request(uint32_t cl, unsigned int index); +void msm_bus_scale_unregister_client(uint32_t cl); + +struct msm_bus_client_handle* +msm_bus_scale_register(uint32_t mas, uint32_t slv, char *name, + bool active_only); +void msm_bus_scale_unregister(struct msm_bus_client_handle *cl); +int msm_bus_scale_update_bw(struct msm_bus_client_handle *cl, u64 ab, u64 ib); +/* AXI Port configuration APIs */ +int msm_bus_axi_porthalt(int master_port); +int msm_bus_axi_portunhalt(int master_port); + +#else +static inline int __init msm_bus_fabric_init_driver(void) { return 0; } +static struct msm_bus_client_handle dummy_cl; + +static inline uint32_t +msm_bus_scale_register_client(struct msm_bus_scale_pdata *pdata) +{ + return 1; +} + +static inline int +msm_bus_scale_client_update_request(uint32_t cl, unsigned int index) +{ + return 0; +} + +static inline void +msm_bus_scale_unregister_client(uint32_t cl) +{ +} + +static inline int msm_bus_axi_porthalt(int master_port) +{ + return 0; +} + +static inline int msm_bus_axi_portunhalt(int master_port) +{ + return 0; +} + +static inline struct msm_bus_client_handle* +msm_bus_scale_register(uint32_t mas, uint32_t slv, char *name, + bool active_only) +{ + return &dummy_cl; +} + +static inline void msm_bus_scale_unregister(struct msm_bus_client_handle *cl) +{ +} + +static inline int +msm_bus_scale_update_bw(uint32_t cl, u64 ab, u64 ib) +{ + return 0; +} + +#endif + +#if defined(CONFIG_OF) && defined(CONFIG_MSM_BUS_SCALING) +struct msm_bus_scale_pdata *msm_bus_pdata_from_node( + struct platform_device *pdev, struct device_node *of_node); +struct msm_bus_scale_pdata *msm_bus_cl_get_pdata(struct platform_device *pdev); +void 
msm_bus_cl_clear_pdata(struct msm_bus_scale_pdata *pdata); +#else +static inline struct msm_bus_scale_pdata +*msm_bus_cl_get_pdata(struct platform_device *pdev) +{ + return NULL; +} + +static inline struct msm_bus_scale_pdata *msm_bus_pdata_from_node( + struct platform_device *pdev, struct device_node *of_node) +{ + return NULL; +} + +static inline void msm_bus_cl_clear_pdata(struct msm_bus_scale_pdata *pdata) +{ +} +#endif + +#ifdef CONFIG_DEBUG_BUS_VOTER +int msm_bus_floor_vote_context(const char *name, u64 floor_hz, + bool active_only); +int msm_bus_floor_vote(const char *name, u64 floor_hz); +#else +static inline int msm_bus_floor_vote(const char *name, u64 floor_hz) +{ + return -EINVAL; +} + +static inline int msm_bus_floor_vote_context(const char *name, u64 floor_hz, + bool active_only) +{ + return -EINVAL; +} +#endif /*defined(CONFIG_DEBUG_BUS_VOTER) && defined(CONFIG_BUS_TOPOLOGY_ADHOC)*/ +#endif /*_ARCH_ARM_MACH_MSM_BUS_H*/ diff --git a/include/linux/msm_bus_rules.h b/include/linux/msm_bus_rules.h new file mode 100644 index 000000000000..a045223dc7a8 --- /dev/null +++ b/include/linux/msm_bus_rules.h @@ -0,0 +1,77 @@ +/* Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _ARCH_ARM_MACH_MSM_BUS_RULES_H +#define _ARCH_ARM_MACH_MSM_BUS_RULES_H + +#include <linux/types.h> +#include <linux/list.h> +#include <linux/notifier.h> +#include <dt-bindings/soc/msm-bus-rule-ops.h> + +#define MAX_NODES (5) + +struct rule_update_path_info { + u32 id; + u64 ab; + u64 ib; + u64 clk; + struct list_head link; +}; + +struct rule_apply_rcm_info { + u32 id; + u64 lim_bw; + int throttle; + bool after_clk_commit; + struct list_head link; +}; + +struct bus_rule_type { + int num_src; + int *src_id; + int src_field; + int op; + u64 thresh; + int num_dst; + int *dst_node; + u64 dst_bw; + int mode; + void *client_data; +}; + +#if (defined(CONFIG_BUS_TOPOLOGY_ADHOC)) +void msm_rule_register(int num_rules, struct bus_rule_type *rule, + struct notifier_block *nb); +void msm_rule_unregister(int num_rules, struct bus_rule_type *rule, + struct notifier_block *nb); +void print_rules_buf(char *buf, int count); +bool msm_rule_are_rules_registered(void); +#else +static inline void msm_rule_register(int num_rules, struct bus_rule_type *rule, + struct notifier_block *nb) +{ +} +static inline void msm_rule_unregister(int num_rules, + struct bus_rule_type *rule, + struct notifier_block *nb) +{ +} +static inline void print_rules_buf(char *buf, int count) +{ +} +static inline bool msm_rule_are_rules_registered(void) +{ + return false; +} +#endif /* defined(CONFIG_BUS_TOPOLOGY_ADHOC) */ +#endif /* _ARCH_ARM_MACH_MSM_BUS_RULES_H */ diff --git a/include/linux/of_device.h b/include/linux/of_device.h index cc7dd687a89d..6dca65cbb3f3 100644 --- a/include/linux/of_device.h +++ b/include/linux/of_device.h @@ -55,7 +55,8 @@ static inline struct device_node *of_cpu_device_node_get(int cpu) return of_node_get(cpu_dev->of_node); } -void of_dma_configure(struct device *dev, struct device_node *np); +int of_dma_configure(struct device *dev, struct device_node *np); +void of_dma_deconfigure(struct device *dev); #else /* CONFIG_OF */ static inline int 
of_driver_match_device(struct device *dev, @@ -98,7 +99,12 @@ static inline struct device_node *of_cpu_device_node_get(int cpu) { return NULL; } -static inline void of_dma_configure(struct device *dev, struct device_node *np) + +static inline int of_dma_configure(struct device *dev, struct device_node *np) +{ + return 0; +} +static inline void of_dma_deconfigure(struct device *dev) {} #endif /* CONFIG_OF */ diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h index e80b9c762a03..6a7fc5051099 100644 --- a/include/linux/of_iommu.h +++ b/include/linux/of_iommu.h @@ -31,8 +31,16 @@ static inline const struct iommu_ops *of_iommu_configure(struct device *dev, #endif /* CONFIG_OF_IOMMU */ -void of_iommu_set_ops(struct device_node *np, const struct iommu_ops *ops); -const struct iommu_ops *of_iommu_get_ops(struct device_node *np); +static inline void of_iommu_set_ops(struct device_node *np, + const struct iommu_ops *ops) +{ + iommu_register_instance(&np->fwnode, ops); +} + +static inline const struct iommu_ops *of_iommu_get_ops(struct device_node *np) +{ + return iommu_get_instance(&np->fwnode); +} extern struct of_device_id __iommu_of_table; diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index f6bc76501912..672783b3fd85 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -23,6 +23,7 @@ struct opp_table; enum dev_pm_opp_event { OPP_EVENT_ADD, OPP_EVENT_REMOVE, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE, + OPP_EVENT_ADJUST_VOLTAGE, }; #if defined(CONFIG_PM_OPP) @@ -30,6 +31,7 @@ enum dev_pm_opp_event { unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp); unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp); +struct regulator *dev_pm_opp_get_regulator(struct device *dev); bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp); @@ -53,6 +55,9 @@ int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt); void dev_pm_opp_remove(struct device *dev, unsigned long freq); +int dev_pm_opp_adjust_voltage(struct device 
*dev, unsigned long freq, + unsigned long u_volt); + int dev_pm_opp_enable(struct device *dev, unsigned long freq); int dev_pm_opp_disable(struct device *dev, unsigned long freq); @@ -139,6 +144,13 @@ static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq) { } +static inline int +dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq, + unsigned long u_volt) +{ + return 0; +} + static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq) { return 0; diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h index cc32ab852fbc..dc05d27b1ee0 100644 --- a/include/linux/qcom_scm.h +++ b/include/linux/qcom_scm.h @@ -30,8 +30,7 @@ extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp); extern bool qcom_scm_pas_supported(u32 peripheral); -extern int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, - size_t size); +extern int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size); extern int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size); extern int qcom_scm_pas_auth_and_reset(u32 peripheral); @@ -46,4 +45,18 @@ extern void qcom_scm_cpu_power_down(u32 flags); extern u32 qcom_scm_get_version(void); +extern int qcom_scm_set_remote_state(u32 state, u32 id); + +extern int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size); +extern int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare); +extern int qcom_scm_iommu_dump_fault_regs(u32 id, u32 context, u64 addr, + u32 len); +extern int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare); +extern int qcom_scm_iommu_secure_map(u64 list, u32 list_size, u32 size, u32 id, + u32 ctx_id, u64 va, u32 info_size, + u32 flags); +extern int qcom_scm_iommu_secure_unmap(u32 id, u32 ctx_id, u64 va, u32 size, + u32 flags); +extern int qcom_scm_is_call_available(u32 svc_id, u32 cmd_id); + #endif diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index 
692108222271..3b073ace9295 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -580,4 +580,11 @@ static inline int regulator_is_supported_voltage_tol(struct regulator *regulator target_uV + tol_uV); } +/* TEMP: Wrapper to keep bisectability */ +static inline int regulator_set_optimum_mode(struct regulator *regulator, + int load_uA) +{ + return regulator_set_load(regulator, load_uA); +} + #endif diff --git a/include/linux/regulator/qcom_smd-regulator.h b/include/linux/regulator/qcom_smd-regulator.h new file mode 100644 index 000000000000..a71b6adc64ba --- /dev/null +++ b/include/linux/regulator/qcom_smd-regulator.h @@ -0,0 +1,30 @@ +/* Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#ifndef __QCOM_SMD_REGULATOR_H_ +#define __QCOM_SMD_REGULATOR_H_ + +#ifdef CONFIG_REGULATOR_QCOM_SMD_RPM +int qcom_rpm_set_floor(struct regulator *regulator, int floor); +int qcom_rpm_set_corner(struct regulator *regulator, int corner); +#else +static inline int qcom_rpm_set_floor(struct regulator *regulator, int floor) +{ + return -EINVAL; +} + +static inline int qcom_rpm_set_corner(struct regulator *regulator, int corner) +{ + return -EINVAL; +} +#endif + +#endif diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index 930023b7c825..8265d351c9f0 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -400,6 +400,7 @@ enum rproc_crash_type { * @firmware_loading_complete: marks e/o asynchronous firmware loading * @bootaddr: address of first instruction to boot rproc with (optional) * @rvdevs: list of remote virtio devices + * @subdevs: list of subdevices, to following the running state * @notifyids: idr for dynamically assigning rproc-wide unique notify ids * @index: index of this rproc device * @crash_handler: workqueue for handling a crash @@ -415,7 +416,7 @@ struct rproc { struct list_head node; struct iommu_domain *domain; const char *name; - const char *firmware; + char *firmware; void *priv; const struct rproc_ops *ops; struct device dev; @@ -431,6 +432,7 @@ struct rproc { struct completion firmware_loading_complete; u32 bootaddr; struct list_head rvdevs; + struct list_head subdevs; struct idr notifyids; int index; struct work_struct crash_handler; @@ -444,6 +446,19 @@ struct rproc { bool auto_boot; }; +/** + * struct rproc_subdev - subdevice tied to a remoteproc + * @node: list node related to the rproc subdevs list + * @probe: probe function, called as the rproc is started + * @remove: remove function, called as the rproc is stopped + */ +struct rproc_subdev { + struct list_head node; + + int (*probe)(struct rproc_subdev *subdev); + void (*remove)(struct rproc_subdev *subdev); +}; + /* we currently support only 
two vrings per rvdev */ #define RVDEV_NUM_VRINGS 2 @@ -472,6 +487,9 @@ struct rproc_vring { /** * struct rproc_vdev - remoteproc state for a supported virtio device + * @refcount: reference counter for the vdev and vring allocations + * @subdev: handle for registering the vdev as a rproc subdevice + * @id: virtio device id (as in virtio_ids.h) * @node: list node * @rproc: the rproc handle * @vdev: the virio device @@ -479,6 +497,11 @@ struct rproc_vring { * @rsc_offset: offset of the vdev's resource entry */ struct rproc_vdev { + struct kref refcount; + + struct rproc_subdev subdev; + + unsigned int id; struct list_head node; struct rproc *rproc; struct virtio_device vdev; @@ -511,4 +534,11 @@ static inline struct rproc *vdev_to_rproc(struct virtio_device *vdev) return rvdev->rproc; } +void rproc_add_subdev(struct rproc *rproc, + struct rproc_subdev *subdev, + int (*probe)(struct rproc_subdev *subdev), + void (*remove)(struct rproc_subdev *subdev)); + +void rproc_remove_subdev(struct rproc *rproc, struct rproc_subdev *subdev); + #endif /* REMOTEPROC_H */ diff --git a/include/linux/remoteproc/st_slim_rproc.h b/include/linux/remoteproc/st_slim_rproc.h new file mode 100644 index 000000000000..4155556fa4b2 --- /dev/null +++ b/include/linux/remoteproc/st_slim_rproc.h @@ -0,0 +1,58 @@ +/* + * SLIM core rproc driver header + * + * Copyright (C) 2016 STMicroelectronics + * + * Author: Peter Griffin <peter.griffin@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ +#ifndef _ST_REMOTEPROC_SLIM_H +#define _ST_REMOTEPROC_SLIM_H + +#define ST_SLIM_MEM_MAX 2 +#define ST_SLIM_MAX_CLK 4 + +enum { + ST_SLIM_DMEM, + ST_SLIM_IMEM, +}; + +/** + * struct st_slim_mem - slim internal memory structure + * @cpu_addr: MPU virtual address of the memory region + * @bus_addr: Bus address used to access the memory region + * @size: Size of the memory region + */ +struct st_slim_mem { + void __iomem *cpu_addr; + phys_addr_t bus_addr; + size_t size; +}; + +/** + * struct st_slim_rproc - SLIM slim core + * @rproc: rproc handle + * @mem: slim memory information + * @slimcore: slim slimcore regs + * @peri: slim peripheral regs + * @clks: slim clocks + */ +struct st_slim_rproc { + struct rproc *rproc; + struct st_slim_mem mem[ST_SLIM_MEM_MAX]; + void __iomem *slimcore; + void __iomem *peri; + + /* st_slim_rproc private */ + struct clk *clks[ST_SLIM_MAX_CLK]; +}; + +struct st_slim_rproc *st_slim_rproc_alloc(struct platform_device *pdev, + char *fw_name); +void st_slim_rproc_put(struct st_slim_rproc *slim_rproc); + +#endif diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h index 452d393cc8dd..10d6ae8bbb7d 100644 --- a/include/linux/rpmsg.h +++ b/include/linux/rpmsg.h @@ -37,9 +37,11 @@ #include <linux/types.h> #include <linux/device.h> +#include <linux/err.h> #include <linux/mod_devicetable.h> #include <linux/kref.h> #include <linux/mutex.h> +#include <linux/poll.h> #define RPMSG_ADDR_ANY 0xFFFFFFFF @@ -64,6 +66,7 @@ struct rpmsg_channel_info { * rpmsg_device - device that belong to the rpmsg bus * @dev: the device struct * @id: device id (used to match between rpmsg drivers and devices) + * @driver_override: driver name to force a match * @src: local address * @dst: destination address * @ept: the rpmsg endpoint of this channel @@ -72,6 +75,7 @@ struct rpmsg_channel_info { struct rpmsg_device { struct device dev; struct rpmsg_device_id id; + char *driver_override; u32 src; u32 dst; struct rpmsg_endpoint *ept; @@ -132,6 +136,8 @@ struct 
rpmsg_driver { int (*callback)(struct rpmsg_device *, void *, int, void *, u32); }; +#if IS_ENABLED(CONFIG_RPMSG) + int register_rpmsg_device(struct rpmsg_device *dev); void unregister_rpmsg_device(struct rpmsg_device *dev); int __register_rpmsg_driver(struct rpmsg_driver *drv, struct module *owner); @@ -141,6 +147,128 @@ struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *, rpmsg_rx_cb_t cb, void *priv, struct rpmsg_channel_info chinfo); +int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len); +int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst); +int rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst, + void *data, int len); + +int rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len); +int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst); +int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst, + void *data, int len); + +unsigned int rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp, + poll_table *wait); + +#else + +static inline int register_rpmsg_device(struct rpmsg_device *dev) +{ + return -ENXIO; +} + +static inline void unregister_rpmsg_device(struct rpmsg_device *dev) +{ + /* This shouldn't be possible */ + WARN_ON(1); +} + +static inline int __register_rpmsg_driver(struct rpmsg_driver *drv, + struct module *owner) +{ + /* This shouldn't be possible */ + WARN_ON(1); + + return -ENXIO; +} + +static inline void unregister_rpmsg_driver(struct rpmsg_driver *drv) +{ + /* This shouldn't be possible */ + WARN_ON(1); +} + +static inline void rpmsg_destroy_ept(struct rpmsg_endpoint *ept) +{ + /* This shouldn't be possible */ + WARN_ON(1); +} + +static inline struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *rpdev, + rpmsg_rx_cb_t cb, + void *priv, + struct rpmsg_channel_info chinfo) +{ + /* This shouldn't be possible */ + WARN_ON(1); + + return ERR_PTR(-ENXIO); +} + +static inline int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int 
len) +{ + /* This shouldn't be possible */ + WARN_ON(1); + + return -ENXIO; +} + +static inline int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len, + u32 dst) +{ + /* This shouldn't be possible */ + WARN_ON(1); + + return -ENXIO; + +} + +static inline int rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src, + u32 dst, void *data, int len) +{ + /* This shouldn't be possible */ + WARN_ON(1); + + return -ENXIO; +} + +static inline int rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len) +{ + /* This shouldn't be possible */ + WARN_ON(1); + + return -ENXIO; +} + +static inline int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, + int len, u32 dst) +{ + /* This shouldn't be possible */ + WARN_ON(1); + + return -ENXIO; +} + +static inline int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, + u32 dst, void *data, int len) +{ + /* This shouldn't be possible */ + WARN_ON(1); + + return -ENXIO; +} + +static inline unsigned int rpmsg_poll(struct rpmsg_endpoint *ept, + struct file *filp, poll_table *wait) +{ + /* This shouldn't be possible */ + WARN_ON(1); + + return 0; +} + +#endif /* IS_ENABLED(CONFIG_RPMSG) */ + /* use a macro to avoid include chaining to get THIS_MODULE */ #define register_rpmsg_driver(drv) \ __register_rpmsg_driver(drv, THIS_MODULE) @@ -157,14 +285,4 @@ struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *, module_driver(__rpmsg_driver, register_rpmsg_driver, \ unregister_rpmsg_driver) -int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len); -int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst); -int rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst, - void *data, int len); - -int rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len); -int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst); -int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst, - void *data, int len); - #endif /* _LINUX_RPMSG_H */ 
diff --git a/include/linux/rpmsg/qcom_smd.h b/include/linux/rpmsg/qcom_smd.h new file mode 100644 index 000000000000..f27917e0a101 --- /dev/null +++ b/include/linux/rpmsg/qcom_smd.h @@ -0,0 +1,31 @@ + +#ifndef _LINUX_RPMSG_QCOM_SMD_H +#define _LINUX_RPMSG_QCOM_SMD_H + +#include <linux/device.h> + +struct qcom_smd_edge; + +#if IS_ENABLED(CONFIG_RPMSG_QCOM_SMD) + +struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent, + struct device_node *node); +int qcom_smd_unregister_edge(struct qcom_smd_edge *edge); + +#else + +static inline struct qcom_smd_edge * +qcom_smd_register_edge(struct device *parent, + struct device_node *node) +{ + return NULL; +} + +static inline int qcom_smd_unregister_edge(struct qcom_smd_edge *edge) +{ + return 0; +} + +#endif + +#endif diff --git a/include/linux/soc/qcom/mdt_loader.h b/include/linux/soc/qcom/mdt_loader.h new file mode 100644 index 000000000000..f423001db3a9 --- /dev/null +++ b/include/linux/soc/qcom/mdt_loader.h @@ -0,0 +1,18 @@ +#ifndef __QCOM_MDT_LOADER_H__ +#define __QCOM_MDT_LOADER_H__ + +#include <linux/types.h> + +#define QCOM_MDT_TYPE_MASK (7 << 24) +#define QCOM_MDT_TYPE_HASH (2 << 24) +#define QCOM_MDT_RELOCATABLE BIT(27) + +struct device; +struct firmware; + +ssize_t qcom_mdt_get_size(const struct firmware *fw); +int qcom_mdt_load(struct device *dev, const struct firmware *fw, + const char *fw_name, int pas_id, void *mem_region, + phys_addr_t mem_phys, size_t mem_size); + +#endif diff --git a/include/linux/soc/qcom/smd.h b/include/linux/soc/qcom/smd.h deleted file mode 100644 index f148e0ffbec7..000000000000 --- a/include/linux/soc/qcom/smd.h +++ /dev/null @@ -1,139 +0,0 @@ -#ifndef __QCOM_SMD_H__ -#define __QCOM_SMD_H__ - -#include <linux/device.h> -#include <linux/mod_devicetable.h> - -struct qcom_smd; -struct qcom_smd_channel; -struct qcom_smd_lookup; - -/** - * struct qcom_smd_id - struct used for matching a smd device - * @name: name of the channel - */ -struct qcom_smd_id { - char name[20]; -}; - -/** 
- * struct qcom_smd_device - smd device struct - * @dev: the device struct - * @channel: handle to the smd channel for this device - */ -struct qcom_smd_device { - struct device dev; - struct qcom_smd_channel *channel; -}; - -typedef int (*qcom_smd_cb_t)(struct qcom_smd_channel *, const void *, size_t); - -/** - * struct qcom_smd_driver - smd driver struct - * @driver: underlying device driver - * @smd_match_table: static channel match table - * @probe: invoked when the smd channel is found - * @remove: invoked when the smd channel is closed - * @callback: invoked when an inbound message is received on the channel, - * should return 0 on success or -EBUSY if the data cannot be - * consumed at this time - */ -struct qcom_smd_driver { - struct device_driver driver; - const struct qcom_smd_id *smd_match_table; - - int (*probe)(struct qcom_smd_device *dev); - void (*remove)(struct qcom_smd_device *dev); - qcom_smd_cb_t callback; -}; - -#if IS_ENABLED(CONFIG_QCOM_SMD) - -int qcom_smd_driver_register(struct qcom_smd_driver *drv); -void qcom_smd_driver_unregister(struct qcom_smd_driver *drv); - -struct qcom_smd_channel *qcom_smd_open_channel(struct qcom_smd_channel *channel, - const char *name, - qcom_smd_cb_t cb); -void qcom_smd_close_channel(struct qcom_smd_channel *channel); -void *qcom_smd_get_drvdata(struct qcom_smd_channel *channel); -void qcom_smd_set_drvdata(struct qcom_smd_channel *channel, void *data); -int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len); - - -struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent, - struct device_node *node); -int qcom_smd_unregister_edge(struct qcom_smd_edge *edge); - -#else - -static inline int qcom_smd_driver_register(struct qcom_smd_driver *drv) -{ - return -ENXIO; -} - -static inline void qcom_smd_driver_unregister(struct qcom_smd_driver *drv) -{ - /* This shouldn't be possible */ - WARN_ON(1); -} - -static inline struct qcom_smd_channel * -qcom_smd_open_channel(struct 
qcom_smd_channel *channel, - const char *name, - qcom_smd_cb_t cb) -{ - /* This shouldn't be possible */ - WARN_ON(1); - return NULL; -} - -static inline void qcom_smd_close_channel(struct qcom_smd_channel *channel) -{ - /* This shouldn't be possible */ - WARN_ON(1); -} - -static inline void *qcom_smd_get_drvdata(struct qcom_smd_channel *channel) -{ - /* This shouldn't be possible */ - WARN_ON(1); - return NULL; -} - -static inline void qcom_smd_set_drvdata(struct qcom_smd_channel *channel, void *data) -{ - /* This shouldn't be possible */ - WARN_ON(1); -} - -static inline int qcom_smd_send(struct qcom_smd_channel *channel, - const void *data, int len) -{ - /* This shouldn't be possible */ - WARN_ON(1); - return -ENXIO; -} - -static inline struct qcom_smd_edge * -qcom_smd_register_edge(struct device *parent, - struct device_node *node) -{ - return ERR_PTR(-ENXIO); -} - -static inline int qcom_smd_unregister_edge(struct qcom_smd_edge *edge) -{ - /* This shouldn't be possible */ - WARN_ON(1); - return -ENXIO; -} - -#endif - -#define module_qcom_smd_driver(__smd_driver) \ - module_driver(__smd_driver, qcom_smd_driver_register, \ - qcom_smd_driver_unregister) - - -#endif diff --git a/include/linux/soc/qcom/smem_state.h b/include/linux/soc/qcom/smem_state.h index 7b88697929e9..b8478ee7a71f 100644 --- a/include/linux/soc/qcom/smem_state.h +++ b/include/linux/soc/qcom/smem_state.h @@ -1,7 +1,7 @@ #ifndef __QCOM_SMEM_STATE__ #define __QCOM_SMEM_STATE__ -#include <linux/errno.h> +#include <linux/err.h> struct device_node; struct qcom_smem_state; diff --git a/include/linux/soc/qcom/wcnss_ctrl.h b/include/linux/soc/qcom/wcnss_ctrl.h index a37bc5538f19..a4dd4d7c711d 100644 --- a/include/linux/soc/qcom/wcnss_ctrl.h +++ b/include/linux/soc/qcom/wcnss_ctrl.h @@ -1,8 +1,24 @@ #ifndef __WCNSS_CTRL_H__ #define __WCNSS_CTRL_H__ -#include <linux/soc/qcom/smd.h> +#include <linux/rpmsg.h> -struct qcom_smd_channel *qcom_wcnss_open_channel(void *wcnss, const char *name, qcom_smd_cb_t cb); 
+#if IS_ENABLED(CONFIG_QCOM_WCNSS_CTRL) + +struct rpmsg_endpoint *qcom_wcnss_open_channel(void *wcnss, const char *name, + rpmsg_rx_cb_t cb, void *priv); + +#else + +static struct rpmsg_endpoint *qcom_wcnss_open_channel(void *wcnss, + const char *name, + rpmsg_rx_cb_t cb, + void *priv) +{ + WARN_ON(1); + return ERR_PTR(-ENXIO); +} + +#endif #endif diff --git a/include/linux/usb.h b/include/linux/usb.h index eba1f10e8cfd..f3f5d8a396e4 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -354,6 +354,7 @@ struct usb_devmap { */ struct usb_bus { struct device *controller; /* host/master side hardware */ + struct device *sysdev; /* as seen from firmware or bus */ int busnum; /* Bus number (in order of reg) */ const char *bus_name; /* stable id (PCI slot_name etc) */ u8 uses_dma; /* Does the host controller use DMA? */ diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index 492034126876..3a5d591f45e7 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -437,6 +437,9 @@ extern int usb_hcd_alloc_bandwidth(struct usb_device *udev, struct usb_host_interface *new_alt); extern int usb_hcd_get_frame_number(struct usb_device *udev); +struct usb_hcd *__usb_create_hcd(const struct hc_driver *driver, + struct device *sysdev, struct device *dev, const char *bus_name, + struct usb_hcd *primary_hcd); extern struct usb_hcd *usb_create_hcd(const struct hc_driver *driver, struct device *dev, const char *bus_name); extern struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver, diff --git a/include/media/media-entity.h b/include/media/media-entity.h index b2203ee7a4c1..bb3a57c24803 100644 --- a/include/media/media-entity.h +++ b/include/media/media-entity.h @@ -804,7 +804,7 @@ struct media_link *media_entity_find_link(struct media_pad *source, * Return: returns a pointer to the pad at the remote end of the first found * enabled link, or %NULL if no enabled link has been found. 
*/ -struct media_pad *media_entity_remote_pad(struct media_pad *pad); +struct media_pad *media_entity_remote_pad(const struct media_pad *pad); /** * media_entity_get - Get a reference to the parent module diff --git a/include/media/v4l2-mediabus.h b/include/media/v4l2-mediabus.h index 34cc99e093ef..125afcd8ba47 100644 --- a/include/media/v4l2-mediabus.h +++ b/include/media/v4l2-mediabus.h @@ -113,4 +113,30 @@ static inline void v4l2_fill_mbus_format(struct v4l2_mbus_framefmt *mbus_fmt, mbus_fmt->code = code; } +static inline void v4l2_fill_pix_format_mplane( + struct v4l2_pix_format_mplane *pix_mp_fmt, + const struct v4l2_mbus_framefmt *mbus_fmt) +{ + pix_mp_fmt->width = mbus_fmt->width; + pix_mp_fmt->height = mbus_fmt->height; + pix_mp_fmt->field = mbus_fmt->field; + pix_mp_fmt->colorspace = mbus_fmt->colorspace; + pix_mp_fmt->ycbcr_enc = mbus_fmt->ycbcr_enc; + pix_mp_fmt->quantization = mbus_fmt->quantization; + pix_mp_fmt->xfer_func = mbus_fmt->xfer_func; +} + +static inline void v4l2_fill_mbus_format_mplane( + struct v4l2_mbus_framefmt *mbus_fmt, + const struct v4l2_pix_format_mplane *pix_mp_fmt) +{ + mbus_fmt->width = pix_mp_fmt->width; + mbus_fmt->height = pix_mp_fmt->height; + mbus_fmt->field = pix_mp_fmt->field; + mbus_fmt->colorspace = pix_mp_fmt->colorspace; + mbus_fmt->ycbcr_enc = pix_mp_fmt->ycbcr_enc; + mbus_fmt->quantization = pix_mp_fmt->quantization; + mbus_fmt->xfer_func = pix_mp_fmt->xfer_func; +} + #endif diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h index 1b355344c804..1df86ac1473a 100644 --- a/include/media/v4l2-mem2mem.h +++ b/include/media/v4l2-mem2mem.h @@ -434,6 +434,47 @@ static inline void *v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx) } /** + * v4l2_m2m_for_each_dst_buf() - iterate over a list of destination ready + * buffers + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @b: current buffer of type struct v4l2_m2m_buffer + */ +#define 
v4l2_m2m_for_each_dst_buf(m2m_ctx, b) \ + list_for_each_entry(b, &m2m_ctx->cap_q_ctx.rdy_queue, list) + +/** + * v4l2_m2m_for_each_src_buf() - iterate over a list of source ready buffers + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @b: current buffer of type struct v4l2_m2m_buffer + */ +#define v4l2_m2m_for_each_src_buf(m2m_ctx, b) \ + list_for_each_entry(b, &m2m_ctx->out_q_ctx.rdy_queue, list) + +/** + * v4l2_m2m_for_each_dst_buf_safe() - iterate over a list of destination ready + * buffers safely + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @b: current buffer of type struct v4l2_m2m_buffer + * @n: used as temporary storage + */ +#define v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, b, n) \ + list_for_each_entry_safe(b, n, &m2m_ctx->cap_q_ctx.rdy_queue, list) + +/** + * v4l2_m2m_for_each_src_buf_safe() - iterate over a list of source ready + * buffers safely + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @b: current buffer of type struct v4l2_m2m_buffer + * @n: used as temporary storage + */ +#define v4l2_m2m_for_each_src_buf_safe(m2m_ctx, b, n) \ + list_for_each_entry_safe(b, n, &m2m_ctx->out_q_ctx.rdy_queue, list) + +/** * v4l2_m2m_get_src_vq() - return vb2_queue for source buffers * * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx @@ -485,6 +526,57 @@ static inline void *v4l2_m2m_dst_buf_remove(struct v4l2_m2m_ctx *m2m_ctx) return v4l2_m2m_buf_remove(&m2m_ctx->cap_q_ctx); } +/** + * v4l2_m2m_buf_remove_by_buf() - take off exact buffer from the list of ready + * buffers + * + * @q_ctx: pointer to struct @v4l2_m2m_queue_ctx + * @vbuf: the buffer to be removed + */ +void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx, + struct vb2_v4l2_buffer *vbuf); + +/** + * v4l2_m2m_src_buf_remove_by_buf() - take off exact source buffer from the list + * of ready buffers + * + * @m2m_ctx: m2m context assigned to 
the instance given by struct &v4l2_m2m_ctx + * @vbuf: the buffer to be removed + */ +static inline void v4l2_m2m_src_buf_remove_by_buf(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_v4l2_buffer *vbuf) +{ + v4l2_m2m_buf_remove_by_buf(&m2m_ctx->out_q_ctx, vbuf); +} + +/** + * v4l2_m2m_dst_buf_remove_by_buf() - take off exact destination buffer from the + * list of ready buffers + * + * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx + * @vbuf: the buffer to be removed + */ +static inline void v4l2_m2m_dst_buf_remove_by_buf(struct v4l2_m2m_ctx *m2m_ctx, + struct vb2_v4l2_buffer *vbuf) +{ + v4l2_m2m_buf_remove_by_buf(&m2m_ctx->cap_q_ctx, vbuf); +} + +struct vb2_v4l2_buffer * +v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx); + +static inline struct vb2_v4l2_buffer * +v4l2_m2m_src_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx) +{ + return v4l2_m2m_buf_remove_by_idx(&m2m_ctx->out_q_ctx, idx); +} + +static inline struct vb2_v4l2_buffer * +v4l2_m2m_dst_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx) +{ + return v4l2_m2m_buf_remove_by_idx(&m2m_ctx->cap_q_ctx, idx); +} + /* v4l2 ioctl helpers */ int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv, diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h index ac5898a55fd9..dd6e6b43ac55 100644 --- a/include/media/videobuf2-core.h +++ b/include/media/videobuf2-core.h @@ -427,6 +427,16 @@ struct vb2_buf_ops { * @dev: device to use for the default allocation context if the driver * doesn't fill in the @alloc_devs array. * @dma_attrs: DMA attributes to use for the DMA. + * @bidirectional: when this flag is set the DMA direction for the buffers of + * this queue will be overridden with DMA_BIDIRECTIONAL direction. 
+ * This is useful in cases where the hardware (firmware) writes to + * a buffer which is mapped as read (DMA_TO_DEVICE), or reads from + * buffer which is mapped for write (DMA_FROM_DEVICE) in order + * to satisfy some internal hardware restrictions or adds a padding + * needed by the processing algorithm. In case the DMA mapping is + * not bidirectional but the hardware (firmware) trying to access + * the buffer (in the opposite direction) this could lead to an + * IOMMU protection faults. * @fileio_read_once: report EOF after reading the first buffer * @fileio_write_immediately: queue buffer after each write() call * @allow_zero_bytesused: allow bytesused == 0 to be passed to the driver @@ -495,6 +505,7 @@ struct vb2_queue { unsigned int io_modes; struct device *dev; unsigned long dma_attrs; + unsigned bidirectional:1; unsigned fileio_read_once:1; unsigned fileio_write_immediately:1; unsigned allow_zero_bytesused:1; diff --git a/include/trace/events/trace_msm_bus.h b/include/trace/events/trace_msm_bus.h new file mode 100644 index 000000000000..5615d257a0b6 --- /dev/null +++ b/include/trace/events/trace_msm_bus.h @@ -0,0 +1,146 @@ +/* Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM msm_bus + +#if !defined(_TRACE_MSM_BUS_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_MSM_BUS_H + +#include <linux/tracepoint.h> + +TRACE_EVENT(bus_update_request, + + TP_PROTO(int sec, int nsec, const char *name, int src, int dest, + unsigned long long ab, unsigned long long ib), + + TP_ARGS(sec, nsec, name, src, dest, ab, ib), + + TP_STRUCT__entry( + __field(int, sec) + __field(int, nsec) + __string(name, name) + __field(int, src) + __field(int, dest) + __field(u64, ab) + __field(u64, ib) + ), + + TP_fast_assign( + __entry->sec = sec; + __entry->nsec = nsec; + __assign_str(name, name); + __entry->src = src; + __entry->dest = dest; + __entry->ab = ab; + __entry->ib = ib; + ), + + TP_printk("time= %d.%d name=%s src=%d dest=%d ab=%llu ib=%llu", + __entry->sec, + __entry->nsec, + __get_str(name), + __entry->src, + __entry->dest, + (unsigned long long)__entry->ab, + (unsigned long long)__entry->ib) +); + +TRACE_EVENT(bus_update_request_end, + + TP_PROTO(const char *name), + + TP_ARGS(name), + + TP_STRUCT__entry( + __string(name, name) + ), + + TP_fast_assign( + __assign_str(name, name); + ), + + TP_printk("client-name=%s", __get_str(name)) +); + +TRACE_EVENT(bus_bimc_config_limiter, + + TP_PROTO(int mas_id, unsigned long long cur_lim_bw), + + TP_ARGS(mas_id, cur_lim_bw), + + TP_STRUCT__entry( + __field(int, mas_id) + __field(u64, cur_lim_bw) + ), + + TP_fast_assign( + __entry->mas_id = mas_id; + __entry->cur_lim_bw = cur_lim_bw; + ), + + TP_printk("Master=%d cur_lim_bw=%llu", + __entry->mas_id, + (unsigned long long)__entry->cur_lim_bw) +); + +TRACE_EVENT(bus_avail_bw, + + TP_PROTO(unsigned long long cur_bimc_bw, unsigned long long cur_mdp_bw), + + TP_ARGS(cur_bimc_bw, cur_mdp_bw), + + TP_STRUCT__entry( + __field(u64, cur_bimc_bw) + __field(u64, cur_mdp_bw) + ), + + TP_fast_assign( + __entry->cur_bimc_bw = cur_bimc_bw; + __entry->cur_mdp_bw = cur_mdp_bw; + ), + + TP_printk("cur_bimc_bw = %llu cur_mdp_bw = 
%llu", + (unsigned long long)__entry->cur_bimc_bw, + (unsigned long long)__entry->cur_mdp_bw) +); + +TRACE_EVENT(bus_bke_params, + + TP_PROTO(u32 gc, u32 gp, u32 thl, u32 thm, u32 thh), + + TP_ARGS(gc, gp, thl, thm, thh), + + TP_STRUCT__entry( + __field(u32, gc) + __field(u32, gp) + __field(u32, thl) + __field(u32, thm) + __field(u32, thh) + ), + + TP_fast_assign( + __entry->gc = gc; + __entry->gp = gp; + __entry->thl = thl; + __entry->thm = thm; + __entry->thh = thh; + ), + + TP_printk("BKE Params GC=0x%x GP=0x%x THL=0x%x THM=0x%x THH=0x%x", + __entry->gc, __entry->gp, __entry->thl, __entry->thm, + __entry->thh) +); + +#endif +#define TRACE_INCLUDE_FILE trace_msm_bus +#include <trace/define_trace.h> diff --git a/include/uapi/linux/gps_proxy.h b/include/uapi/linux/gps_proxy.h new file mode 100644 index 000000000000..5ac7f6b873b8 --- /dev/null +++ b/include/uapi/linux/gps_proxy.h @@ -0,0 +1,34 @@ +/* Copyright (c) 2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __GPS_PROXY_H__ +#define __GPS_PROXY_H__ + +#define QMI_LOC_NMEA_STRING_MAX_LENGTH_V02 201 + +enum QGPS_TTY_IOCTL_CMDS { + QGPS_REGISTER_HANDLE_IOC = 0, + QGPS_SEND_NMEA_IOC, + QGPS_IS_ACTIVE_IOC, +}; + +#define QGPS_IOC_MAGIC 'q' +#define QGPS_REGISTER_HANDLE _IO(QGPS_IOC_MAGIC, QGPS_REGISTER_HANDLE_IOC) +#define QGPS_SEND_NMEA _IO(QGPS_IOC_MAGIC, QGPS_SEND_NMEA_IOC) +#define QGPS_IS_ACTIVE _IO(QGPS_IOC_MAGIC, QGPS_IS_ACTIVE_IOC) + +struct gps_proxy_data { + size_t nmea_length; + char nmea_string[QMI_LOC_NMEA_STRING_MAX_LENGTH_V02]; +}; + +#endif /* __GPS_PROXY_H__ */ diff --git a/include/uapi/linux/rpmsg.h b/include/uapi/linux/rpmsg.h new file mode 100644 index 000000000000..dedc226e0d3f --- /dev/null +++ b/include/uapi/linux/rpmsg.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2016, Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _UAPI_RPMSG_H_ +#define _UAPI_RPMSG_H_ + +#include <linux/ioctl.h> +#include <linux/types.h> + +/** + * struct rpmsg_endpoint_info - endpoint info representation + * @name: name of service + * @src: local address + * @dst: destination address + */ +struct rpmsg_endpoint_info { + char name[32]; + __u32 src; + __u32 dst; +}; + +#define RPMSG_CREATE_EPT_IOCTL _IOW(0xb5, 0x1, struct rpmsg_endpoint_info) +#define RPMSG_DESTROY_EPT_IOCTL _IO(0xb5, 0x2) + +#endif diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index 7f34d3c67648..1b0d79c1b404 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -584,6 +584,11 @@ struct v4l2_pix_format { #define V4L2_PIX_FMT_SGBRG12 v4l2_fourcc('G', 'B', '1', '2') /* 12 GBGB.. RGRG.. */ #define V4L2_PIX_FMT_SGRBG12 v4l2_fourcc('B', 'A', '1', '2') /* 12 GRGR.. BGBG.. */ #define V4L2_PIX_FMT_SRGGB12 v4l2_fourcc('R', 'G', '1', '2') /* 12 RGRG.. GBGB.. */ + /* 12bit raw bayer packed, 6 bytes for every 4 pixels */ +#define V4L2_PIX_FMT_SBGGR12P v4l2_fourcc('p', 'B', 'C', 'C') +#define V4L2_PIX_FMT_SGBRG12P v4l2_fourcc('p', 'G', 'C', 'C') +#define V4L2_PIX_FMT_SGRBG12P v4l2_fourcc('p', 'g', 'C', 'C') +#define V4L2_PIX_FMT_SRGGB12P v4l2_fourcc('p', 'R', 'C', 'C') #define V4L2_PIX_FMT_SBGGR16 v4l2_fourcc('B', 'Y', 'R', '2') /* 16 BGBG.. GRGR.. 
*/ /* compressed formats */ @@ -602,6 +607,7 @@ struct v4l2_pix_format { #define V4L2_PIX_FMT_VC1_ANNEX_G v4l2_fourcc('V', 'C', '1', 'G') /* SMPTE 421M Annex G compliant stream */ #define V4L2_PIX_FMT_VC1_ANNEX_L v4l2_fourcc('V', 'C', '1', 'L') /* SMPTE 421M Annex L compliant stream */ #define V4L2_PIX_FMT_VP8 v4l2_fourcc('V', 'P', '8', '0') /* VP8 */ +#define V4L2_PIX_FMT_VP9 v4l2_fourcc('V', 'P', '9', '0') /* VP9 */ /* Vendor-specific formats */ #define V4L2_PIX_FMT_CPIA1 v4l2_fourcc('C', 'P', 'I', 'A') /* cpia1 YUV */ diff --git a/kernel/configs/debug.config b/kernel/configs/debug.config new file mode 100644 index 000000000000..451a71bb6bda --- /dev/null +++ b/kernel/configs/debug.config @@ -0,0 +1,13 @@ +CONFIG_DEBUG_MUTEXES=y +CONFIG_DEBUG_SPINLOCK=y +CONFIG_DEBUG_ATOMIC_SLEEP=y +CONFIG_DEBUG_PAGEALLOC=y +CONFIG_DEBUG_LOCK_ALLOC=y +CONFIG_PROVE_LOCKING=y +CONFIG_PROVE_RCU=y +CONFIG_SLUB_DEBUG=y +CONFIG_SLUB_DEBUG_ON=y +CONFIG_KASAN=y +CONFIG_DEBUG_SPINLOCK=y +CONFIG_USB_GADGET_DEBUG=y +CONFIG_USB_GADGET_DEBUG_FILES=y diff --git a/kernel/configs/distro.config b/kernel/configs/distro.config new file mode 100644 index 000000000000..d8db002ef79c --- /dev/null +++ b/kernel/configs/distro.config @@ -0,0 +1,437 @@ +# USB camera +CONFIG_MEDIA_SUPPORT=m +CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_MEDIA_USB_SUPPORT=y +CONFIG_USB_VIDEO_CLASS=m +CONFIG_USB_M5602=m +CONFIG_USB_STV06XX=m +CONFIG_USB_GL860=m +CONFIG_USB_GSPCA_BENQ=m +CONFIG_USB_GSPCA_CONEX=m +CONFIG_USB_GSPCA_CPIA1=m +CONFIG_USB_GSPCA_DTCS033=m +CONFIG_USB_GSPCA_ETOMS=m +CONFIG_USB_GSPCA_FINEPIX=m +CONFIG_USB_GSPCA_JEILINJ=m +CONFIG_USB_GSPCA_JL2005BCD=m +CONFIG_USB_GSPCA_KINECT=m +CONFIG_USB_GSPCA_KONICA=m +CONFIG_USB_GSPCA_MARS=m +CONFIG_USB_GSPCA_MR97310A=m +CONFIG_USB_GSPCA_NW80X=m +CONFIG_USB_GSPCA_OV519=m +CONFIG_USB_GSPCA_OV534=m +CONFIG_USB_GSPCA_OV534_9=m +CONFIG_USB_GSPCA_PAC207=m +CONFIG_USB_GSPCA_PAC7302=m +CONFIG_USB_GSPCA_PAC7311=m +CONFIG_USB_GSPCA_SE401=m +CONFIG_USB_GSPCA_SN9C2028=m 
+CONFIG_USB_GSPCA_SN9C20X=m +CONFIG_USB_GSPCA_SONIXB=m +CONFIG_USB_GSPCA_SONIXJ=m +CONFIG_USB_GSPCA_SPCA500=m +CONFIG_USB_GSPCA_SPCA501=m +CONFIG_USB_GSPCA_SPCA505=m +CONFIG_USB_GSPCA_SPCA506=m +CONFIG_USB_GSPCA_SPCA508=m +CONFIG_USB_GSPCA_SPCA561=m +CONFIG_USB_GSPCA_SPCA1528=m +CONFIG_USB_GSPCA_SQ905=m +CONFIG_USB_GSPCA_SQ905C=m +CONFIG_USB_GSPCA_SQ930X=m +CONFIG_USB_GSPCA_STK014=m +CONFIG_USB_GSPCA_STK1135=m +CONFIG_USB_GSPCA_STV0680=m +CONFIG_USB_GSPCA_SUNPLUS=m +CONFIG_USB_GSPCA_T613=m +CONFIG_USB_GSPCA_TOPRO=m +CONFIG_USB_GSPCA_TOUPTEK=m +CONFIG_USB_GSPCA_TV8532=m +CONFIG_USB_GSPCA_VC032X=m +CONFIG_USB_GSPCA_VICAM=m +CONFIG_USB_GSPCA_XIRLINK_CIT=m +CONFIG_USB_GSPCA_ZC3XX=m +CONFIG_USB_LAN78XX=m +CONFIG_USB_PWC=m +CONFIG_USB_ZR364XX=m +CONFIG_USB_STKWEBCAM=m +CONFIG_USB_S2255=m + +# USB serial +CONFIG_USB_SERIAL=y +CONFIG_USB_SERIAL_CONSOLE=y +CONFIG_USB_SERIAL_GENERIC=y +CONFIG_USB_SERIAL_SIMPLE=m +CONFIG_USB_SERIAL_AIRCABLE=m +CONFIG_USB_SERIAL_ARK3116=m +CONFIG_USB_SERIAL_BELKIN=m +CONFIG_USB_SERIAL_CH341=m +CONFIG_USB_SERIAL_WHITEHEAT=m +CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m +CONFIG_USB_SERIAL_CP210X=m +CONFIG_USB_SERIAL_CYPRESS_M8=m +CONFIG_USB_SERIAL_EMPEG=m +CONFIG_USB_SERIAL_FTDI_SIO=m +CONFIG_USB_SERIAL_VISOR=m +CONFIG_USB_SERIAL_IPAQ=m +CONFIG_USB_SERIAL_IR=m +CONFIG_USB_SERIAL_EDGEPORT=m +CONFIG_USB_SERIAL_EDGEPORT_TI=m +CONFIG_USB_SERIAL_F81232=m +CONFIG_USB_SERIAL_GARMIN=m +CONFIG_USB_SERIAL_IPW=m +CONFIG_USB_SERIAL_IUU=m +CONFIG_USB_SERIAL_KEYSPAN_PDA=m +CONFIG_USB_SERIAL_KLSI=m +CONFIG_USB_SERIAL_KOBIL_SCT=m +CONFIG_USB_SERIAL_MCT_U232=m +CONFIG_USB_SERIAL_METRO=m +CONFIG_USB_SERIAL_MOS7720=m +CONFIG_USB_SERIAL_MOS7840=m +CONFIG_USB_SERIAL_MXUPORT=m +CONFIG_USB_SERIAL_NAVMAN=m +CONFIG_USB_SERIAL_PL2303=m +CONFIG_USB_SERIAL_OTI6858=m +CONFIG_USB_SERIAL_QCAUX=m +CONFIG_USB_SERIAL_QUALCOMM=m +CONFIG_USB_SERIAL_SPCP8X5=m +CONFIG_USB_SERIAL_SAFE=m +CONFIG_USB_SERIAL_SIERRAWIRELESS=m +CONFIG_USB_SERIAL_SYMBOL=m +CONFIG_USB_SERIAL_TI=m 
+CONFIG_USB_SERIAL_CYBERJACK=m +CONFIG_USB_SERIAL_XIRCOM=m +CONFIG_USB_SERIAL_OPTION=m +CONFIG_USB_SERIAL_OMNINET=m +CONFIG_USB_SERIAL_OPTICON=m +CONFIG_USB_SERIAL_XSENS_MT=m +CONFIG_USB_SERIAL_WISHBONE=m +CONFIG_USB_SERIAL_SSU100=m +CONFIG_USB_SERIAL_QT2=m + +# USB gadget +CONFIG_USB_GADGET=y +CONFIG_USB_GADGET_VBUS_DRAW=500 +CONFIG_USB_CONFIGFS=m +CONFIG_USB_ZERO=m +CONFIG_USB_ETH=m +CONFIG_USB_MASS_STORAGE=m +CONFIG_USB_G_SERIAL=m +CONFIG_NEW_LEDS=y + +# USB Eth +CONFIG_USB_CATC=m +CONFIG_USB_KAWETH=m +CONFIG_USB_PEGASUS=m +CONFIG_USB_RTL8150=m +CONFIG_USB_RTL8152=m +CONFIG_USB_NET_SR9700=m +CONFIG_USB_NET_SR9800=m +CONFIG_USB_NET_SMSC75XX=m +CONFIG_USB_NET_SMSC95XX=m +CONFIG_USB_NET_MCS7830=m + +# USB device class +CONFIG_USB_ACM=m +CONFIG_USB_PRINTER=m +CONFIG_USB_WDM=m + +# LEDs +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_GPIO=y +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +CONFIG_LEDS_TRIGGER_CPU=y +CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_LEDS_TRIGGER_ONESHOT=y +CONFIG_LEDS_TRIGGER_GPIO=y +CONFIG_LEDS_TRIGGER_DEFAULT_ON=y +CONFIG_LEDS_TRIGGER_BACKLIGHT=y +CONFIG_LEDS_TRIGGER_DISK=y +CONFIG_LEDS_TRIGGER_MTD=y +CONFIG_LEDS_TRIGGER_TRANSIENT=y +CONFIG_LEDS_TRIGGER_PANIC=y +CONFIG_MAC80211_LEDS=y + +# systemd +CONFIG_IPV6=y +CONFIG_NAMESPACES=y +CONFIG_NET_NS=y +CONFIG_DEVPTS_MULTIPLE_INSTANCES=y +CONFIG_DEVTMPFS=y +CONFIG_CGROUPS=y +CONFIG_INOTIFY_USER=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EPOLL=y +CONFIG_NET=y +CONFIG_SYSFS=y +CONFIG_PROC_FS=y +CONFIG_FHANDLE=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_BLK_CGROUP=y +CONFIG_DNS_RESOLVER=y +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +CONFIG_EXT2_FS_POSIX_ACL=y +CONFIG_EXT2_FS_SECURITY=y +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_XATTR=y +CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_AUTOFS4_FS=y +CONFIG_TMPFS_XATTR=y +CONFIG_SCHEDSTATS=y 
+CONFIG_SCHED_DEBUG=y + +# NFS server +CONFIG_ROOT_NFS=y +CONFIG_NFSD=m +CONFIG_NFSD_V3=y +CONFIG_NFSD_V3_ACL=y + +# HID +CONFIG_HID_APPLE=y +CONFIG_HID_LOGITECH=m +CONFIG_HID_MAGICMOUSE=m +CONFIG_HID_MICROSOFT=m +CONFIG_HID_PLANTRONICS=m +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_USB_COMPOSITE=m +CONFIG_HID_MULTITOUCH=m + +#misc +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +CONFIG_PRINTK_TIME=y +CONFIG_STACKTRACE=y +CONFIG_MMC_BLOCK_MINORS=32 +CONFIG_GPIO_SYSFS=y +CONFIG_SND_USB=y +CONFIG_SND_USB_AUDIO=y +CONFIG_RFKILL=y +CONFIG_BINFMT_MISC=m +CONFIG_DYNAMIC_DEBUG=y +CONFIG_DRM_LOAD_EDID_FIRMWARE=y +CONFIG_I2C_CHARDEV=y +CONFIG_SPI_SPIDEV=m +CONFIG_ZSMALLOC=m +CONFIG_ZRAM=m + +# PPP +CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOE=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m + +# input +CONFIG_INPUT_MISC=y +CONFIG_INPUT_PM8941_PWRKEY=y +CONFIG_INPUT_JOYDEV=m + +# Docker +CONFIG_IPV6=y +CONFIG_NET_NS=y +CONFIG_IPC_NS=y +CONFIG_UTS_NS=y +CONFIG_DEVPTS_MULTIPLE_INSTANCES=y +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_CGROUPS=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CPUSETS=y +CONFIG_VETH=m +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_NETFILTER_SYNPROXY=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_NETFILTER=y +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_TARGET_HL=m 
+CONFIG_NETFILTER_XT_MATCH_HL=m +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_INET=m +CONFIG_NF_TABLES_NETDEV=m +CONFIG_NF_TABLES_IPV4=m +CONFIG_NF_TABLES_ARP=m +CONFIG_NF_TABLES_IPV6=m +CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NF_NAT=y +CONFIG_NF_NAT_IPV4=y +CONFIG_NF_NAT_NEEDED=y +CONFIG_NF_CONNTRACK=m +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_LABELS=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_BROADCAST=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_USER_NS=y +CONFIG_SECCOMP=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_CGROUP_PERF=y +CONFIG_BLK_CGROUP=y +CONFIG_BLK_DEV_THROTTLING=y +CONFIG_CFQ_GROUP_IOSCHED=y +CONFIG_NET_SCHED=y +CONFIG_NET_CLS_CGROUP=m +CONFIG_CFS_BANDWIDTH=y +CONFIG_IP_VS=m +CONFIG_VXLAN=m +CONFIG_XFRM_ALGO=m +CONFIG_XFRM_USER=m +CONFIG_IPVLAN=m +CONFIG_MACVLAN=m +CONFIG_DUMMY=m +CONFIG_BTRFS_FS=m +CONFIG_OVERLAY_FS=m +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_POSIX_MQUEUE=y + +# Extended IPV6 support +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_MIP6=m +CONFIG_IPV6_ILA=m +CONFIG_INET6_TUNNEL=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_TUNNEL=m +CONFIG_IPV6_GRE=m +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_NF_NAT_MASQUERADE_IPV6=m +CONFIG_NF_DUP_IPV6=m +CONFIG_NF_REJECT_IPV6=m +CONFIG_NF_LOG_IPV6=m +CONFIG_NF_NAT_IPV6=m +CONFIG_NFT_CHAIN_NAT_IPV6=m +CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_IP6_NF_IPTABLES=m 
+CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_SECURITY=m +CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +CONFIG_IP6_NF_TARGET_NPT=m + +# 6LOWPAN +CONFIG_6LOWPAN=m +CONFIG_6LOWPAN_NHC=m +CONFIG_6LOWPAN_NHC_DEST=m +CONFIG_6LOWPAN_NHC_FRAGMENT=m +CONFIG_6LOWPAN_NHC_HOP=m +CONFIG_6LOWPAN_NHC_IPV6=m +CONFIG_6LOWPAN_NHC_MOBILITY=m +CONFIG_6LOWPAN_NHC_ROUTING=m +CONFIG_6LOWPAN_NHC_UDP=m +CONFIG_IEEE802154=m +CONFIG_IEEE802154_NL802154_EXPERIMENTAL=y +CONFIG_IEEE802154_SOCKET=m +CONFIG_IEEE802154_6LOWPAN=m +CONFIG_IEEE802154_DRIVERS=m +CONFIG_IEEE802154_FAKELB=m +CONFIG_IEEE802154_AT86RF230=m +CONFIG_IEEE802154_AT86RF230_DEBUGFS=y +CONFIG_IEEE802154_MRF24J40=m +CONFIG_IEEE802154_CC2520=m +CONFIG_IEEE802154_ATUSB=m +CONFIG_MAC802154=m + +# Extended BT support +CONFIG_BT=m +CONFIG_BT_BREDR=y +CONFIG_BT_RFCOMM=m +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=m +CONFIG_BT_BNEP_MC_FILTER=y +CONFIG_BT_BNEP_PROTO_FILTER=y +CONFIG_BT_CMTP=m +CONFIG_BT_HIDP=m +CONFIG_BT_HS=y +CONFIG_BT_LE=y +CONFIG_BT_LEDS=y +CONFIG_BT_DEBUGFS=y +CONFIG_BT_HCIBTUSB=m +CONFIG_BT_HCIBTSDIO=m +CONFIG_BT_HCIUART=m +CONFIG_BT_HCIUART_H4=y +CONFIG_BT_HCIUART_BCSP=y +CONFIG_BT_HCIUART_3WIRE=y +CONFIG_BT_6LOWPAN=m + +# Extended WLAN support +CONFIG_CFG80211_WEXT=y + +# Legacy instruction support for arm64 +CONFIG_ARMV8_DEPRECATED=y +CONFIG_SWP_EMULATION=y +CONFIG_CP15_BARRIER_EMULATION=y +CONFIG_SETEND_EMULATION=y diff --git a/net/mac80211/status.c b/net/mac80211/status.c index ad37b4e58c2f..e213a8313450 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -542,6 +542,8 @@ static void 
ieee80211_report_used_skb(struct ieee80211_local *local, } else if (info->ack_frame_id) { ieee80211_report_ack_skb(local, info, acked, dropped); } + + ieee80211_led_tx(local); } /* @@ -883,8 +885,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) rcu_read_unlock(); - ieee80211_led_tx(local); - /* SNMP counters * Fragments are passed to low-level drivers as separate skbs, so these * are actually fragments, not frames. Update frame counters only for diff --git a/net/qrtr/Kconfig b/net/qrtr/Kconfig index b83c6807a5ae..326fd97444f5 100644 --- a/net/qrtr/Kconfig +++ b/net/qrtr/Kconfig @@ -16,7 +16,7 @@ if QRTR config QRTR_SMD tristate "SMD IPC Router channels" - depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n) + depends on RPMSG || (COMPILE_TEST && RPMSG=n) ---help--- Say Y here to support SMD based ipcrouter channels. SMD is the most common transport for IPC Router. diff --git a/net/qrtr/smd.c b/net/qrtr/smd.c index 0d11132b3370..50615d5efac1 100644 --- a/net/qrtr/smd.c +++ b/net/qrtr/smd.c @@ -14,21 +14,21 @@ #include <linux/module.h> #include <linux/skbuff.h> -#include <linux/soc/qcom/smd.h> +#include <linux/rpmsg.h> #include "qrtr.h" struct qrtr_smd_dev { struct qrtr_endpoint ep; - struct qcom_smd_channel *channel; + struct rpmsg_endpoint *channel; struct device *dev; }; /* from smd to qrtr */ -static int qcom_smd_qrtr_callback(struct qcom_smd_channel *channel, - const void *data, size_t len) +static int qcom_smd_qrtr_callback(struct rpmsg_device *rpdev, + void *data, int len, void *priv, u32 addr) { - struct qrtr_smd_dev *qdev = qcom_smd_get_drvdata(channel); + struct qrtr_smd_dev *qdev = dev_get_drvdata(&rpdev->dev); int rc; if (!qdev) @@ -54,7 +54,7 @@ static int qcom_smd_qrtr_send(struct qrtr_endpoint *ep, struct sk_buff *skb) if (rc) goto out; - rc = qcom_smd_send(qdev->channel, skb->data, skb->len); + rc = rpmsg_send(qdev->channel, skb->data, skb->len); out: if (rc) @@ -64,57 +64,55 @@ out: return rc; } -static int 
qcom_smd_qrtr_probe(struct qcom_smd_device *sdev) +static int qcom_smd_qrtr_probe(struct rpmsg_device *rpdev) { struct qrtr_smd_dev *qdev; int rc; - qdev = devm_kzalloc(&sdev->dev, sizeof(*qdev), GFP_KERNEL); + qdev = devm_kzalloc(&rpdev->dev, sizeof(*qdev), GFP_KERNEL); if (!qdev) return -ENOMEM; - qdev->channel = sdev->channel; - qdev->dev = &sdev->dev; + qdev->channel = rpdev->ept; + qdev->dev = &rpdev->dev; qdev->ep.xmit = qcom_smd_qrtr_send; rc = qrtr_endpoint_register(&qdev->ep, QRTR_EP_NID_AUTO); if (rc) return rc; - qcom_smd_set_drvdata(sdev->channel, qdev); - dev_set_drvdata(&sdev->dev, qdev); + dev_set_drvdata(&rpdev->dev, qdev); - dev_dbg(&sdev->dev, "Qualcomm SMD QRTR driver probed\n"); + dev_dbg(&rpdev->dev, "Qualcomm SMD QRTR driver probed\n"); return 0; } -static void qcom_smd_qrtr_remove(struct qcom_smd_device *sdev) +static void qcom_smd_qrtr_remove(struct rpmsg_device *rpdev) { - struct qrtr_smd_dev *qdev = dev_get_drvdata(&sdev->dev); + struct qrtr_smd_dev *qdev = dev_get_drvdata(&rpdev->dev); qrtr_endpoint_unregister(&qdev->ep); - dev_set_drvdata(&sdev->dev, NULL); + dev_set_drvdata(&rpdev->dev, NULL); } -static const struct qcom_smd_id qcom_smd_qrtr_smd_match[] = { +static const struct rpmsg_device_id qcom_smd_qrtr_smd_match[] = { { "IPCRTR" }, {} }; -static struct qcom_smd_driver qcom_smd_qrtr_driver = { +static struct rpmsg_driver qcom_smd_qrtr_driver = { .probe = qcom_smd_qrtr_probe, .remove = qcom_smd_qrtr_remove, .callback = qcom_smd_qrtr_callback, - .smd_match_table = qcom_smd_qrtr_smd_match, - .driver = { + .id_table = qcom_smd_qrtr_smd_match, + .drv = { .name = "qcom_smd_qrtr", - .owner = THIS_MODULE, }, }; -module_qcom_smd_driver(qcom_smd_qrtr_driver); +module_rpmsg_driver(qcom_smd_qrtr_driver); MODULE_DESCRIPTION("Qualcomm IPC-Router SMD interface driver"); MODULE_LICENSE("GPL v2"); diff --git a/scripts/package/Makefile b/scripts/package/Makefile index 71b4a8af9d4d..f82c82aab9e7 100644 --- a/scripts/package/Makefile +++ 
b/scripts/package/Makefile @@ -24,6 +24,7 @@ # Remove hyphens since they have special meaning in RPM filenames KERNELPATH := kernel-$(subst -,_,$(KERNELRELEASE)) KDEB_SOURCENAME ?= linux-$(KERNELRELEASE) +KBUILD_PKG_ROOTCMD ?="fakeroot -u" export KDEB_SOURCENAME # Include only those top-level files that are needed by make, plus the GPL copy TAR_CONTENT := $(KBUILD_ALLDIRS) .config .scmversion Makefile \ @@ -67,35 +68,20 @@ binrpm-pkg: FORCE $(UTS_MACHINE) -bb $(objtree)/binkernel.spec rm binkernel.spec -# Deb target -# --------------------------------------------------------------------------- -quiet_cmd_builddeb = BUILDDEB - cmd_builddeb = set -e; \ - test `id -u` = 0 || \ - test -n "$(KBUILD_PKG_ROOTCMD)" || { \ - which fakeroot >/dev/null 2>&1 && \ - KBUILD_PKG_ROOTCMD="fakeroot -u"; \ - } || { \ - echo; \ - echo "builddeb must be run as root (or using fakeroot)."; \ - echo "KBUILD_PKG_ROOTCMD is unset and fakeroot not found."; \ - echo "Try setting KBUILD_PKG_ROOTCMD to a command to acquire"; \ - echo "root privileges (e.g., 'fakeroot -u' or 'sudo')."; \ - false; \ - } && \ - \ - $$KBUILD_PKG_ROOTCMD $(CONFIG_SHELL) \ - $(srctree)/scripts/package/builddeb $@ - deb-pkg: FORCE $(MAKE) clean + $(CONFIG_SHELL) $(srctree)/scripts/package/mkdebian $(call cmd,src_tar,$(KDEB_SOURCENAME)) - $(MAKE) KBUILD_SRC= - +$(call cmd,builddeb) + origversion=$$(dpkg-parsechangelog -SVersion |sed 's/-[^-]*$$//');\ + mv $(KDEB_SOURCENAME).tar.gz ../$(KDEB_SOURCENAME)_$${origversion}.orig.tar.gz + +dpkg-buildpackage -j1 -r$(KBUILD_PKG_ROOTCMD) -a$$(cat debian/arch) -i.git -us -uc bindeb-pkg: FORCE - $(MAKE) KBUILD_SRC= - +$(call cmd,builddeb) + $(CONFIG_SHELL) $(srctree)/scripts/package/mkdebian + +dpkg-buildpackage -j1 -r$(KBUILD_PKG_ROOTCMD) -a$$(cat debian/arch) -b -nc -uc + +intdeb-pkg: FORCE + +$(CONFIG_SHELL) $(srctree)/scripts/package/builddeb clean-dirs += $(objtree)/debian/ diff --git a/scripts/package/builddeb b/scripts/package/builddeb index 3c575cd07888..decd5ad5c8a2 
100755 --- a/scripts/package/builddeb +++ b/scripts/package/builddeb @@ -30,80 +30,19 @@ create_package() { chmod -R a+rX "$pdir" # Create the package - dpkg-gencontrol $forcearch -Vkernel:debarch="${debarch}" -p$pname -P"$pdir" + dpkg-gencontrol -p$pname -P"$pdir" dpkg --build "$pdir" .. } -set_debarch() { - # Attempt to find the correct Debian architecture - case "$UTS_MACHINE" in - i386|ia64|alpha) - debarch="$UTS_MACHINE" ;; - x86_64) - debarch=amd64 ;; - sparc*) - debarch=sparc ;; - s390*) - debarch=s390$(grep -q CONFIG_64BIT=y $KCONFIG_CONFIG && echo x || true) ;; - ppc*) - debarch=$(grep -q CPU_LITTLE_ENDIAN=y $KCONFIG_CONFIG && echo ppc64el || echo powerpc) ;; - parisc*) - debarch=hppa ;; - mips*) - debarch=mips$(grep -q CPU_LITTLE_ENDIAN=y $KCONFIG_CONFIG && echo el || true) ;; - aarch64|arm64) - debarch=arm64 ;; - arm*) - if grep -q CONFIG_AEABI=y $KCONFIG_CONFIG; then - if grep -q CONFIG_VFP=y $KCONFIG_CONFIG; then - debarch=armhf - else - debarch=armel - fi - else - debarch=arm - fi - ;; - *) - debarch=$(dpkg --print-architecture) - echo "" >&2 - echo "** ** ** WARNING ** ** **" >&2 - echo "" >&2 - echo "Your architecture doesn't have it's equivalent" >&2 - echo "Debian userspace architecture defined!" >&2 - echo "Falling back to using your current userspace instead!" >&2 - echo "Please add support for $UTS_MACHINE to ${0} ..." 
>&2 - echo "" >&2 - esac - if [ -n "$KBUILD_DEBARCH" ] ; then - debarch="$KBUILD_DEBARCH" - fi - forcearch="-DArchitecture=$debarch" - -} - -# Some variables and settings used throughout the script version=$KERNELRELEASE -revision=$(cat .version) -if [ -n "$KDEB_PKGVERSION" ]; then - packageversion=$KDEB_PKGVERSION -else - packageversion=$version-$revision -fi -sourcename=$KDEB_SOURCENAME tmpdir="$objtree/debian/tmp" -fwdir="$objtree/debian/fwtmp" kernel_headers_dir="$objtree/debian/hdrtmp" libc_headers_dir="$objtree/debian/headertmp" dbg_dir="$objtree/debian/dbgtmp" packagename=linux-image-$version -fwpackagename=linux-firmware-image-$version kernel_headers_packagename=linux-headers-$version libc_headers_packagename=linux-libc-dev dbg_packagename=$packagename-dbg -debarch= -forcearch= -set_debarch if [ "$ARCH" = "um" ] ; then packagename=user-mode-linux-$version @@ -126,10 +65,9 @@ esac BUILD_DEBUG="$(grep -s '^CONFIG_DEBUG_INFO=y' $KCONFIG_CONFIG || true)" # Setup the directory structure -rm -rf "$tmpdir" "$fwdir" "$kernel_headers_dir" "$libc_headers_dir" "$dbg_dir" $objtree/debian/files +rm -rf "$tmpdir" "$kernel_headers_dir" "$libc_headers_dir" "$dbg_dir" $objtree/debian/files mkdir -m 755 -p "$tmpdir/DEBIAN" mkdir -p "$tmpdir/lib" "$tmpdir/boot" -mkdir -p "$fwdir/lib/firmware/$version/" mkdir -p "$kernel_headers_dir/lib/modules/$version/" # Build and install the kernel @@ -220,109 +158,6 @@ EOF chmod 755 "$tmpdir/DEBIAN/$script" done -# Try to determine maintainer and email values -if [ -n "$DEBEMAIL" ]; then - email=$DEBEMAIL -elif [ -n "$EMAIL" ]; then - email=$EMAIL -else - email=$(id -nu)@$(hostname -f 2>/dev/null || hostname) -fi -if [ -n "$DEBFULLNAME" ]; then - name=$DEBFULLNAME -elif [ -n "$NAME" ]; then - name=$NAME -else - name="Anonymous" -fi -maintainer="$name <$email>" - -# Try to determine distribution -if [ -n "$KDEB_CHANGELOG_DIST" ]; then - distribution=$KDEB_CHANGELOG_DIST -# In some cases lsb_release returns the codename as n/a, which breaks 
dpkg-parsechangelog -elif distribution=$(lsb_release -cs 2>/dev/null) && [ -n "$distribution" ] && [ "$distribution" != "n/a" ]; then - : # nothing to do in this case -else - distribution="unstable" - echo >&2 "Using default distribution of 'unstable' in the changelog" - echo >&2 "Install lsb-release or set \$KDEB_CHANGELOG_DIST explicitly" -fi - -# Generate a simple changelog template -cat <<EOF > debian/changelog -$sourcename ($packageversion) $distribution; urgency=low - - * Custom built Linux kernel. - - -- $maintainer $(date -R) -EOF - -# Generate copyright file -cat <<EOF > debian/copyright -This is a packacked upstream version of the Linux kernel. - -The sources may be found at most Linux ftp sites, including: -ftp://ftp.kernel.org/pub/linux/kernel - -Copyright: 1991 - 2015 Linus Torvalds and others. - -The git repository for mainline kernel development is at: -git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 dated June, 1991. - -On Debian GNU/Linux systems, the complete text of the GNU General Public -License version 2 can be found in \`/usr/share/common-licenses/GPL-2'. -EOF - - -build_depends="bc, kmod, cpio " - -# Generate a control file -cat <<EOF > debian/control -Source: $sourcename -Section: kernel -Priority: optional -Maintainer: $maintainer -Build-Depends: $build_depends -Standards-Version: 3.8.4 -Homepage: http://www.kernel.org/ -EOF - -if [ "$ARCH" = "um" ]; then - cat <<EOF >> debian/control - -Package: $packagename -Provides: linux-image, linux-image-2.6, linux-modules-$version -Architecture: any -Description: User Mode Linux kernel, version $version - User-mode Linux is a port of the Linux kernel to its own system call - interface. It provides a kind of virtual machine, which runs Linux - as a user process under another Linux kernel. 
This is useful for - kernel development, sandboxes, jails, experimentation, and - many other things. - . - This package contains the Linux kernel, modules and corresponding other - files, version: $version. -EOF - -else - cat <<EOF >> debian/control - -Package: $packagename -Provides: linux-image, linux-image-2.6, linux-modules-$version -Suggests: $fwpackagename -Architecture: any -Description: Linux kernel, version $version - This package contains the Linux kernel, modules and corresponding other - files, version: $version. -EOF - -fi - # Build kernel header package (cd $srctree; find . -name Makefile\* -o -name Kconfig\* -o -name \*.pl) > "$objtree/debian/hdrsrcfiles" (cd $srctree; find arch/*/include include scripts -type f) >> "$objtree/debian/hdrsrcfiles" @@ -343,47 +178,8 @@ mkdir -p "$destdir" ln -sf "/usr/src/linux-headers-$version" "$kernel_headers_dir/lib/modules/$version/build" rm -f "$objtree/debian/hdrsrcfiles" "$objtree/debian/hdrobjfiles" -cat <<EOF >> debian/control - -Package: $kernel_headers_packagename -Provides: linux-headers, linux-headers-2.6 -Architecture: any -Description: Linux kernel headers for $KERNELRELEASE on \${kernel:debarch} - This package provides kernel header files for $KERNELRELEASE on \${kernel:debarch} - . - This is useful for people who need to build external modules -EOF - -# Do we have firmware? Move it out of the way and build it into a package. -if [ -e "$tmpdir/lib/firmware" ]; then - mv "$tmpdir/lib/firmware"/* "$fwdir/lib/firmware/$version/" - rmdir "$tmpdir/lib/firmware" - - cat <<EOF >> debian/control - -Package: $fwpackagename -Architecture: all -Description: Linux kernel firmware, version $version - This package contains firmware from the Linux kernel, version $version. 
-EOF - - create_package "$fwpackagename" "$fwdir" -fi - -cat <<EOF >> debian/control - -Package: $libc_headers_packagename -Section: devel -Provides: linux-kernel-headers -Architecture: any -Description: Linux support headers for userspace development - This package provides userspaces headers from the Linux kernel. These headers - are used by the installed headers for GNU glibc and other system libraries. -EOF - if [ "$ARCH" != "um" ]; then create_package "$kernel_headers_packagename" "$kernel_headers_dir" - create_package "$libc_headers_packagename" "$libc_headers_dir" fi create_package "$packagename" "$tmpdir" @@ -399,48 +195,7 @@ if [ -n "$BUILD_DEBUG" ] ; then ln -s ../lib/modules/$version/vmlinux $dbg_dir/usr/lib/debug/boot/vmlinux-$version # kdump-tools ln -s lib/modules/$version/vmlinux $dbg_dir/usr/lib/debug/vmlinux-$version - - cat <<EOF >> debian/control - -Package: $dbg_packagename -Section: debug -Provides: linux-debug, linux-debug-$version -Architecture: any -Description: Linux kernel debugging symbols for $version - This package will come in handy if you need to debug the kernel. It provides - all the necessary debug symbols for the kernel and its modules. 
-EOF - create_package "$dbg_packagename" "$dbg_dir" fi -if [ "x$1" = "xdeb-pkg" ] -then - cat <<EOF > debian/rules -#!/usr/bin/make -f - -build: - \$(MAKE) - -binary-arch: - \$(MAKE) KDEB_SOURCENAME=${sourcename} KDEB_PKGVERSION=${packageversion} bindeb-pkg - -clean: - rm -rf debian/*tmp debian/files - mv debian/ debian.backup # debian/ might be cleaned away - \$(MAKE) clean - mv debian.backup debian - -binary: binary-arch -EOF - mv ${sourcename}.tar.gz ../${sourcename}_${version}.orig.tar.gz - tar caf ../${sourcename}_${packageversion}.debian.tar.gz debian/{copyright,rules,changelog,control} - dpkg-source -cdebian/control -ldebian/changelog --format="3.0 (custom)" --target-format="3.0 (quilt)" \ - -b / ../${sourcename}_${version}.orig.tar.gz ../${sourcename}_${packageversion}.debian.tar.gz - mv ${sourcename}_${packageversion}*dsc .. - dpkg-genchanges > ../${sourcename}_${packageversion}_${debarch}.changes -else - dpkg-genchanges -b > ../${sourcename}_${packageversion}_${debarch}.changes -fi - exit 0 diff --git a/scripts/package/mkdebian b/scripts/package/mkdebian new file mode 100644 index 000000000000..a151646a2c99 --- /dev/null +++ b/scripts/package/mkdebian @@ -0,0 +1,191 @@ +#!/bin/sh +# +# Copyright 2003 Wichert Akkerman <wichert@wiggy.net> +# +# Simple script to generate a debian/ directory for a Linux kernel. 
+ +set -e + +set_debarch() { + # Attempt to find the correct Debian architecture + case "$UTS_MACHINE" in + i386|ia64|alpha) + debarch="$UTS_MACHINE" ;; + x86_64) + debarch=amd64 ;; + sparc*) + debarch=sparc ;; + s390*) + debarch=s390$(grep -q CONFIG_64BIT=y $KCONFIG_CONFIG && echo x || true) ;; + ppc*) + debarch=$(grep -q CPU_LITTLE_ENDIAN=y $KCONFIG_CONFIG && echo ppc64el || echo powerpc) ;; + parisc*) + debarch=hppa ;; + mips*) + debarch=mips$(grep -q CPU_LITTLE_ENDIAN=y $KCONFIG_CONFIG && echo el || true) ;; + aarch64|arm64) + debarch=arm64 ;; + arm*) + if grep -q CONFIG_AEABI=y $KCONFIG_CONFIG; then + if grep -q CONFIG_VFP=y $KCONFIG_CONFIG; then + debarch=armhf + else + debarch=armel + fi + else + debarch=arm + fi + ;; + *) + debarch=$(dpkg --print-architecture) + echo "" >&2 + echo "** ** ** WARNING ** ** **" >&2 + echo "" >&2 + echo "Your architecture doesn't have its equivalent" >&2 + echo "Debian userspace architecture defined!" >&2 + echo "Falling back to using your current userspace instead!" >&2 + echo "Please add support for $UTS_MACHINE to ${0} ..." 
>&2 + echo "" >&2 + esac + if [ -n "$KBUILD_DEBARCH" ] ; then + debarch="$KBUILD_DEBARCH" + fi +} + +# Some variables and settings used throughout the script +version=$KERNELRELEASE +if [ -n "$KDEB_PKGVERSION" ]; then + packageversion=$KDEB_PKGVERSION +else + revision=$(cat .version 2>/dev/null||echo 1) + packageversion=$version-$revision +fi +sourcename=$KDEB_SOURCENAME +packagename=linux-image-$version +kernel_headers_packagename=linux-headers-$version +dbg_packagename=$packagename-dbg +debarch= +set_debarch + +if [ "$ARCH" = "um" ] ; then + packagename=user-mode-linux-$version +fi + +# Try to determine maintainer and email values +if [ -n "$DEBEMAIL" ]; then + email=$DEBEMAIL +elif [ -n "$EMAIL" ]; then + email=$EMAIL +else + email=$(id -nu)@$(hostname -f 2>/dev/null || hostname) +fi +if [ -n "$DEBFULLNAME" ]; then + name=$DEBFULLNAME +elif [ -n "$NAME" ]; then + name=$NAME +else + name="Anonymous" +fi +maintainer="$name <$email>" + +# Try to determine distribution +if [ -n "$KDEB_CHANGELOG_DIST" ]; then + distribution=$KDEB_CHANGELOG_DIST +# In some cases lsb_release returns the codename as n/a, which breaks dpkg-parsechangelog +elif distribution=$(lsb_release -cs 2>/dev/null) && [ -n "$distribution" ] && [ "$distribution" != "n/a" ]; then + : # nothing to do in this case +else + distribution="unstable" + echo >&2 "Using default distribution of 'unstable' in the changelog" + echo >&2 "Install lsb-release or set \$KDEB_CHANGELOG_DIST explicitly" +fi + +mkdir -p debian/ +echo $debarch > debian/arch + +# Generate a simple changelog template +cat <<EOF > debian/changelog +$sourcename ($packageversion) $distribution; urgency=low + + * Custom built Linux kernel. + + -- $maintainer $(date -R) +EOF + +# Generate copyright file +cat <<EOF > debian/copyright +This is a packacked upstream version of the Linux kernel. 
+ +The sources may be found at most Linux archive sites, including: +https://www.kernel.org/pub/linux/kernel + +Copyright: 1991 - 2017 Linus Torvalds and others. + +The git repository for mainline kernel development is at: +git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 dated June, 1991. + +On Debian GNU/Linux systems, the complete text of the GNU General Public +License version 2 can be found in \`/usr/share/common-licenses/GPL-2'. +EOF + +# Generate a control file +cat <<EOF > debian/control +Source: $sourcename +Section: kernel +Priority: optional +Maintainer: $maintainer +Build-Depends: bc, kmod, cpio +Homepage: http://www.kernel.org/ + +Package: $packagename +Architecture: $debarch +Description: Linux kernel, version $version + This package contains the Linux kernel, modules and corresponding other + files, version: $version. + +Package: $kernel_headers_packagename +Architecture: $debarch +Description: Linux kernel headers for $version on $debarch + This package provides kernel header files for $version on $debarch + . + This is useful for people who need to build external modules + +Package: linux-libc-dev +Section: devel +Provides: linux-kernel-headers +Architecture: $debarch +Description: Linux support headers for userspace development + This package provides userspaces headers from the Linux kernel. These headers + are used by the installed headers for GNU glibc and other system libraries. + +Package: $dbg_packagename +Section: debug +Architecture: $debarch +Description: Linux kernel debugging symbols for $version + This package will come in handy if you need to debug the kernel. It provides + all the necessary debug symbols for the kernel and its modules. 
+EOF + +cat <<EOF > debian/rules +#!/usr/bin/make -f + +build: + \$(MAKE) KERNELRELEASE=${version} ARCH=${ARCH} KBUILD_SRC= + +binary-arch: + \$(MAKE) KERNELRELEASE=${version} ARCH=${ARCH} KBUILD_SRC= intdeb-pkg + +clean: + rm -rf debian/*tmp debian/files + mv debian/ debian.backup # debian/ might be cleaned away + \$(MAKE) clean + mv debian.backup debian + +binary: binary-arch +EOF + +exit 0 diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig index c67667bb970f..28c7b84f13a5 100644 --- a/sound/soc/codecs/Kconfig +++ b/sound/soc/codecs/Kconfig @@ -581,6 +581,13 @@ config SND_SOC_MAX9860 depends on I2C select REGMAP_I2C +config SND_SOC_MSM8916_WCD_ANALOG + tristate "Qualcomm MSM8916 WCD Analog Codec" + depends on SPMI || COMPILE_TEST + +config SND_SOC_MSM8916_WCD_DIGITAL + tristate "Qualcomm MSM8916 WCD DIGITAL Codec" + config SND_SOC_PCM1681 tristate "Texas Instruments PCM1681 CODEC" depends on I2C diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile index 958cd4912fbc..472a7720a316 100644 --- a/sound/soc/codecs/Makefile +++ b/sound/soc/codecs/Makefile @@ -86,6 +86,8 @@ snd-soc-max9850-objs := max9850.o snd-soc-max9860-objs := max9860.o snd-soc-mc13783-objs := mc13783.o snd-soc-ml26124-objs := ml26124.o +snd-soc-msm8916-analog-objs := msm8916-wcd-analog.o +snd-soc-msm8916-digital-objs := msm8916-wcd-digital.o snd-soc-nau8810-objs := nau8810.o snd-soc-nau8825-objs := nau8825.o snd-soc-hdmi-codec-objs := hdmi-codec.o @@ -214,7 +216,6 @@ snd-soc-wm9705-objs := wm9705.o snd-soc-wm9712-objs := wm9712.o snd-soc-wm9713-objs := wm9713.o snd-soc-wm-hubs-objs := wm_hubs.o - # Amp snd-soc-max9877-objs := max9877.o snd-soc-max98504-objs := max98504.o @@ -310,6 +311,8 @@ obj-$(CONFIG_SND_SOC_MAX9850) += snd-soc-max9850.o obj-$(CONFIG_SND_SOC_MAX9860) += snd-soc-max9860.o obj-$(CONFIG_SND_SOC_MC13783) += snd-soc-mc13783.o obj-$(CONFIG_SND_SOC_ML26124) += snd-soc-ml26124.o +obj-$(CONFIG_SND_SOC_MSM8916_WCD_ANALOG) +=snd-soc-msm8916-analog.o 
+obj-$(CONFIG_SND_SOC_MSM8916_WCD_DIGITAL) +=snd-soc-msm8916-digital.o obj-$(CONFIG_SND_SOC_NAU8810) += snd-soc-nau8810.o obj-$(CONFIG_SND_SOC_NAU8825) += snd-soc-nau8825.o obj-$(CONFIG_SND_SOC_HDMI_CODEC) += snd-soc-hdmi-codec.o diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c new file mode 100644 index 000000000000..d8e8590746af --- /dev/null +++ b/sound/soc/codecs/msm8916-wcd-analog.c @@ -0,0 +1,890 @@ +#include <linux/module.h> +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/delay.h> +#include <linux/regulator/consumer.h> +#include <linux/types.h> +#include <linux/clk.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <sound/soc.h> +#include <sound/pcm.h> +#include <sound/pcm_params.h> +#include <sound/tlv.h> + +#define CDC_D_REVISION1 (0xf000) +#define CDC_D_PERPH_SUBTYPE (0xf005) +#define CDC_D_CDC_RST_CTL (0xf046) +#define RST_CTL_DIG_SW_RST_N_MASK BIT(7) +#define RST_CTL_DIG_SW_RST_N_RESET 0 +#define RST_CTL_DIG_SW_RST_N_REMOVE_RESET BIT(7) + +#define CDC_D_CDC_TOP_CLK_CTL (0xf048) +#define TOP_CLK_CTL_A_MCLK_MCLK2_EN_MASK (BIT(2) | BIT(3)) +#define TOP_CLK_CTL_A_MCLK_EN_ENABLE BIT(2) +#define TOP_CLK_CTL_A_MCLK2_EN_ENABLE BIT(3) + +#define CDC_D_CDC_ANA_CLK_CTL (0xf049) +#define ANA_CLK_CTL_EAR_HPHR_CLK_EN_MASK BIT(0) +#define ANA_CLK_CTL_EAR_HPHR_CLK_EN BIT(0) +#define ANA_CLK_CTL_EAR_HPHL_CLK_EN BIT(1) +#define ANA_CLK_CTL_SPKR_CLK_EN_MASK BIT(4) +#define ANA_CLK_CTL_SPKR_CLK_EN BIT(4) +#define ANA_CLK_CTL_TXA_CLK25_EN BIT(5) + +#define CDC_D_CDC_DIG_CLK_CTL (0xf04A) +#define DIG_CLK_CTL_RXD1_CLK_EN BIT(0) +#define DIG_CLK_CTL_RXD2_CLK_EN BIT(1) +#define DIG_CLK_CTL_RXD3_CLK_EN BIT(3) +#define DIG_CLK_CTL_TXD_CLK_EN BIT(4) +#define DIG_CLK_CTL_NCP_CLK_EN_MASK BIT(6) +#define DIG_CLK_CTL_NCP_CLK_EN BIT(6) +#define DIG_CLK_CTL_RXD_PDM_CLK_EN_MASK BIT(7) +#define DIG_CLK_CTL_RXD_PDM_CLK_EN BIT(7) + +#define CDC_D_CDC_CONN_TX1_CTL (0xf050) +#define 
CONN_TX1_SERIAL_TX1_MUX GENMASK(1, 0) +#define CONN_TX1_SERIAL_TX1_ADC_1 0x0 +#define CONN_TX1_SERIAL_TX1_RX_PDM_LB 0x1 +#define CONN_TX1_SERIAL_TX1_ZERO 0x2 + +#define CDC_D_CDC_CONN_TX2_CTL (0xf051) +#define CONN_TX2_SERIAL_TX2_MUX GENMASK(1, 0) +#define CONN_TX2_SERIAL_TX2_ADC_2 0x0 +#define CONN_TX2_SERIAL_TX2_RX_PDM_LB 0x1 +#define CONN_TX2_SERIAL_TX2_ZERO 0x2 +#define CDC_D_CDC_CONN_HPHR_DAC_CTL (0xf052) +#define CDC_D_CDC_CONN_RX1_CTL (0xf053) +#define CDC_D_CDC_CONN_RX2_CTL (0xf054) +#define CDC_D_CDC_CONN_RX3_CTL (0xf055) +#define CDC_D_CDC_CONN_RX_LB_CTL (0xf056) +#define CDC_D_SEC_ACCESS (0xf0D0) +#define CDC_D_PERPH_RESET_CTL3 (0xf0DA) +#define CDC_D_PERPH_RESET_CTL4 (0xf0DB) +#define CDC_A_REVISION1 (0xf100) +#define CDC_A_REVISION2 (0xf101) +#define CDC_A_REVISION3 (0xf102) +#define CDC_A_REVISION4 (0xf103) +#define CDC_A_PERPH_TYPE (0xf104) +#define CDC_A_PERPH_SUBTYPE (0xf105) +#define CDC_A_INT_RT_STS (0xf110) +#define CDC_A_INT_SET_TYPE (0xf111) +#define CDC_A_INT_POLARITY_HIGH (0xf112) +#define CDC_A_INT_POLARITY_LOW (0xf113) +#define CDC_A_INT_LATCHED_CLR (0xf114) +#define CDC_A_INT_EN_SET (0xf115) +#define CDC_A_INT_EN_CLR (0xf116) +#define CDC_A_INT_LATCHED_STS (0xf118) +#define CDC_A_INT_PENDING_STS (0xf119) +#define CDC_A_INT_MID_SEL (0xf11A) +#define CDC_A_INT_PRIORITY (0xf11B) +#define CDC_A_MICB_1_EN (0xf140) +#define MICB_1_EN_MICB_ENABLE BIT(7) +#define MICB_1_EN_BYP_CAP_MASK BIT(6) +#define MICB_1_EN_NO_EXT_BYP_CAP BIT(6) +#define MICB_1_EN_EXT_BYP_CAP 0 +#define MICB_1_EN_PULL_DOWN_EN_MASK BIT(5) +#define MICB_1_EN_PULL_DOWN_EN_ENABLE BIT(5) +#define MICB_1_EN_OPA_STG2_TAIL_CURR_MASK GENMASK(3, 1) +#define MICB_1_EN_OPA_STG2_TAIL_CURR_1_60UA (0x4) +#define MICB_1_EN_PULL_UP_EN_MASK BIT(4) +#define MICB_1_EN_TX3_GND_SEL_MASK BIT(0) +#define MICB_1_EN_TX3_GND_SEL_TX_GND 0 + +#define CDC_A_MICB_1_VAL (0xf141) +#define MICB_1_VAL_MICB_OUT_VAL_MASK GENMASK(7, 3) +#define MICB_1_VAL_MICB_OUT_VAL_V2P70V ((0x16) << 3) +#define 
CDC_A_MICB_1_CTL (0xf142) + +#define MICB_1_CTL_CFILT_REF_SEL_MASK BIT(1) +#define MICB_1_CTL_CFILT_REF_SEL_HPF_REF BIT(1) +#define MICB_1_CTL_EXT_PRECHARG_EN_MASK BIT(5) +#define MICB_1_CTL_EXT_PRECHARG_EN_ENABLE BIT(5) +#define MICB_1_CTL_INT_PRECHARG_BYP_MASK BIT(6) +#define MICB_1_CTL_INT_PRECHARG_BYP_EXT_PRECHRG_SEL BIT(6) + +#define CDC_A_MICB_1_INT_RBIAS (0xf143) +#define MICB_1_INT_TX1_INT_RBIAS_EN_MASK BIT(7) +#define MICB_1_INT_TX1_INT_RBIAS_EN_ENABLE BIT(7) +#define MICB_1_INT_TX1_INT_RBIAS_EN_DISABLE 0 + +#define MICB_1_INT_TX1_INT_PULLUP_EN_MASK BIT(6) +#define MICB_1_INT_TX1_INT_PULLUP_EN_TX1N_TO_MICBIAS BIT(6) +#define MICB_1_INT_TX1_INT_PULLUP_EN_TX1N_TO_GND 0 + +#define MICB_1_INT_TX2_INT_RBIAS_EN_MASK BIT(4) +#define MICB_1_INT_TX2_INT_RBIAS_EN_ENABLE BIT(4) +#define MICB_1_INT_TX2_INT_RBIAS_EN_DISABLE 0 +#define MICB_1_INT_TX2_INT_PULLUP_EN_MASK BIT(3) +#define MICB_1_INT_TX2_INT_PULLUP_EN_TX1N_TO_MICBIAS BIT(3) +#define MICB_1_INT_TX2_INT_PULLUP_EN_TX1N_TO_GND 0 + +#define MICB_1_INT_TX3_INT_RBIAS_EN_MASK BIT(1) +#define MICB_1_INT_TX3_INT_RBIAS_EN_ENABLE BIT(1) +#define MICB_1_INT_TX3_INT_RBIAS_EN_DISABLE 0 +#define MICB_1_INT_TX3_INT_PULLUP_EN_MASK BIT(0) +#define MICB_1_INT_TX3_INT_PULLUP_EN_TX1N_TO_MICBIAS BIT(0) +#define MICB_1_INT_TX3_INT_PULLUP_EN_TX1N_TO_GND 0 + +#define CDC_A_MICB_2_EN (0xf144) +#define CDC_A_TX_1_2_ATEST_CTL_2 (0xf145) +#define CDC_A_MASTER_BIAS_CTL (0xf146) +#define CDC_A_TX_1_EN (0xf160) +#define CDC_A_TX_2_EN (0xf161) +#define CDC_A_TX_1_2_TEST_CTL_1 (0xf162) +#define CDC_A_TX_1_2_TEST_CTL_2 (0xf163) +#define CDC_A_TX_1_2_ATEST_CTL (0xf164) +#define CDC_A_TX_1_2_OPAMP_BIAS (0xf165) +#define CDC_A_TX_3_EN (0xf167) +#define CDC_A_NCP_EN (0xf180) +#define CDC_A_NCP_CLK (0xf181) +#define CDC_A_NCP_FBCTRL (0xf183) +#define CDC_A_NCP_FBCTRL_FB_CLK_INV_MASK BIT(5) +#define CDC_A_NCP_FBCTRL_FB_CLK_INV BIT(5) +#define CDC_A_NCP_BIAS (0xf184) +#define CDC_A_NCP_VCTRL (0xf185) +#define CDC_A_NCP_TEST (0xf186) +#define 
CDC_A_NCP_CLIM_ADDR (0xf187) +#define CDC_A_RX_CLOCK_DIVIDER (0xf190) +#define CDC_A_RX_COM_OCP_CTL (0xf191) +#define CDC_A_RX_COM_OCP_COUNT (0xf192) +#define CDC_A_RX_COM_BIAS_DAC (0xf193) +#define RX_COM_BIAS_DAC_RX_BIAS_EN_MASK BIT(7) +#define RX_COM_BIAS_DAC_RX_BIAS_EN_ENABLE BIT(7) +#define RX_COM_BIAS_DAC_DAC_REF_EN_MASK BIT(0) +#define RX_COM_BIAS_DAC_DAC_REF_EN_ENABLE BIT(0) + +#define CDC_A_RX_HPH_BIAS_PA (0xf194) +#define CDC_A_RX_HPH_BIAS_LDO_OCP (0xf195) +#define CDC_A_RX_HPH_BIAS_CNP (0xf196) +#define CDC_A_RX_HPH_CNP_EN (0xf197) +#define CDC_A_RX_HPH_L_PA_DAC_CTL (0xf19B) +#define RX_HPA_L_PA_DAC_CTL_DATA_RESET_MASK BIT(1) +#define RX_HPA_L_PA_DAC_CTL_DATA_RESET_RESET BIT(1) +#define CDC_A_RX_HPH_R_PA_DAC_CTL (0xf19D) +#define RX_HPH_R_PA_DAC_CTL_DATA_RESET BIT(1) +#define RX_HPH_R_PA_DAC_CTL_DATA_RESET_MASK BIT(1) + +#define CDC_A_RX_EAR_CTL (0xf19E) +#define RX_EAR_CTL_SPK_VBAT_LDO_EN_MASK BIT(0) +#define RX_EAR_CTL_SPK_VBAT_LDO_EN_ENABLE BIT(0) + +#define CDC_A_SPKR_DAC_CTL (0xf1B0) +#define SPKR_DAC_CTL_DAC_RESET_MASK BIT(4) +#define SPKR_DAC_CTL_DAC_RESET_NORMAL 0 + +#define CDC_A_SPKR_DRV_CTL (0xf1B2) +#define SPKR_DRV_CTL_DEF_MASK 0xEF +#define SPKR_DRV_CLASSD_PA_EN_MASK BIT(7) +#define SPKR_DRV_CLASSD_PA_EN_ENABLE BIT(7) +#define SPKR_DRV_CAL_EN BIT(6) +#define SPKR_DRV_SETTLE_EN BIT(5) +#define SPKR_DRV_FW_EN BIT(3) +#define SPKR_DRV_BOOST_SET BIT(2) +#define SPKR_DRV_CMFB_SET BIT(1) +#define SPKR_DRV_GAIN_SET BIT(0) +#define SPKR_DRV_CTL_DEF_VAL (SPKR_DRV_CLASSD_PA_EN_ENABLE | \ + SPKR_DRV_CAL_EN | SPKR_DRV_SETTLE_EN | \ + SPKR_DRV_FW_EN | SPKR_DRV_BOOST_SET | \ + SPKR_DRV_CMFB_SET | SPKR_DRV_GAIN_SET) +#define CDC_A_SPKR_OCP_CTL (0xf1B4) +#define CDC_A_SPKR_PWRSTG_CTL (0xf1B5) +#define SPKR_PWRSTG_CTL_DAC_EN_MASK BIT(0) +#define SPKR_PWRSTG_CTL_DAC_EN BIT(0) +#define SPKR_PWRSTG_CTL_MASK 0xE0 +#define SPKR_PWRSTG_CTL_BBM_MASK BIT(7) +#define SPKR_PWRSTG_CTL_BBM_EN BIT(7) +#define SPKR_PWRSTG_CTL_HBRDGE_EN_MASK BIT(6) +#define 
SPKR_PWRSTG_CTL_HBRDGE_EN BIT(6) +#define SPKR_PWRSTG_CTL_CLAMP_EN_MASK BIT(5) +#define SPKR_PWRSTG_CTL_CLAMP_EN BIT(5) + +#define CDC_A_SPKR_DRV_DBG (0xf1B7) +#define CDC_A_CURRENT_LIMIT (0xf1C0) +#define CDC_A_BOOST_EN_CTL (0xf1C3) +#define CDC_A_SLOPE_COMP_IP_ZERO (0xf1C4) +#define CDC_A_SEC_ACCESS (0xf1D0) +#define CDC_A_PERPH_RESET_CTL3 (0xf1DA) +#define CDC_A_PERPH_RESET_CTL4 (0xf1DB) + +#define MSM8916_WCD_ANALOG_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\ + SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000) +#define MSM8916_WCD_ANALOG_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\ + SNDRV_PCM_FMTBIT_S24_LE) + +static const char * const supply_names[] = { + "vdd-cdc-io", + "vdd-cdc-tx-rx-cx", +}; + +struct pm8916_wcd_analog_priv { + u16 pmic_rev; + u16 codec_version; + struct clk *mclk; + struct regulator_bulk_data supplies[ARRAY_SIZE(supply_names)]; + bool micbias1_cap_mode; + bool micbias2_cap_mode; +}; + +static const char *const adc2_mux_text[] = { "ZERO", "INP2", "INP3" }; +static const char *const rdac2_mux_text[] = { "ZERO", "RX2", "RX1" }; +static const char *const hph_text[] = { "ZERO", "Switch", }; + +static const struct soc_enum hph_enum = SOC_ENUM_SINGLE_VIRT( + ARRAY_SIZE(hph_text), hph_text); + +static const struct snd_kcontrol_new hphl_mux = SOC_DAPM_ENUM("HPHL", hph_enum); +static const struct snd_kcontrol_new hphr_mux = SOC_DAPM_ENUM("HPHR", hph_enum); + +/* ADC2 MUX */ +static const struct soc_enum adc2_enum = SOC_ENUM_SINGLE_VIRT( + ARRAY_SIZE(adc2_mux_text), adc2_mux_text); + +/* RDAC2 MUX */ +static const struct soc_enum rdac2_mux_enum = SOC_ENUM_SINGLE( + CDC_D_CDC_CONN_HPHR_DAC_CTL, 0, 3, rdac2_mux_text); + +static const struct snd_kcontrol_new spkr_switch[] = { + SOC_DAPM_SINGLE("Switch", CDC_A_SPKR_DAC_CTL, 7, 1, 0) +}; + +static const struct snd_kcontrol_new rdac2_mux = SOC_DAPM_ENUM( + "RDAC2 MUX Mux", rdac2_mux_enum); +static const struct snd_kcontrol_new tx_adc2_mux = SOC_DAPM_ENUM( + "ADC2 MUX Mux", adc2_enum); + +/* Analog Gain control 
0 dB to +24 dB in 6 dB steps */ +static const DECLARE_TLV_DB_SCALE(analog_gain, 0, 600, 0); + +static const struct snd_kcontrol_new pm8916_wcd_analog_snd_controls[] = { + SOC_SINGLE_TLV("ADC1 Volume", CDC_A_TX_1_EN, 3, 8, 0, analog_gain), + SOC_SINGLE_TLV("ADC2 Volume", CDC_A_TX_2_EN, 3, 8, 0, analog_gain), + SOC_SINGLE_TLV("ADC3 Volume", CDC_A_TX_3_EN, 3, 8, 0, analog_gain), +}; + +static void pm8916_wcd_analog_micbias_enable(struct snd_soc_codec *codec) +{ + snd_soc_update_bits(codec, CDC_A_MICB_1_CTL, + MICB_1_CTL_EXT_PRECHARG_EN_MASK | + MICB_1_CTL_INT_PRECHARG_BYP_MASK, + MICB_1_CTL_INT_PRECHARG_BYP_EXT_PRECHRG_SEL + | MICB_1_CTL_EXT_PRECHARG_EN_ENABLE); + + snd_soc_write(codec, CDC_A_MICB_1_VAL, MICB_1_VAL_MICB_OUT_VAL_V2P70V); + /* + * Special headset needs MICBIAS as 2.7V so wait for + * 50 msec for the MICBIAS to reach 2.7 volts. + */ + msleep(50); + snd_soc_update_bits(codec, CDC_A_MICB_1_CTL, + MICB_1_CTL_EXT_PRECHARG_EN_MASK | + MICB_1_CTL_INT_PRECHARG_BYP_MASK, 0); + +} + +static int pm8916_wcd_analog_enable_micbias_ext(struct snd_soc_codec + *codec, int event, + int reg, u32 cap_mode) +{ + switch (event) { + case SND_SOC_DAPM_POST_PMU: + pm8916_wcd_analog_micbias_enable(codec); + snd_soc_update_bits(codec, CDC_A_MICB_1_EN, + MICB_1_EN_BYP_CAP_MASK, cap_mode); + break; + } + + return 0; +} + +static int pm8916_wcd_analog_enable_micbias_int(struct snd_soc_codec + *codec, int event, + int reg, u32 cap_mode) +{ + + switch (event) { + case SND_SOC_DAPM_PRE_PMU: + snd_soc_update_bits(codec, CDC_A_MICB_1_INT_RBIAS, + MICB_1_INT_TX2_INT_RBIAS_EN_MASK, + MICB_1_INT_TX2_INT_RBIAS_EN_ENABLE); + snd_soc_update_bits(codec, reg, MICB_1_EN_PULL_DOWN_EN_MASK, 0); + snd_soc_update_bits(codec, CDC_A_MICB_1_EN, + MICB_1_EN_OPA_STG2_TAIL_CURR_MASK, + MICB_1_EN_OPA_STG2_TAIL_CURR_1_60UA); + + break; + case SND_SOC_DAPM_POST_PMU: + pm8916_wcd_analog_micbias_enable(codec); + snd_soc_update_bits(codec, CDC_A_MICB_1_EN, + MICB_1_EN_BYP_CAP_MASK, cap_mode); + break; + } + + 
return 0; +} + +static int pm8916_wcd_analog_enable_micbias_ext1(struct + snd_soc_dapm_widget + *w, struct snd_kcontrol + *kcontrol, int event) +{ + struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); + struct pm8916_wcd_analog_priv *wcd = snd_soc_codec_get_drvdata(codec); + + return pm8916_wcd_analog_enable_micbias_ext(codec, event, w->reg, + wcd->micbias1_cap_mode); +} + +static int pm8916_wcd_analog_enable_micbias_ext2(struct + snd_soc_dapm_widget + *w, struct snd_kcontrol + *kcontrol, int event) +{ + struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); + struct pm8916_wcd_analog_priv *wcd = snd_soc_codec_get_drvdata(codec); + + return pm8916_wcd_analog_enable_micbias_ext(codec, event, w->reg, + wcd->micbias2_cap_mode); + +} + +static int pm8916_wcd_analog_enable_micbias_int1(struct + snd_soc_dapm_widget + *w, struct snd_kcontrol + *kcontrol, int event) +{ + struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); + struct pm8916_wcd_analog_priv *wcd = snd_soc_codec_get_drvdata(codec); + + return pm8916_wcd_analog_enable_micbias_int(codec, event, w->reg, + wcd->micbias1_cap_mode); +} + +static int pm8916_wcd_analog_enable_micbias_int2(struct + snd_soc_dapm_widget + *w, struct snd_kcontrol + *kcontrol, int event) +{ + struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); + struct pm8916_wcd_analog_priv *wcd = snd_soc_codec_get_drvdata(codec); + + return pm8916_wcd_analog_enable_micbias_int(codec, event, w->reg, + wcd->micbias2_cap_mode); +} + +static int pm8916_wcd_analog_enable_adc(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, + int event) +{ + struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); + u16 adc_reg = CDC_A_TX_1_2_TEST_CTL_2; + u8 init_bit_shift; + + if (w->reg == CDC_A_TX_1_EN) + init_bit_shift = 5; + else + init_bit_shift = 4; + + switch (event) { + case SND_SOC_DAPM_PRE_PMU: + if (w->reg == CDC_A_TX_2_EN) + snd_soc_update_bits(codec, CDC_A_MICB_1_CTL, + MICB_1_CTL_CFILT_REF_SEL_MASK, + 
MICB_1_CTL_CFILT_REF_SEL_HPF_REF); + /* + * Add delay of 10 ms to give sufficient time for the voltage + * to shoot up and settle so that the txfe init does not + * happen when the input voltage is changing too much. + */ + usleep_range(10000, 10010); + snd_soc_update_bits(codec, adc_reg, 1 << init_bit_shift, + 1 << init_bit_shift); + switch (w->reg) { + case CDC_A_TX_1_EN: + snd_soc_update_bits(codec, CDC_D_CDC_CONN_TX1_CTL, + CONN_TX1_SERIAL_TX1_MUX, + CONN_TX1_SERIAL_TX1_ADC_1); + break; + case CDC_A_TX_2_EN: + case CDC_A_TX_3_EN: + snd_soc_update_bits(codec, CDC_D_CDC_CONN_TX2_CTL, + CONN_TX2_SERIAL_TX2_MUX, + CONN_TX2_SERIAL_TX2_ADC_2); + break; + } + break; + case SND_SOC_DAPM_POST_PMU: + /* + * Add delay of 12 ms before deasserting the init + * to reduce the tx pop + */ + usleep_range(12000, 12010); + snd_soc_update_bits(codec, adc_reg, 1 << init_bit_shift, 0x00); + break; + case SND_SOC_DAPM_POST_PMD: + switch (w->reg) { + case CDC_A_TX_1_EN: + snd_soc_update_bits(codec, CDC_D_CDC_CONN_TX1_CTL, + CONN_TX1_SERIAL_TX1_MUX, + CONN_TX1_SERIAL_TX1_ZERO); + break; + case CDC_A_TX_2_EN: + snd_soc_update_bits(codec, CDC_A_MICB_1_CTL, + MICB_1_CTL_CFILT_REF_SEL_MASK, 0); + /* + * fall through: TX2 and TX3 share the TX2 serial mux + * (see the grouped PRE_PMU cases above), so TX2 teardown + * must also zero CDC_D_CDC_CONN_TX2_CTL. + */ + case CDC_A_TX_3_EN: + snd_soc_update_bits(codec, CDC_D_CDC_CONN_TX2_CTL, + CONN_TX2_SERIAL_TX2_MUX, + CONN_TX2_SERIAL_TX2_ZERO); + break; + } + + + break; + } + return 0; +} + +/* + * DAPM event handler for the speaker Class-D PA: sequences the power + * stage bits (DAC, BBM, H-bridge, clamp) and the speaker VBAT LDO + * around PA power-up/power-down. + */ +static int pm8916_wcd_analog_enable_spk_pa(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, + int event) +{ + struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); + + switch (event) { + case SND_SOC_DAPM_PRE_PMU: + snd_soc_update_bits(codec, CDC_A_SPKR_PWRSTG_CTL, + SPKR_PWRSTG_CTL_DAC_EN_MASK | + SPKR_PWRSTG_CTL_BBM_MASK | + SPKR_PWRSTG_CTL_HBRDGE_EN_MASK | + SPKR_PWRSTG_CTL_CLAMP_EN_MASK, + SPKR_PWRSTG_CTL_DAC_EN| + SPKR_PWRSTG_CTL_BBM_EN | + SPKR_PWRSTG_CTL_HBRDGE_EN | + SPKR_PWRSTG_CTL_CLAMP_EN); + + snd_soc_update_bits(codec, CDC_A_RX_EAR_CTL, + RX_EAR_CTL_SPK_VBAT_LDO_EN_MASK, + 
RX_EAR_CTL_SPK_VBAT_LDO_EN_ENABLE); + break; + case SND_SOC_DAPM_POST_PMU: + snd_soc_update_bits(codec, CDC_A_SPKR_DRV_CTL, + SPKR_DRV_CTL_DEF_MASK, + SPKR_DRV_CTL_DEF_VAL); + snd_soc_update_bits(codec, w->reg, + SPKR_DRV_CLASSD_PA_EN_MASK, + SPKR_DRV_CLASSD_PA_EN_ENABLE); + break; + case SND_SOC_DAPM_POST_PMD: + snd_soc_update_bits(codec, CDC_A_SPKR_PWRSTG_CTL, + SPKR_PWRSTG_CTL_DAC_EN_MASK| + SPKR_PWRSTG_CTL_BBM_MASK | + SPKR_PWRSTG_CTL_HBRDGE_EN_MASK | + SPKR_PWRSTG_CTL_CLAMP_EN_MASK, 0); + + snd_soc_update_bits(codec, CDC_A_SPKR_DAC_CTL, + SPKR_DAC_CTL_DAC_RESET_MASK, + SPKR_DAC_CTL_DAC_RESET_NORMAL); + snd_soc_update_bits(codec, CDC_A_RX_EAR_CTL, + RX_EAR_CTL_SPK_VBAT_LDO_EN_MASK, 0); + break; + } + return 0; +} + +static const struct reg_default wcd_reg_defaults_2_0[] = { + {CDC_A_RX_COM_OCP_CTL, 0xD1}, + {CDC_A_RX_COM_OCP_COUNT, 0xFF}, + {CDC_D_SEC_ACCESS, 0xA5}, + {CDC_D_PERPH_RESET_CTL3, 0x0F}, + {CDC_A_TX_1_2_OPAMP_BIAS, 0x4F}, + {CDC_A_NCP_FBCTRL, 0x28}, + {CDC_A_SPKR_DRV_CTL, 0x69}, + {CDC_A_SPKR_DRV_DBG, 0x01}, + {CDC_A_BOOST_EN_CTL, 0x5F}, + {CDC_A_SLOPE_COMP_IP_ZERO, 0x88}, + {CDC_A_SEC_ACCESS, 0xA5}, + {CDC_A_PERPH_RESET_CTL3, 0x0F}, + {CDC_A_CURRENT_LIMIT, 0x82}, + {CDC_A_SPKR_DAC_CTL, 0x03}, + {CDC_A_SPKR_OCP_CTL, 0xE1}, + {CDC_A_MASTER_BIAS_CTL, 0x30}, +}; + +static int pm8916_wcd_analog_probe(struct snd_soc_codec *codec) +{ + struct pm8916_wcd_analog_priv *priv = dev_get_drvdata(codec->dev); + int err, reg; + + err = regulator_bulk_enable(ARRAY_SIZE(priv->supplies), priv->supplies); + if (err != 0) { + dev_err(codec->dev, "failed to enable regulators (%d)\n", err); + return err; + } + + snd_soc_codec_set_drvdata(codec, priv); + priv->pmic_rev = snd_soc_read(codec, CDC_D_REVISION1); + priv->codec_version = snd_soc_read(codec, CDC_D_PERPH_SUBTYPE); + + dev_info(codec->dev, "PMIC REV: %d\t CODEC Version: %d\n", + priv->pmic_rev, priv->codec_version); + + snd_soc_write(codec, CDC_D_PERPH_RESET_CTL4, 0x01); + snd_soc_write(codec, 
CDC_A_PERPH_RESET_CTL4, 0x01); + + for (reg = 0; reg < ARRAY_SIZE(wcd_reg_defaults_2_0); reg++) + snd_soc_write(codec, wcd_reg_defaults_2_0[reg].reg, + wcd_reg_defaults_2_0[reg].def); + + return 0; +} + +static int pm8916_wcd_analog_remove(struct snd_soc_codec *codec) +{ + struct pm8916_wcd_analog_priv *priv = dev_get_drvdata(codec->dev); + + return regulator_bulk_disable(ARRAY_SIZE(priv->supplies), + priv->supplies); +} + +static const struct snd_soc_dapm_route pm8916_wcd_analog_audio_map[] = { + + {"PDM_RX1", NULL, "PDM Playback"}, + {"PDM_RX2", NULL, "PDM Playback"}, + {"PDM_RX3", NULL, "PDM Playback"}, + {"PDM Capture", NULL, "PDM_TX"}, + + /* ADC Connections */ + {"PDM_TX", NULL, "ADC2"}, + {"PDM_TX", NULL, "ADC3"}, + {"ADC2", NULL, "ADC2 MUX"}, + {"ADC3", NULL, "ADC2 MUX"}, + {"ADC2 MUX", "INP2", "ADC2_INP2"}, + {"ADC2 MUX", "INP3", "ADC2_INP3"}, + + {"PDM_TX", NULL, "ADC1"}, + {"ADC1", NULL, "AMIC1"}, + {"ADC2_INP2", NULL, "AMIC2"}, + {"ADC2_INP3", NULL, "AMIC3"}, + + /* RDAC Connections */ + {"HPHR DAC", NULL, "RDAC2 MUX"}, + {"RDAC2 MUX", "RX1", "PDM_RX1"}, + {"RDAC2 MUX", "RX2", "PDM_RX2"}, + {"HPHL DAC", NULL, "PDM_RX1"}, + {"PDM_RX1", NULL, "RXD1_CLK"}, + {"PDM_RX2", NULL, "RXD2_CLK"}, + {"PDM_RX3", NULL, "RXD3_CLK"}, + + {"PDM_RX1", NULL, "RXD_PDM_CLK"}, + {"PDM_RX2", NULL, "RXD_PDM_CLK"}, + {"PDM_RX3", NULL, "RXD_PDM_CLK"}, + + {"ADC1", NULL, "TXD_CLK"}, + {"ADC2", NULL, "TXD_CLK"}, + {"ADC3", NULL, "TXD_CLK"}, + + {"ADC1", NULL, "TXA_CLK25"}, + {"ADC2", NULL, "TXA_CLK25"}, + {"ADC3", NULL, "TXA_CLK25"}, + + {"PDM_RX1", NULL, "A_MCLK2"}, + {"PDM_RX2", NULL, "A_MCLK2"}, + {"PDM_RX3", NULL, "A_MCLK2"}, + + {"PDM_TX", NULL, "A_MCLK2"}, + {"A_MCLK2", NULL, "A_MCLK"}, + + /* Headset (RX MIX1 and RX MIX2) */ + {"HEADPHONE", NULL, "HPHL PA"}, + {"HEADPHONE", NULL, "HPHR PA"}, + + {"HPHL PA", NULL, "EAR_HPHL_CLK"}, + {"HPHR PA", NULL, "EAR_HPHR_CLK"}, + + {"CP", NULL, "NCP_CLK"}, + + {"HPHL PA", NULL, "HPHL"}, + {"HPHR PA", NULL, "HPHR"}, + {"HPHL PA", NULL, 
"CP"}, + {"HPHL PA", NULL, "RX_BIAS"}, + {"HPHR PA", NULL, "CP"}, + {"HPHR PA", NULL, "RX_BIAS"}, + {"HPHL", "Switch", "HPHL DAC"}, + {"HPHR", "Switch", "HPHR DAC"}, + + {"RX_BIAS", NULL, "DAC_REF"}, + + {"SPK_OUT", NULL, "SPK PA"}, + {"SPK PA", NULL, "RX_BIAS"}, + {"SPK PA", NULL, "SPKR_CLK"}, + {"SPK PA", NULL, "SPK DAC"}, + {"SPK DAC", "Switch", "PDM_RX3"}, + + {"MIC BIAS Internal1", NULL, "INT_LDO_H"}, + {"MIC BIAS Internal2", NULL, "INT_LDO_H"}, + {"MIC BIAS External1", NULL, "INT_LDO_H"}, + {"MIC BIAS External2", NULL, "INT_LDO_H"}, + {"MIC BIAS Internal1", NULL, "vdd-micbias"}, + {"MIC BIAS Internal2", NULL, "vdd-micbias"}, + {"MIC BIAS External1", NULL, "vdd-micbias"}, + {"MIC BIAS External2", NULL, "vdd-micbias"}, +}; + +static const struct snd_soc_dapm_widget pm8916_wcd_analog_dapm_widgets[] = { + + SND_SOC_DAPM_AIF_IN("PDM_RX1", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_IN("PDM_RX2", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_IN("PDM_RX3", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_OUT("PDM_TX", NULL, 0, SND_SOC_NOPM, 0, 0), + + SND_SOC_DAPM_INPUT("AMIC1"), + SND_SOC_DAPM_INPUT("AMIC3"), + SND_SOC_DAPM_INPUT("AMIC2"), + SND_SOC_DAPM_OUTPUT("HEADPHONE"), + + /* RX stuff */ + SND_SOC_DAPM_SUPPLY("INT_LDO_H", SND_SOC_NOPM, 1, 0, NULL, 0), + + SND_SOC_DAPM_PGA("HPHL PA", CDC_A_RX_HPH_CNP_EN, 5, 0, NULL, 0), + SND_SOC_DAPM_MUX("HPHL", SND_SOC_NOPM, 0, 0, &hphl_mux), + SND_SOC_DAPM_MIXER("HPHL DAC", CDC_A_RX_HPH_L_PA_DAC_CTL, 3, 0, NULL, + 0), + SND_SOC_DAPM_PGA("HPHR PA", CDC_A_RX_HPH_CNP_EN, 4, 0, NULL, 0), + SND_SOC_DAPM_MUX("HPHR", SND_SOC_NOPM, 0, 0, &hphr_mux), + SND_SOC_DAPM_MIXER("HPHR DAC", CDC_A_RX_HPH_R_PA_DAC_CTL, 3, 0, NULL, + 0), + SND_SOC_DAPM_MIXER("SPK DAC", SND_SOC_NOPM, 0, 0, + spkr_switch, ARRAY_SIZE(spkr_switch)), + + /* Speaker */ + SND_SOC_DAPM_OUTPUT("SPK_OUT"), + SND_SOC_DAPM_PGA_E("SPK PA", CDC_A_SPKR_DRV_CTL, + 6, 0, NULL, 0, + pm8916_wcd_analog_enable_spk_pa, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | + 
SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD), + SND_SOC_DAPM_REGULATOR_SUPPLY("vdd-micbias", 0, 0), + SND_SOC_DAPM_SUPPLY("CP", CDC_A_NCP_EN, 0, 0, NULL, 0), + + SND_SOC_DAPM_SUPPLY("DAC_REF", CDC_A_RX_COM_BIAS_DAC, 0, 0, NULL, 0), + SND_SOC_DAPM_SUPPLY("RX_BIAS", CDC_A_RX_COM_BIAS_DAC, 7, 0, NULL, 0), + + /* TX */ + SND_SOC_DAPM_SUPPLY("MIC BIAS Internal1", CDC_A_MICB_1_EN, 7, 0, + pm8916_wcd_analog_enable_micbias_int1, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | + SND_SOC_DAPM_POST_PMD), + SND_SOC_DAPM_SUPPLY("MIC BIAS Internal2", CDC_A_MICB_2_EN, 7, 0, + pm8916_wcd_analog_enable_micbias_int2, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | + SND_SOC_DAPM_POST_PMD), + + SND_SOC_DAPM_SUPPLY("MIC BIAS External1", CDC_A_MICB_1_EN, 7, 0, + pm8916_wcd_analog_enable_micbias_ext1, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), + SND_SOC_DAPM_SUPPLY("MIC BIAS External2", CDC_A_MICB_2_EN, 7, 0, + pm8916_wcd_analog_enable_micbias_ext2, + SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), + + SND_SOC_DAPM_ADC_E("ADC1", NULL, CDC_A_TX_1_EN, 7, 0, + pm8916_wcd_analog_enable_adc, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | + SND_SOC_DAPM_POST_PMD), + SND_SOC_DAPM_ADC_E("ADC2_INP2", NULL, CDC_A_TX_2_EN, 7, 0, + pm8916_wcd_analog_enable_adc, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | + SND_SOC_DAPM_POST_PMD), + SND_SOC_DAPM_ADC_E("ADC2_INP3", NULL, CDC_A_TX_3_EN, 7, 0, + pm8916_wcd_analog_enable_adc, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | + SND_SOC_DAPM_POST_PMD), + + SND_SOC_DAPM_MIXER("ADC2", SND_SOC_NOPM, 0, 0, NULL, 0), + SND_SOC_DAPM_MIXER("ADC3", SND_SOC_NOPM, 0, 0, NULL, 0), + + SND_SOC_DAPM_MUX("ADC2 MUX", SND_SOC_NOPM, 0, 0, &tx_adc2_mux), + SND_SOC_DAPM_MUX("RDAC2 MUX", SND_SOC_NOPM, 0, 0, &rdac2_mux), + + /* Analog path clocks */ + SND_SOC_DAPM_SUPPLY("EAR_HPHR_CLK", CDC_D_CDC_ANA_CLK_CTL, 0, 0, NULL, + 0), + SND_SOC_DAPM_SUPPLY("EAR_HPHL_CLK", CDC_D_CDC_ANA_CLK_CTL, 1, 0, NULL, + 0), + SND_SOC_DAPM_SUPPLY("SPKR_CLK", CDC_D_CDC_ANA_CLK_CTL, 
4, 0, NULL, 0), + SND_SOC_DAPM_SUPPLY("TXA_CLK25", CDC_D_CDC_ANA_CLK_CTL, 5, 0, NULL, 0), + + /* Digital path clocks */ + + SND_SOC_DAPM_SUPPLY("RXD1_CLK", CDC_D_CDC_DIG_CLK_CTL, 0, 0, NULL, 0), + SND_SOC_DAPM_SUPPLY("RXD2_CLK", CDC_D_CDC_DIG_CLK_CTL, 1, 0, NULL, 0), + SND_SOC_DAPM_SUPPLY("RXD3_CLK", CDC_D_CDC_DIG_CLK_CTL, 2, 0, NULL, 0), + + SND_SOC_DAPM_SUPPLY("TXD_CLK", CDC_D_CDC_DIG_CLK_CTL, 4, 0, NULL, 0), + SND_SOC_DAPM_SUPPLY("NCP_CLK", CDC_D_CDC_DIG_CLK_CTL, 6, 0, NULL, 0), + SND_SOC_DAPM_SUPPLY("RXD_PDM_CLK", CDC_D_CDC_DIG_CLK_CTL, 7, 0, NULL, + 0), + + /* System Clock source */ + SND_SOC_DAPM_SUPPLY("A_MCLK", CDC_D_CDC_TOP_CLK_CTL, 2, 0, NULL, 0), + /* TX ADC and RX DAC Clock source. */ + SND_SOC_DAPM_SUPPLY("A_MCLK2", CDC_D_CDC_TOP_CLK_CTL, 3, 0, NULL, 0), +}; + +/* The codec registers live on the parent (PMIC) device's regmap. */ +static struct regmap *pm8916_get_regmap(struct device *dev) +{ + return dev_get_regmap(dev->parent, NULL); +} + +/* DAI startup: release the codec digital core from software reset. */ +static int pm8916_wcd_analog_startup(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + snd_soc_update_bits(dai->codec, CDC_D_CDC_RST_CTL, + RST_CTL_DIG_SW_RST_N_MASK, + RST_CTL_DIG_SW_RST_N_REMOVE_RESET); + + return 0; +} + +/* DAI shutdown: put the codec digital core back into software reset. */ +static void pm8916_wcd_analog_shutdown(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + snd_soc_update_bits(dai->codec, CDC_D_CDC_RST_CTL, + RST_CTL_DIG_SW_RST_N_MASK, 0); +} + +/* const: never written at runtime; snd_soc_dai_driver.ops takes a const pointer */ +static const struct snd_soc_dai_ops pm8916_wcd_analog_dai_ops = { + .startup = pm8916_wcd_analog_startup, + .shutdown = pm8916_wcd_analog_shutdown, +}; + +static struct snd_soc_dai_driver pm8916_wcd_analog_dai[] = { + [0] = { + .name = "pm8916_wcd_analog_pdm_rx", + .id = 0, + .playback = { + .stream_name = "PDM Playback", + .rates = MSM8916_WCD_ANALOG_RATES, + .formats = MSM8916_WCD_ANALOG_FORMATS, + .channels_min = 1, + .channels_max = 3, + }, + .ops = &pm8916_wcd_analog_dai_ops, + }, + [1] = { + .name = "pm8916_wcd_analog_pdm_tx", + .id = 1, + .capture = { + .stream_name = "PDM Capture", + .rates = MSM8916_WCD_ANALOG_RATES, + .formats = 
MSM8916_WCD_ANALOG_FORMATS, + .channels_min = 1, + .channels_max = 4, + }, + .ops = &pm8916_wcd_analog_dai_ops, + }, +}; + +static struct snd_soc_codec_driver pm8916_wcd_analog = { + .probe = pm8916_wcd_analog_probe, + .remove = pm8916_wcd_analog_remove, + .get_regmap = pm8916_get_regmap, + .component_driver = { + .controls = pm8916_wcd_analog_snd_controls, + .num_controls = ARRAY_SIZE(pm8916_wcd_analog_snd_controls), + .dapm_widgets = pm8916_wcd_analog_dapm_widgets, + .num_dapm_widgets = ARRAY_SIZE(pm8916_wcd_analog_dapm_widgets), + .dapm_routes = pm8916_wcd_analog_audio_map, + .num_dapm_routes = ARRAY_SIZE(pm8916_wcd_analog_audio_map), + }, +}; + +static int pm8916_wcd_analog_parse_dt(struct device *dev, + struct pm8916_wcd_analog_priv *priv) +{ + + if (of_property_read_bool(dev->of_node, "qcom,micbias1-ext-cap")) + priv->micbias1_cap_mode = MICB_1_EN_EXT_BYP_CAP; + else + priv->micbias1_cap_mode = MICB_1_EN_NO_EXT_BYP_CAP; + + if (of_property_read_bool(dev->of_node, "qcom,micbias2-ext-cap")) + priv->micbias2_cap_mode = MICB_1_EN_EXT_BYP_CAP; + else + priv->micbias2_cap_mode = MICB_1_EN_NO_EXT_BYP_CAP; + + return 0; +} + +static int pm8916_wcd_analog_spmi_probe(struct platform_device *pdev) +{ + struct pm8916_wcd_analog_priv *priv; + struct device *dev = &pdev->dev; + int ret, i; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + ret = pm8916_wcd_analog_parse_dt(dev, priv); + if (ret < 0) + return ret; + + priv->mclk = devm_clk_get(dev, "mclk"); + if (IS_ERR(priv->mclk)) { + dev_err(dev, "failed to get mclk\n"); + return PTR_ERR(priv->mclk); + } + + for (i = 0; i < ARRAY_SIZE(supply_names); i++) + priv->supplies[i].supply = supply_names[i]; + + ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(priv->supplies), + priv->supplies); + if (ret) { + dev_err(dev, "Failed to get regulator supplies %d\n", ret); + return ret; + } + + ret = clk_prepare_enable(priv->mclk); + if (ret < 0) { + dev_err(dev, "failed to enable mclk %d\n", 
ret); + return ret; + } + + dev_set_drvdata(dev, priv); + + return snd_soc_register_codec(dev, &pm8916_wcd_analog, + pm8916_wcd_analog_dai, + ARRAY_SIZE(pm8916_wcd_analog_dai)); +} + +static int pm8916_wcd_analog_spmi_remove(struct platform_device *pdev) +{ + struct pm8916_wcd_analog_priv *priv = dev_get_drvdata(&pdev->dev); + + snd_soc_unregister_codec(&pdev->dev); + clk_disable_unprepare(priv->mclk); + + return 0; +} + +static const struct of_device_id pm8916_wcd_analog_spmi_match_table[] = { + { .compatible = "qcom,pm8916-wcd-analog-codec", }, + { } +}; + +static struct platform_driver pm8916_wcd_analog_spmi_driver = { + .driver = { + .name = "qcom,pm8916-wcd-spmi-codec", + .of_match_table = pm8916_wcd_analog_spmi_match_table, + }, + .probe = pm8916_wcd_analog_spmi_probe, + .remove = pm8916_wcd_analog_spmi_remove, +}; + +module_platform_driver(pm8916_wcd_analog_spmi_driver); + +MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>"); +MODULE_DESCRIPTION("PMIC PM8916 WCD Analog Codec driver"); +MODULE_LICENSE("GPL v2"); diff --git a/sound/soc/codecs/msm8916-wcd-digital.c b/sound/soc/codecs/msm8916-wcd-digital.c new file mode 100644 index 000000000000..5c3850008374 --- /dev/null +++ b/sound/soc/codecs/msm8916-wcd-digital.c @@ -0,0 +1,939 @@ +/* Copyright (c) 2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/module.h> +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/delay.h> +#include <linux/types.h> +#include <linux/clk.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/mfd/syscon.h> +#include <sound/soc.h> +#include <sound/pcm.h> +#include <sound/pcm_params.h> +#include <sound/tlv.h> + +#define LPASS_CDC_CLK_RX_RESET_CTL (0x000) +#define LPASS_CDC_CLK_TX_RESET_B1_CTL (0x004) +#define CLK_RX_RESET_B1_CTL_TX1_RESET_MASK BIT(0) +#define CLK_RX_RESET_B1_CTL_TX2_RESET_MASK BIT(1) +#define LPASS_CDC_CLK_DMIC_B1_CTL (0x008) +#define DMIC_B1_CTL_DMIC0_CLK_SEL_MASK GENMASK(3, 1) +#define DMIC_B1_CTL_DMIC0_CLK_SEL_DIV2 (0x0 << 1) +#define DMIC_B1_CTL_DMIC0_CLK_SEL_DIV3 (0x1 << 1) +#define DMIC_B1_CTL_DMIC0_CLK_SEL_DIV4 (0x2 << 1) +#define DMIC_B1_CTL_DMIC0_CLK_SEL_DIV6 (0x3 << 1) +#define DMIC_B1_CTL_DMIC0_CLK_SEL_DIV16 (0x4 << 1) +#define DMIC_B1_CTL_DMIC0_CLK_EN_MASK BIT(0) +#define DMIC_B1_CTL_DMIC0_CLK_EN_ENABLE BIT(0) + +#define LPASS_CDC_CLK_RX_I2S_CTL (0x00C) +#define RX_I2S_CTL_RX_I2S_MODE_MASK BIT(5) +#define RX_I2S_CTL_RX_I2S_MODE_16 BIT(5) +#define RX_I2S_CTL_RX_I2S_MODE_32 0 +#define RX_I2S_CTL_RX_I2S_FS_RATE_MASK GENMASK(2, 0) +#define RX_I2S_CTL_RX_I2S_FS_RATE_F_8_KHZ 0x0 +#define RX_I2S_CTL_RX_I2S_FS_RATE_F_16_KHZ 0x1 +#define RX_I2S_CTL_RX_I2S_FS_RATE_F_32_KHZ 0x2 +#define RX_I2S_CTL_RX_I2S_FS_RATE_F_48_KHZ 0x3 +#define RX_I2S_CTL_RX_I2S_FS_RATE_F_96_KHZ 0x4 +#define RX_I2S_CTL_RX_I2S_FS_RATE_F_192_KHZ 0x5 +#define LPASS_CDC_CLK_TX_I2S_CTL (0x010) +#define TX_I2S_CTL_TX_I2S_MODE_MASK BIT(5) +#define TX_I2S_CTL_TX_I2S_MODE_16 BIT(5) +#define TX_I2S_CTL_TX_I2S_MODE_32 0 +#define TX_I2S_CTL_TX_I2S_FS_RATE_MASK GENMASK(2, 0) +#define TX_I2S_CTL_TX_I2S_FS_RATE_F_8_KHZ 0x0 +#define TX_I2S_CTL_TX_I2S_FS_RATE_F_16_KHZ 0x1 +#define TX_I2S_CTL_TX_I2S_FS_RATE_F_32_KHZ 0x2 +#define TX_I2S_CTL_TX_I2S_FS_RATE_F_48_KHZ 0x3 +#define TX_I2S_CTL_TX_I2S_FS_RATE_F_96_KHZ 0x4 +#define 
TX_I2S_CTL_TX_I2S_FS_RATE_F_192_KHZ 0x5 + +#define LPASS_CDC_CLK_OTHR_RESET_B1_CTL (0x014) +#define LPASS_CDC_CLK_TX_CLK_EN_B1_CTL (0x018) +#define LPASS_CDC_CLK_OTHR_CTL (0x01C) +#define LPASS_CDC_CLK_RX_B1_CTL (0x020) +#define LPASS_CDC_CLK_MCLK_CTL (0x024) +#define MCLK_CTL_MCLK_EN_MASK BIT(0) +#define MCLK_CTL_MCLK_EN_ENABLE BIT(0) +#define MCLK_CTL_MCLK_EN_DISABLE 0 +#define LPASS_CDC_CLK_PDM_CTL (0x028) +#define LPASS_CDC_CLK_PDM_CTL_PDM_EN_MASK BIT(0) +#define LPASS_CDC_CLK_PDM_CTL_PDM_EN BIT(0) +#define LPASS_CDC_CLK_PDM_CTL_PDM_CLK_SEL_MASK BIT(1) +#define LPASS_CDC_CLK_PDM_CTL_PDM_CLK_SEL_FB BIT(1) +#define LPASS_CDC_CLK_PDM_CTL_PDM_CLK_PDM_CLK 0 + +#define LPASS_CDC_CLK_SD_CTL (0x02C) +#define LPASS_CDC_RX1_B1_CTL (0x040) +#define LPASS_CDC_RX2_B1_CTL (0x060) +#define LPASS_CDC_RX3_B1_CTL (0x080) +#define LPASS_CDC_RX1_B2_CTL (0x044) +#define LPASS_CDC_RX2_B2_CTL (0x064) +#define LPASS_CDC_RX3_B2_CTL (0x084) +#define LPASS_CDC_RX1_B3_CTL (0x048) +#define LPASS_CDC_RX2_B3_CTL (0x068) +#define LPASS_CDC_RX3_B3_CTL (0x088) +#define LPASS_CDC_RX1_B4_CTL (0x04C) +#define LPASS_CDC_RX2_B4_CTL (0x06C) +#define LPASS_CDC_RX3_B4_CTL (0x08C) +#define LPASS_CDC_RX1_B5_CTL (0x050) +#define LPASS_CDC_RX2_B5_CTL (0x070) +#define LPASS_CDC_RX3_B5_CTL (0x090) +#define LPASS_CDC_RX1_B6_CTL (0x054) +#define RXn_B6_CTL_MUTE_MASK BIT(0) +#define RXn_B6_CTL_MUTE_ENABLE BIT(0) +#define RXn_B6_CTL_MUTE_DISABLE 0 +#define LPASS_CDC_RX2_B6_CTL (0x074) +#define LPASS_CDC_RX3_B6_CTL (0x094) +#define LPASS_CDC_RX1_VOL_CTL_B1_CTL (0x058) +#define LPASS_CDC_RX2_VOL_CTL_B1_CTL (0x078) +#define LPASS_CDC_RX3_VOL_CTL_B1_CTL (0x098) +#define LPASS_CDC_RX1_VOL_CTL_B2_CTL (0x05C) +#define LPASS_CDC_RX2_VOL_CTL_B2_CTL (0x07C) +#define LPASS_CDC_RX3_VOL_CTL_B2_CTL (0x09C) +#define LPASS_CDC_TOP_GAIN_UPDATE (0x0A0) +#define LPASS_CDC_TOP_CTL (0x0A4) +#define TOP_CTL_DIG_MCLK_FREQ_MASK BIT(0) +#define TOP_CTL_DIG_MCLK_FREQ_F_12_288MHZ 0 +#define TOP_CTL_DIG_MCLK_FREQ_F_9_6MHZ BIT(0) + +#define 
LPASS_CDC_DEBUG_DESER1_CTL (0x0E0) +#define LPASS_CDC_DEBUG_DESER2_CTL (0x0E4) +#define LPASS_CDC_DEBUG_B1_CTL_CFG (0x0E8) +#define LPASS_CDC_DEBUG_B2_CTL_CFG (0x0EC) +#define LPASS_CDC_DEBUG_B3_CTL_CFG (0x0F0) +#define LPASS_CDC_IIR1_GAIN_B1_CTL (0x100) +#define LPASS_CDC_IIR2_GAIN_B1_CTL (0x140) +#define LPASS_CDC_IIR1_GAIN_B2_CTL (0x104) +#define LPASS_CDC_IIR2_GAIN_B2_CTL (0x144) +#define LPASS_CDC_IIR1_GAIN_B3_CTL (0x108) +#define LPASS_CDC_IIR2_GAIN_B3_CTL (0x148) +#define LPASS_CDC_IIR1_GAIN_B4_CTL (0x10C) +#define LPASS_CDC_IIR2_GAIN_B4_CTL (0x14C) +#define LPASS_CDC_IIR1_GAIN_B5_CTL (0x110) +#define LPASS_CDC_IIR2_GAIN_B5_CTL (0x150) +#define LPASS_CDC_IIR1_GAIN_B6_CTL (0x114) +#define LPASS_CDC_IIR2_GAIN_B6_CTL (0x154) +#define LPASS_CDC_IIR1_GAIN_B7_CTL (0x118) +#define LPASS_CDC_IIR2_GAIN_B7_CTL (0x158) +#define LPASS_CDC_IIR1_GAIN_B8_CTL (0x11C) +#define LPASS_CDC_IIR2_GAIN_B8_CTL (0x15C) +#define LPASS_CDC_IIR1_CTL (0x120) +#define LPASS_CDC_IIR2_CTL (0x160) +#define LPASS_CDC_IIR1_GAIN_TIMER_CTL (0x124) +#define LPASS_CDC_IIR2_GAIN_TIMER_CTL (0x164) +#define LPASS_CDC_IIR1_COEF_B1_CTL (0x128) +#define LPASS_CDC_IIR2_COEF_B1_CTL (0x168) +#define LPASS_CDC_IIR1_COEF_B2_CTL (0x12C) +#define LPASS_CDC_IIR2_COEF_B2_CTL (0x16C) +#define LPASS_CDC_CONN_RX1_B1_CTL (0x180) +#define LPASS_CDC_CONN_RX1_B2_CTL (0x184) +#define LPASS_CDC_CONN_RX1_B3_CTL (0x188) +#define LPASS_CDC_CONN_RX2_B1_CTL (0x18C) +#define LPASS_CDC_CONN_RX2_B2_CTL (0x190) +#define LPASS_CDC_CONN_RX2_B3_CTL (0x194) +#define LPASS_CDC_CONN_RX3_B1_CTL (0x198) +#define LPASS_CDC_CONN_RX3_B2_CTL (0x19C) +#define LPASS_CDC_CONN_TX_B1_CTL (0x1A0) +#define LPASS_CDC_CONN_EQ1_B1_CTL (0x1A8) +#define LPASS_CDC_CONN_EQ1_B2_CTL (0x1AC) +#define LPASS_CDC_CONN_EQ1_B3_CTL (0x1B0) +#define LPASS_CDC_CONN_EQ1_B4_CTL (0x1B4) +#define LPASS_CDC_CONN_EQ2_B1_CTL (0x1B8) +#define LPASS_CDC_CONN_EQ2_B2_CTL (0x1BC) +#define LPASS_CDC_CONN_EQ2_B3_CTL (0x1C0) +#define LPASS_CDC_CONN_EQ2_B4_CTL (0x1C4) +#define 
LPASS_CDC_CONN_TX_I2S_SD1_CTL (0x1C8) +#define LPASS_CDC_TX1_VOL_CTL_TIMER (0x280) +#define LPASS_CDC_TX2_VOL_CTL_TIMER (0x2A0) +#define LPASS_CDC_TX1_VOL_CTL_GAIN (0x284) +#define LPASS_CDC_TX2_VOL_CTL_GAIN (0x2A4) +#define LPASS_CDC_TX1_VOL_CTL_CFG (0x288) +#define TX_VOL_CTL_CFG_MUTE_EN_MASK BIT(0) +#define TX_VOL_CTL_CFG_MUTE_EN_ENABLE BIT(0) + +#define LPASS_CDC_TX2_VOL_CTL_CFG (0x2A8) +#define LPASS_CDC_TX1_MUX_CTL (0x28C) +#define TX_MUX_CTL_ADC_DMIC_SEL_MASK BIT(0) +#define TX_MUX_CTL_ADC_DMIC_SEL_DMIC BIT(0) +#define TX_MUX_CTL_ADC_DMIC_SEL_ADC 0 +#define TX_MUX_CTL_CUT_OFF_FREQ_MASK GENMASK(5, 4) +#define TX_MUX_CTL_CUT_OFF_FREQ_SHIFT 4 +#define TX_MUX_CTL_CF_NEG_3DB_4HZ (0x0 << 4) +#define TX_MUX_CTL_CF_NEG_3DB_75HZ (0x1 << 4) +#define TX_MUX_CTL_CF_NEG_3DB_150HZ (0x2 << 4) +#define TX_MUX_CTL_HPF_BP_SEL_MASK BIT(3) +#define TX_MUX_CTL_HPF_BP_SEL_BYPASS BIT(3) +#define TX_MUX_CTL_HPF_BP_SEL_NO_BYPASS 0 + +#define LPASS_CDC_TX2_MUX_CTL (0x2AC) +#define LPASS_CDC_TX1_CLK_FS_CTL (0x290) +#define LPASS_CDC_TX2_CLK_FS_CTL (0x2B0) +#define LPASS_CDC_TX1_DMIC_CTL (0x294) +#define LPASS_CDC_TX2_DMIC_CTL (0x2B4) +#define TXN_DMIC_CTL_CLK_SEL_MASK GENMASK(2, 0) +#define TXN_DMIC_CTL_CLK_SEL_DIV2 0x0 +#define TXN_DMIC_CTL_CLK_SEL_DIV3 0x1 +#define TXN_DMIC_CTL_CLK_SEL_DIV4 0x2 +#define TXN_DMIC_CTL_CLK_SEL_DIV6 0x3 +#define TXN_DMIC_CTL_CLK_SEL_DIV16 0x4 + +#define DEFAULT_MCLK_RATE 9600000 + +#define MSM8916_WCD_DIGITAL_RATES (SNDRV_PCM_RATE_8000 | \ + SNDRV_PCM_RATE_16000 | \ + SNDRV_PCM_RATE_32000 | \ + SNDRV_PCM_RATE_48000) +#define MSM8916_WCD_DIGITAL_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\ + SNDRV_PCM_FMTBIT_S24_LE) + +struct msm8916_wcd_digital_priv { + struct clk *ahbclk, *mclk; +}; + +static const unsigned long rx_gain_reg[] = { + LPASS_CDC_RX1_VOL_CTL_B2_CTL, + LPASS_CDC_RX2_VOL_CTL_B2_CTL, + LPASS_CDC_RX3_VOL_CTL_B2_CTL, +}; + +static const unsigned long tx_gain_reg[] = { + LPASS_CDC_TX1_VOL_CTL_GAIN, + LPASS_CDC_TX2_VOL_CTL_GAIN, +}; + +static const char 
*const rx_mix1_text[] = { + "ZERO", "IIR1", "IIR2", "RX1", "RX2", "RX3" +}; + +static const char *const dec_mux_text[] = { + "ZERO", "ADC1", "ADC2", "ADC3", "DMIC1", "DMIC2" +}; +static const char *const rx_mix2_text[] = { "ZERO", "IIR1", "IIR2" }; +static const char *const adc2_mux_text[] = { "ZERO", "INP2", "INP3" }; + +/* RX1 MIX1: INP1/INP2 select in B1_CTL, INP3 selects in B2_CTL */ +static const struct soc_enum rx_mix1_inp_enum[] = { + SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX1_B1_CTL, 0, 6, rx_mix1_text), + SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX1_B1_CTL, 3, 6, rx_mix1_text), + SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX1_B2_CTL, 0, 6, rx_mix1_text), +}; + +/* RX1 MIX2 */ +static const struct soc_enum rx_mix2_inp1_chain_enum = SOC_ENUM_SINGLE( + LPASS_CDC_CONN_RX1_B3_CTL, 0, 3, rx_mix2_text); + +/* RX2 MIX1: INP3 lives in B2_CTL (mirrors the RX1 layout), not B1_CTL */ +static const struct soc_enum rx2_mix1_inp_enum[] = { + SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX2_B1_CTL, 0, 6, rx_mix1_text), + SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX2_B1_CTL, 3, 6, rx_mix1_text), + SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX2_B2_CTL, 0, 6, rx_mix1_text), +}; + +/* RX2 MIX2 */ +static const struct soc_enum rx2_mix2_inp1_chain_enum = SOC_ENUM_SINGLE( + LPASS_CDC_CONN_RX2_B3_CTL, 0, 3, rx_mix2_text); + +/* RX3 MIX1: INP3 lives in B2_CTL (mirrors the RX1 layout), not B1_CTL */ +static const struct soc_enum rx3_mix1_inp_enum[] = { + SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX3_B1_CTL, 0, 6, rx_mix1_text), + SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX3_B1_CTL, 3, 6, rx_mix1_text), + SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX3_B2_CTL, 0, 6, rx_mix1_text), +}; + +/* DEC */ +static const struct soc_enum dec1_mux_enum = SOC_ENUM_SINGLE( + LPASS_CDC_CONN_TX_B1_CTL, 0, 6, dec_mux_text); +static const struct soc_enum dec2_mux_enum = SOC_ENUM_SINGLE( + LPASS_CDC_CONN_TX_B1_CTL, 3, 6, dec_mux_text); + +/* RDAC2 MUX */ +static const struct snd_kcontrol_new dec1_mux = SOC_DAPM_ENUM( + "DEC1 MUX Mux", dec1_mux_enum); +static const struct snd_kcontrol_new dec2_mux = SOC_DAPM_ENUM( + "DEC2 MUX Mux", dec2_mux_enum); +static const struct snd_kcontrol_new rx_mix1_inp1_mux = SOC_DAPM_ENUM( + "RX1 MIX1 INP1 Mux", rx_mix1_inp_enum[0]); 
+static const struct snd_kcontrol_new rx_mix1_inp2_mux = SOC_DAPM_ENUM( + "RX1 MIX1 INP2 Mux", rx_mix1_inp_enum[1]); +static const struct snd_kcontrol_new rx_mix1_inp3_mux = SOC_DAPM_ENUM( + "RX1 MIX1 INP3 Mux", rx_mix1_inp_enum[2]); +static const struct snd_kcontrol_new rx2_mix1_inp1_mux = SOC_DAPM_ENUM( + "RX2 MIX1 INP1 Mux", rx2_mix1_inp_enum[0]); +static const struct snd_kcontrol_new rx2_mix1_inp2_mux = SOC_DAPM_ENUM( + "RX2 MIX1 INP2 Mux", rx2_mix1_inp_enum[1]); +static const struct snd_kcontrol_new rx2_mix1_inp3_mux = SOC_DAPM_ENUM( + "RX2 MIX1 INP3 Mux", rx2_mix1_inp_enum[2]); +static const struct snd_kcontrol_new rx3_mix1_inp1_mux = SOC_DAPM_ENUM( + "RX3 MIX1 INP1 Mux", rx3_mix1_inp_enum[0]); +static const struct snd_kcontrol_new rx3_mix1_inp2_mux = SOC_DAPM_ENUM( + "RX3 MIX1 INP2 Mux", rx3_mix1_inp_enum[1]); +static const struct snd_kcontrol_new rx3_mix1_inp3_mux = SOC_DAPM_ENUM( + "RX3 MIX1 INP3 Mux", rx3_mix1_inp_enum[2]); + +/* Digital Gain control -38.4 dB to +38.4 dB in 0.3 dB steps */ +static const DECLARE_TLV_DB_SCALE(digital_gain, -3840, 30, 0); + +/* Cutoff Freq for High Pass Filter at -3dB */ +static const char * const hpf_cutoff_text[] = { + "4Hz", "75Hz", "150Hz", +}; + +static SOC_ENUM_SINGLE_DECL(tx1_hpf_cutoff_enum, LPASS_CDC_TX1_MUX_CTL, 4, + hpf_cutoff_text); +static SOC_ENUM_SINGLE_DECL(tx2_hpf_cutoff_enum, LPASS_CDC_TX2_MUX_CTL, 4, + hpf_cutoff_text); + +/* cut off for dc blocker inside rx chain */ +static const char * const dc_blocker_cutoff_text[] = { + "4Hz", "75Hz", "150Hz", +}; + +static SOC_ENUM_SINGLE_DECL(rx1_dcb_cutoff_enum, LPASS_CDC_RX1_B4_CTL, 0, + dc_blocker_cutoff_text); +static SOC_ENUM_SINGLE_DECL(rx2_dcb_cutoff_enum, LPASS_CDC_RX2_B4_CTL, 0, + dc_blocker_cutoff_text); +static SOC_ENUM_SINGLE_DECL(rx3_dcb_cutoff_enum, LPASS_CDC_RX3_B4_CTL, 0, + dc_blocker_cutoff_text); + +static const struct snd_kcontrol_new msm8916_wcd_digital_snd_controls[] = { + SOC_SINGLE_S8_TLV("RX1 Digital Volume", LPASS_CDC_RX1_VOL_CTL_B2_CTL, + 
-128, 127, digital_gain), + SOC_SINGLE_S8_TLV("RX2 Digital Volume", LPASS_CDC_RX2_VOL_CTL_B2_CTL, + -128, 127, digital_gain), + SOC_SINGLE_S8_TLV("RX3 Digital Volume", LPASS_CDC_RX3_VOL_CTL_B2_CTL, + -128, 127, digital_gain), + SOC_SINGLE_S8_TLV("TX1 Digital Volume", LPASS_CDC_TX1_VOL_CTL_GAIN, + -128, 127, digital_gain), + SOC_SINGLE_S8_TLV("TX2 Digital Volume", LPASS_CDC_TX2_VOL_CTL_GAIN, + -128, 127, digital_gain), + SOC_ENUM("TX1 HPF Cutoff", tx1_hpf_cutoff_enum), + SOC_ENUM("TX2 HPF Cutoff", tx2_hpf_cutoff_enum), + SOC_SINGLE("TX1 HPF Switch", LPASS_CDC_TX1_MUX_CTL, 3, 1, 0), + SOC_SINGLE("TX2 HPF Switch", LPASS_CDC_TX2_MUX_CTL, 3, 1, 0), + SOC_ENUM("RX1 DCB Cutoff", rx1_dcb_cutoff_enum), + SOC_ENUM("RX2 DCB Cutoff", rx2_dcb_cutoff_enum), + SOC_ENUM("RX3 DCB Cutoff", rx3_dcb_cutoff_enum), + SOC_SINGLE("RX1 DCB Switch", LPASS_CDC_RX1_B5_CTL, 2, 1, 0), + SOC_SINGLE("RX2 DCB Switch", LPASS_CDC_RX2_B5_CTL, 2, 1, 0), + SOC_SINGLE("RX3 DCB Switch", LPASS_CDC_RX3_B5_CTL, 2, 1, 0), + SOC_SINGLE("RX1 Mute Switch", LPASS_CDC_RX1_B6_CTL, 0, 1, 0), + SOC_SINGLE("RX2 Mute Switch", LPASS_CDC_RX2_B6_CTL, 0, 1, 0), + SOC_SINGLE("RX3 Mute Switch", LPASS_CDC_RX3_B6_CTL, 0, 1, 0), +}; + +static int msm8916_wcd_digital_enable_interpolator( + struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, + int event) +{ + struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); + + switch (event) { + case SND_SOC_DAPM_POST_PMU: + /* apply the digital gain after the interpolator is enabled */ + usleep_range(10000, 10100); + snd_soc_write(codec, rx_gain_reg[w->shift], + snd_soc_read(codec, rx_gain_reg[w->shift])); + break; + } + return 0; +} + +static int msm8916_wcd_digital_enable_dec(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, + int event) +{ + struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); + unsigned int decimator = w->shift + 1; + u16 dec_reset_reg, tx_vol_ctl_reg, tx_mux_ctl_reg; + u8 dec_hpf_cut_of_freq; + + dec_reset_reg = 
LPASS_CDC_CLK_TX_RESET_B1_CTL; + tx_vol_ctl_reg = LPASS_CDC_TX1_VOL_CTL_CFG + 32 * (decimator - 1); + tx_mux_ctl_reg = LPASS_CDC_TX1_MUX_CTL + 32 * (decimator - 1); + + switch (event) { + case SND_SOC_DAPM_PRE_PMU: + /* Enable TX digital mute */ + snd_soc_update_bits(codec, tx_vol_ctl_reg, + TX_VOL_CTL_CFG_MUTE_EN_MASK, + TX_VOL_CTL_CFG_MUTE_EN_ENABLE); + dec_hpf_cut_of_freq = snd_soc_read(codec, tx_mux_ctl_reg) & + TX_MUX_CTL_CUT_OFF_FREQ_MASK; + dec_hpf_cut_of_freq >>= TX_MUX_CTL_CUT_OFF_FREQ_SHIFT; + if (dec_hpf_cut_of_freq != TX_MUX_CTL_CF_NEG_3DB_150HZ) { + /* set cut of freq to CF_MIN_3DB_150HZ (0x1) */ + snd_soc_update_bits(codec, tx_mux_ctl_reg, + TX_MUX_CTL_CUT_OFF_FREQ_MASK, + TX_MUX_CTL_CF_NEG_3DB_150HZ); + } + break; + case SND_SOC_DAPM_POST_PMU: + /* enable HPF */ + snd_soc_update_bits(codec, tx_mux_ctl_reg, + TX_MUX_CTL_HPF_BP_SEL_MASK, + TX_MUX_CTL_HPF_BP_SEL_NO_BYPASS); + /* apply the digital gain after the decimator is enabled */ + snd_soc_write(codec, tx_gain_reg[w->shift], + snd_soc_read(codec, tx_gain_reg[w->shift])); + snd_soc_update_bits(codec, tx_vol_ctl_reg, + TX_VOL_CTL_CFG_MUTE_EN_MASK, 0); + break; + case SND_SOC_DAPM_PRE_PMD: + snd_soc_update_bits(codec, tx_vol_ctl_reg, + TX_VOL_CTL_CFG_MUTE_EN_MASK, + TX_VOL_CTL_CFG_MUTE_EN_ENABLE); + snd_soc_update_bits(codec, tx_mux_ctl_reg, + TX_MUX_CTL_HPF_BP_SEL_MASK, + TX_MUX_CTL_HPF_BP_SEL_BYPASS); + break; + case SND_SOC_DAPM_POST_PMD: + snd_soc_update_bits(codec, dec_reset_reg, 1 << w->shift, + 1 << w->shift); + snd_soc_update_bits(codec, dec_reset_reg, 1 << w->shift, 0x0); + snd_soc_update_bits(codec, tx_mux_ctl_reg, + TX_MUX_CTL_HPF_BP_SEL_MASK, + TX_MUX_CTL_HPF_BP_SEL_BYPASS); + snd_soc_update_bits(codec, tx_vol_ctl_reg, + TX_VOL_CTL_CFG_MUTE_EN_MASK, 0); + break; + } + + return 0; +} + +static int msm8916_wcd_digital_enable_dmic(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, + int event) +{ + struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); + unsigned int dmic; 
+ int ret; + /* get dmic number out of widget name */ + char *dmic_num = strpbrk(w->name, "12"); + + if (dmic_num == NULL) { + dev_err(codec->dev, "Invalid DMIC\n"); + return -EINVAL; + } + ret = kstrtouint(dmic_num, 10, &dmic); + if (ret < 0 || dmic > 2) { + dev_err(codec->dev, "Invalid DMIC line on the codec\n"); + return -EINVAL; + } + + switch (event) { + case SND_SOC_DAPM_PRE_PMU: + snd_soc_update_bits(codec, LPASS_CDC_CLK_DMIC_B1_CTL, + DMIC_B1_CTL_DMIC0_CLK_SEL_MASK, + DMIC_B1_CTL_DMIC0_CLK_SEL_DIV3); + + switch (dmic) { + case 1: + snd_soc_update_bits(codec, LPASS_CDC_TX1_MUX_CTL, + TX_MUX_CTL_ADC_DMIC_SEL_MASK, + TX_MUX_CTL_ADC_DMIC_SEL_DMIC); + snd_soc_update_bits(codec, LPASS_CDC_TX1_DMIC_CTL, + TXN_DMIC_CTL_CLK_SEL_MASK, + TXN_DMIC_CTL_CLK_SEL_DIV3); + + break; + case 2: + snd_soc_update_bits(codec, LPASS_CDC_TX2_MUX_CTL, + TX_MUX_CTL_ADC_DMIC_SEL_MASK, + TX_MUX_CTL_ADC_DMIC_SEL_DMIC); + snd_soc_update_bits(codec, LPASS_CDC_TX2_DMIC_CTL, + TXN_DMIC_CTL_CLK_SEL_MASK, + TXN_DMIC_CTL_CLK_SEL_DIV3); + break; + } + break; + } + + return 0; +} + +static const struct snd_soc_dapm_widget msm8916_wcd_digital_dapm_widgets[] = { + /*RX stuff */ + SND_SOC_DAPM_AIF_IN("I2S RX1", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_IN("I2S RX2", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_IN("I2S RX3", NULL, 0, SND_SOC_NOPM, 0, 0), + + SND_SOC_DAPM_OUTPUT("PDM_RX1"), + SND_SOC_DAPM_OUTPUT("PDM_RX2"), + SND_SOC_DAPM_OUTPUT("PDM_RX3"), + + SND_SOC_DAPM_INPUT("LPASS_PDM_TX"), + + SND_SOC_DAPM_MIXER("RX1 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0), + SND_SOC_DAPM_MIXER("RX2 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0), + SND_SOC_DAPM_MIXER("RX3 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0), + + /* Interpolator */ + SND_SOC_DAPM_MIXER_E("RX1 INT", LPASS_CDC_CLK_RX_B1_CTL, 0, 0, NULL, + 0, msm8916_wcd_digital_enable_interpolator, + SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), + SND_SOC_DAPM_MIXER_E("RX2 INT", LPASS_CDC_CLK_RX_B1_CTL, 1, 0, NULL, + 0, msm8916_wcd_digital_enable_interpolator, + 
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), + SND_SOC_DAPM_MIXER_E("RX3 INT", LPASS_CDC_CLK_RX_B1_CTL, 2, 0, NULL, + 0, msm8916_wcd_digital_enable_interpolator, + SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), + SND_SOC_DAPM_MUX("RX1 MIX1 INP1", SND_SOC_NOPM, 0, 0, + &rx_mix1_inp1_mux), + SND_SOC_DAPM_MUX("RX1 MIX1 INP2", SND_SOC_NOPM, 0, 0, + &rx_mix1_inp2_mux), + SND_SOC_DAPM_MUX("RX1 MIX1 INP3", SND_SOC_NOPM, 0, 0, + &rx_mix1_inp3_mux), + SND_SOC_DAPM_MUX("RX2 MIX1 INP1", SND_SOC_NOPM, 0, 0, + &rx2_mix1_inp1_mux), + SND_SOC_DAPM_MUX("RX2 MIX1 INP2", SND_SOC_NOPM, 0, 0, + &rx2_mix1_inp2_mux), + SND_SOC_DAPM_MUX("RX2 MIX1 INP3", SND_SOC_NOPM, 0, 0, + &rx2_mix1_inp3_mux), + SND_SOC_DAPM_MUX("RX3 MIX1 INP1", SND_SOC_NOPM, 0, 0, + &rx3_mix1_inp1_mux), + SND_SOC_DAPM_MUX("RX3 MIX1 INP2", SND_SOC_NOPM, 0, 0, + &rx3_mix1_inp2_mux), + SND_SOC_DAPM_MUX("RX3 MIX1 INP3", SND_SOC_NOPM, 0, 0, + &rx3_mix1_inp3_mux), + + /* TX */ + SND_SOC_DAPM_MIXER("ADC1", SND_SOC_NOPM, 0, 0, NULL, 0), + SND_SOC_DAPM_MIXER("ADC2", SND_SOC_NOPM, 0, 0, NULL, 0), + SND_SOC_DAPM_MIXER("ADC3", SND_SOC_NOPM, 0, 0, NULL, 0), + + SND_SOC_DAPM_MUX_E("DEC1 MUX", LPASS_CDC_CLK_TX_CLK_EN_B1_CTL, 0, 0, + &dec1_mux, msm8916_wcd_digital_enable_dec, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | + SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD), + SND_SOC_DAPM_MUX_E("DEC2 MUX", LPASS_CDC_CLK_TX_CLK_EN_B1_CTL, 1, 0, + &dec2_mux, msm8916_wcd_digital_enable_dec, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | + SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD), + SND_SOC_DAPM_AIF_OUT("I2S TX1", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_OUT("I2S TX2", NULL, 0, SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_AIF_OUT("I2S TX3", NULL, 0, SND_SOC_NOPM, 0, 0), + + /* Digital Mic Inputs */ + SND_SOC_DAPM_ADC_E("DMIC1", NULL, SND_SOC_NOPM, 0, 0, + msm8916_wcd_digital_enable_dmic, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), + SND_SOC_DAPM_ADC_E("DMIC2", NULL, SND_SOC_NOPM, 0, 0, + msm8916_wcd_digital_enable_dmic, + 
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), + SND_SOC_DAPM_SUPPLY("DMIC_CLK", LPASS_CDC_CLK_DMIC_B1_CTL, 0, 0, + NULL, 0), + SND_SOC_DAPM_SUPPLY("RX_I2S_CLK", LPASS_CDC_CLK_RX_I2S_CTL, + 4, 0, NULL, 0), + SND_SOC_DAPM_SUPPLY("TX_I2S_CLK", LPASS_CDC_CLK_TX_I2S_CTL, 4, 0, + NULL, 0), + + SND_SOC_DAPM_SUPPLY("MCLK", SND_SOC_NOPM, 0, 0, NULL, 0), + SND_SOC_DAPM_SUPPLY("PDM_CLK", LPASS_CDC_CLK_PDM_CTL, 0, 0, NULL, 0), + /* Connectivity Clock */ + SND_SOC_DAPM_SUPPLY_S("CDC_CONN", -2, LPASS_CDC_CLK_OTHR_CTL, 2, 0, + NULL, 0), + +}; + +static int msm8916_wcd_digital_get_clks(struct platform_device *pdev, + struct msm8916_wcd_digital_priv *priv) +{ + struct device *dev = &pdev->dev; + + priv->ahbclk = devm_clk_get(dev, "ahbix-clk"); + if (IS_ERR(priv->ahbclk)) { + dev_err(dev, "failed to get ahbix clk\n"); + return PTR_ERR(priv->ahbclk); + } + + priv->mclk = devm_clk_get(dev, "mclk"); + if (IS_ERR(priv->mclk)) { + dev_err(dev, "failed to get mclk\n"); + return PTR_ERR(priv->mclk); + } + + return 0; +} + +static int msm8916_wcd_digital_codec_probe(struct snd_soc_codec *codec) +{ + struct msm8916_wcd_digital_priv *priv = dev_get_drvdata(codec->dev); + + snd_soc_codec_set_drvdata(codec, priv); + + return 0; +} + +static int msm8916_wcd_digital_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct snd_soc_dai *dai) +{ + u8 tx_fs_rate; + u8 rx_fs_rate; + + switch (params_rate(params)) { + case 8000: + tx_fs_rate = TX_I2S_CTL_TX_I2S_FS_RATE_F_8_KHZ; + rx_fs_rate = RX_I2S_CTL_RX_I2S_FS_RATE_F_8_KHZ; + break; + case 16000: + tx_fs_rate = TX_I2S_CTL_TX_I2S_FS_RATE_F_16_KHZ; + rx_fs_rate = RX_I2S_CTL_RX_I2S_FS_RATE_F_16_KHZ; + break; + case 32000: + tx_fs_rate = TX_I2S_CTL_TX_I2S_FS_RATE_F_32_KHZ; + rx_fs_rate = RX_I2S_CTL_RX_I2S_FS_RATE_F_32_KHZ; + break; + case 48000: + tx_fs_rate = TX_I2S_CTL_TX_I2S_FS_RATE_F_48_KHZ; + rx_fs_rate = RX_I2S_CTL_RX_I2S_FS_RATE_F_48_KHZ; + break; + default: + dev_err(dai->codec->dev, "Invalid sampling rate 
%d\n", + params_rate(params)); + return -EINVAL; + } + + switch (substream->stream) { + case SNDRV_PCM_STREAM_CAPTURE: + snd_soc_update_bits(dai->codec, LPASS_CDC_CLK_TX_I2S_CTL, + TX_I2S_CTL_TX_I2S_FS_RATE_MASK, tx_fs_rate); + break; + case SNDRV_PCM_STREAM_PLAYBACK: + snd_soc_update_bits(dai->codec, LPASS_CDC_CLK_RX_I2S_CTL, + RX_I2S_CTL_RX_I2S_FS_RATE_MASK, rx_fs_rate); + break; + default: + return -EINVAL; + } + + switch (params_format(params)) { + case SNDRV_PCM_FORMAT_S16_LE: + snd_soc_update_bits(dai->codec, LPASS_CDC_CLK_TX_I2S_CTL, + TX_I2S_CTL_TX_I2S_MODE_MASK, + TX_I2S_CTL_TX_I2S_MODE_16); + snd_soc_update_bits(dai->codec, LPASS_CDC_CLK_RX_I2S_CTL, + RX_I2S_CTL_RX_I2S_MODE_MASK, + RX_I2S_CTL_RX_I2S_MODE_16); + break; + case SNDRV_PCM_FORMAT_S24_LE: + snd_soc_update_bits(dai->codec, LPASS_CDC_CLK_TX_I2S_CTL, + TX_I2S_CTL_TX_I2S_MODE_MASK, + TX_I2S_CTL_TX_I2S_MODE_32); + snd_soc_update_bits(dai->codec, LPASS_CDC_CLK_RX_I2S_CTL, + RX_I2S_CTL_RX_I2S_MODE_MASK, + RX_I2S_CTL_RX_I2S_MODE_32); + break; + default: + dev_err(dai->dev, "%s: wrong format selected\n", __func__); + return -EINVAL; + } + + return 0; +} + +static const struct snd_soc_dapm_route msm8916_wcd_digital_audio_map[] = { + + {"I2S RX1", NULL, "AIF1 Playback"}, + {"I2S RX2", NULL, "AIF1 Playback"}, + {"I2S RX3", NULL, "AIF1 Playback"}, + + {"AIF1 Capture", NULL, "I2S TX1"}, + {"AIF1 Capture", NULL, "I2S TX2"}, + {"AIF1 Capture", NULL, "I2S TX3"}, + + /* Decimator Inputs */ + {"DEC1 MUX", "DMIC1", "DMIC1"}, + {"DEC1 MUX", "DMIC2", "DMIC2"}, + {"DEC1 MUX", "ADC1", "ADC1"}, + {"DEC1 MUX", "ADC2", "ADC2"}, + {"DEC1 MUX", "ADC3", "ADC3"}, + {"DEC1 MUX", NULL, "CDC_CONN"}, + + {"DEC2 MUX", "DMIC1", "DMIC1"}, + {"DEC2 MUX", "DMIC2", "DMIC2"}, + {"DEC2 MUX", "ADC1", "ADC1"}, + {"DEC2 MUX", "ADC2", "ADC2"}, + {"DEC2 MUX", "ADC3", "ADC3"}, + {"DEC2 MUX", NULL, "CDC_CONN"}, + + {"DMIC1", NULL, "DMIC_CLK"}, + {"DMIC2", NULL, "DMIC_CLK"}, + + {"I2S TX1", NULL, "DEC1 MUX"}, + {"I2S TX2", NULL, "DEC2 MUX"}, + 
+ {"I2S TX1", NULL, "TX_I2S_CLK"}, + {"I2S TX2", NULL, "TX_I2S_CLK"}, + + {"TX_I2S_CLK", NULL, "MCLK"}, + {"TX_I2S_CLK", NULL, "PDM_CLK"}, + + {"ADC1", NULL, "LPASS_PDM_TX"}, + {"ADC2", NULL, "LPASS_PDM_TX"}, + {"ADC3", NULL, "LPASS_PDM_TX"}, + + {"I2S RX1", NULL, "RX_I2S_CLK"}, + {"I2S RX2", NULL, "RX_I2S_CLK"}, + {"I2S RX3", NULL, "RX_I2S_CLK"}, + + {"RX_I2S_CLK", NULL, "PDM_CLK"}, + {"RX_I2S_CLK", NULL, "MCLK"}, + {"RX_I2S_CLK", NULL, "CDC_CONN"}, + + /* RX1 PATH.. */ + {"PDM_RX1", NULL, "RX1 INT"}, + {"RX1 INT", NULL, "RX1 MIX1"}, + + {"RX1 MIX1", NULL, "RX1 MIX1 INP1"}, + {"RX1 MIX1", NULL, "RX1 MIX1 INP2"}, + {"RX1 MIX1", NULL, "RX1 MIX1 INP3"}, + + {"RX1 MIX1 INP1", "RX1", "I2S RX1"}, + {"RX1 MIX1 INP1", "RX2", "I2S RX2"}, + {"RX1 MIX1 INP1", "RX3", "I2S RX3"}, + + {"RX1 MIX1 INP2", "RX1", "I2S RX1"}, + {"RX1 MIX1 INP2", "RX2", "I2S RX2"}, + {"RX1 MIX1 INP2", "RX3", "I2S RX3"}, + + {"RX1 MIX1 INP3", "RX1", "I2S RX1"}, + {"RX1 MIX1 INP3", "RX2", "I2S RX2"}, + {"RX1 MIX1 INP3", "RX3", "I2S RX3"}, + + /* RX2 PATH */ + {"PDM_RX2", NULL, "RX2 INT"}, + {"RX2 INT", NULL, "RX2 MIX1"}, + + {"RX2 MIX1", NULL, "RX2 MIX1 INP1"}, + {"RX2 MIX1", NULL, "RX2 MIX1 INP2"}, + {"RX2 MIX1", NULL, "RX2 MIX1 INP3"}, + + {"RX2 MIX1 INP1", "RX1", "I2S RX1"}, + {"RX2 MIX1 INP1", "RX2", "I2S RX2"}, + {"RX2 MIX1 INP1", "RX3", "I2S RX3"}, + + {"RX2 MIX1 INP2", "RX1", "I2S RX1"}, + {"RX2 MIX1 INP2", "RX2", "I2S RX2"}, + {"RX2 MIX1 INP2", "RX3", "I2S RX3"}, + + {"RX2 MIX1 INP3", "RX1", "I2S RX1"}, + {"RX2 MIX1 INP3", "RX2", "I2S RX2"}, + {"RX2 MIX1 INP3", "RX3", "I2S RX3"}, + + /* RX3 PATH */ + {"PDM_RX3", NULL, "RX3 INT"}, + {"RX3 INT", NULL, "RX3 MIX1"}, + + {"RX3 MIX1", NULL, "RX3 MIX1 INP1"}, + {"RX3 MIX1", NULL, "RX3 MIX1 INP2"}, + {"RX3 MIX1", NULL, "RX3 MIX1 INP3"}, + + {"RX3 MIX1 INP1", "RX1", "I2S RX1"}, + {"RX3 MIX1 INP1", "RX2", "I2S RX2"}, + {"RX3 MIX1 INP1", "RX3", "I2S RX3"}, + + {"RX3 MIX1 INP2", "RX1", "I2S RX1"}, + {"RX3 MIX1 INP2", "RX2", "I2S RX2"}, + {"RX3 MIX1 INP2", 
"RX3", "I2S RX3"}, + + {"RX3 MIX1 INP3", "RX1", "I2S RX1"}, + {"RX3 MIX1 INP3", "RX2", "I2S RX2"}, + {"RX3 MIX1 INP3", "RX3", "I2S RX3"}, + +}; + +static int msm8916_wcd_digital_startup(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + struct snd_soc_codec *codec = dai->codec; + struct msm8916_wcd_digital_priv *msm8916_wcd; + unsigned long mclk_rate; + + msm8916_wcd = snd_soc_codec_get_drvdata(codec); + snd_soc_update_bits(codec, LPASS_CDC_CLK_MCLK_CTL, + MCLK_CTL_MCLK_EN_MASK, + MCLK_CTL_MCLK_EN_ENABLE); + snd_soc_update_bits(codec, LPASS_CDC_CLK_PDM_CTL, + LPASS_CDC_CLK_PDM_CTL_PDM_CLK_SEL_MASK, + LPASS_CDC_CLK_PDM_CTL_PDM_CLK_SEL_FB); + + mclk_rate = clk_get_rate(msm8916_wcd->mclk); + switch (mclk_rate) { + case 12288000: + snd_soc_update_bits(codec, LPASS_CDC_TOP_CTL, + TOP_CTL_DIG_MCLK_FREQ_MASK, + TOP_CTL_DIG_MCLK_FREQ_F_12_288MHZ); + break; + case 9600000: + snd_soc_update_bits(codec, LPASS_CDC_TOP_CTL, + TOP_CTL_DIG_MCLK_FREQ_MASK, + TOP_CTL_DIG_MCLK_FREQ_F_9_6MHZ); + break; + default: + dev_err(codec->dev, "Invalid mclk rate %ld\n", mclk_rate); + break; + } + return 0; +} + +static void msm8916_wcd_digital_shutdown(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + snd_soc_update_bits(dai->codec, LPASS_CDC_CLK_PDM_CTL, + LPASS_CDC_CLK_PDM_CTL_PDM_CLK_SEL_MASK, 0); +} + +static struct snd_soc_dai_ops msm8916_wcd_digital_dai_ops = { + .startup = msm8916_wcd_digital_startup, + .shutdown = msm8916_wcd_digital_shutdown, + .hw_params = msm8916_wcd_digital_hw_params, +}; + +static struct snd_soc_dai_driver msm8916_wcd_digital_dai[] = { + [0] = { + .name = "msm8916_wcd_digital_i2s_rx1", + .id = 0, + .playback = { + .stream_name = "AIF1 Playback", + .rates = MSM8916_WCD_DIGITAL_RATES, + .formats = MSM8916_WCD_DIGITAL_FORMATS, + .channels_min = 1, + .channels_max = 3, + }, + .ops = &msm8916_wcd_digital_dai_ops, + }, + [1] = { + .name = "msm8916_wcd_digital_i2s_tx1", + .id = 1, + .capture = { + .stream_name = "AIF1 Capture", + 
.rates = MSM8916_WCD_DIGITAL_RATES, + .formats = MSM8916_WCD_DIGITAL_FORMATS, + .channels_min = 1, + .channels_max = 4, + }, + .ops = &msm8916_wcd_digital_dai_ops, + }, +}; + +static struct snd_soc_codec_driver msm8916_wcd_digital = { + .probe = msm8916_wcd_digital_codec_probe, + .component_driver = { + .controls = msm8916_wcd_digital_snd_controls, + .num_controls = ARRAY_SIZE(msm8916_wcd_digital_snd_controls), + .dapm_widgets = msm8916_wcd_digital_dapm_widgets, + .num_dapm_widgets = + ARRAY_SIZE(msm8916_wcd_digital_dapm_widgets), + .dapm_routes = msm8916_wcd_digital_audio_map, + .num_dapm_routes = ARRAY_SIZE(msm8916_wcd_digital_audio_map), + }, +}; + +static const struct regmap_config msm8916_codec_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = LPASS_CDC_TX2_DMIC_CTL, + .cache_type = REGCACHE_FLAT, +}; + +static int msm8916_wcd_digital_probe(struct platform_device *pdev) +{ + struct msm8916_wcd_digital_priv *priv; + struct device *dev = &pdev->dev; + void __iomem *base; + struct resource *mem_res; + struct regmap *digital_map; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(&pdev->dev, mem_res); + if (IS_ERR(base)) + return PTR_ERR(base); + + digital_map = + devm_regmap_init_mmio(&pdev->dev, base, + &msm8916_codec_regmap_config); + if (IS_ERR(digital_map)) + return PTR_ERR(digital_map); + + ret = msm8916_wcd_digital_get_clks(pdev, priv); + if (ret < 0) + return ret; + + ret = clk_prepare_enable(priv->ahbclk); + if (ret < 0) { + dev_err(dev, "failed to enable ahbclk %d\n", ret); + return ret; + } + + /* set mclk default mclk rate */ + clk_set_rate(priv->mclk, DEFAULT_MCLK_RATE); + + ret = clk_prepare_enable(priv->mclk); + if (ret < 0) { + dev_err(dev, "failed to enable mclk %d\n", ret); + return ret; + } + + dev_set_drvdata(dev, priv); + + return snd_soc_register_codec(dev, 
&msm8916_wcd_digital, + msm8916_wcd_digital_dai, + ARRAY_SIZE(msm8916_wcd_digital_dai)); +} + +static int msm8916_wcd_digital_remove(struct platform_device *pdev) +{ + struct msm8916_wcd_digital_priv *priv = dev_get_drvdata(&pdev->dev); + + snd_soc_unregister_codec(&pdev->dev); + clk_disable_unprepare(priv->mclk); + clk_disable_unprepare(priv->ahbclk); + + return 0; +} + +static const struct of_device_id msm8916_wcd_digital_match_table[] = { + { .compatible = "qcom,msm8916-wcd-digital-codec" }, + { } +}; + +MODULE_DEVICE_TABLE(of, msm8916_wcd_digital_match_table); + +static struct platform_driver msm8916_wcd_digital_driver = { + .driver = { + .name = "msm8916-wcd-digital-codec", + .of_match_table = msm8916_wcd_digital_match_table, + }, + .probe = msm8916_wcd_digital_probe, + .remove = msm8916_wcd_digital_remove, +}; + +module_platform_driver(msm8916_wcd_digital_driver); + +MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>"); +MODULE_DESCRIPTION("MSM8916 WCD Digital Codec driver"); +MODULE_LICENSE("GPL v2"); diff --git a/sound/soc/qcom/apq8016_sbc.c b/sound/soc/qcom/apq8016_sbc.c index 07f91e918b23..d00c8c7c1c78 100644 --- a/sound/soc/qcom/apq8016_sbc.c +++ b/sound/soc/qcom/apq8016_sbc.c @@ -35,10 +35,18 @@ struct apq8016_sbc_data { #define MIC_CTRL_TLMM_SCLK_EN BIT(1) #define SPKR_CTL_PRI_WS_SLAVE_SEL_11 (BIT(17) | BIT(16)) +static const struct snd_soc_dapm_widget msm8x16_dapm_widgets[] = { + + SND_SOC_DAPM_MIC("Digital Mic1", NULL), + SND_SOC_DAPM_MIC("Digital Mic2", NULL), +}; + static int apq8016_sbc_dai_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_dai *cpu_dai = rtd->cpu_dai; struct snd_soc_card *card = rtd->card; + struct snd_soc_codec *codec = rtd->codec; + struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec); struct apq8016_sbc_data *pdata = snd_soc_card_get_drvdata(card); int rval = 0; @@ -67,6 +75,10 @@ static int apq8016_sbc_dai_init(struct snd_soc_pcm_runtime *rtd) break; } + snd_soc_dapm_new_controls(dapm, 
msm8x16_dapm_widgets, + ARRAY_SIZE(msm8x16_dapm_widgets)); + + snd_soc_dapm_sync(dapm); return rval; } @@ -123,20 +135,15 @@ static struct apq8016_sbc_data *apq8016_sbc_parse_of(struct snd_soc_card *card) return ERR_PTR(-EINVAL); } - link->codec_of_node = of_parse_phandle(codec, "sound-dai", 0); - if (!link->codec_of_node) { - dev_err(card->dev, "error getting codec phandle\n"); - return ERR_PTR(-EINVAL); - } - ret = snd_soc_of_get_dai_name(cpu, &link->cpu_dai_name); if (ret) { dev_err(card->dev, "error getting cpu dai name\n"); return ERR_PTR(ret); } - ret = snd_soc_of_get_dai_name(codec, &link->codec_dai_name); - if (ret) { + ret = snd_soc_of_get_dai_link_codecs(dev, codec, link); + + if (ret < 0) { dev_err(card->dev, "error getting codec dai name\n"); return ERR_PTR(ret); } diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c index 420d200f9a05..dd5bdd0da730 100644 --- a/sound/soc/qcom/lpass-platform.c +++ b/sound/soc/qcom/lpass-platform.c @@ -25,8 +25,7 @@ #include "lpass.h" struct lpass_pcm_data { - int rdma_ch; - int wrdma_ch; + int dma_ch; int i2s_port; }; @@ -95,10 +94,7 @@ static int lpass_platform_pcmops_open(struct snd_pcm_substream *substream) return ret; } - if (dir == SNDRV_PCM_STREAM_PLAYBACK) - data->rdma_ch = dma_ch; - else - data->wrdma_ch = dma_ch; + data->dma_ch = dma_ch; snd_soc_set_runtime_hwparams(substream, &lpass_platform_pcm_hardware); @@ -125,20 +121,12 @@ static int lpass_platform_pcmops_close(struct snd_pcm_substream *substream) snd_soc_platform_get_drvdata(soc_runtime->platform); struct lpass_variant *v = drvdata->variant; struct lpass_pcm_data *data; - int dma_ch, dir = substream->stream; data = runtime->private_data; v = drvdata->variant; - - if (dir == SNDRV_PCM_STREAM_PLAYBACK) - dma_ch = data->rdma_ch; - else - dma_ch = data->wrdma_ch; - - drvdata->substream[dma_ch] = NULL; - + drvdata->substream[data->dma_ch] = NULL; if (v->free_dma_channel) - v->free_dma_channel(drvdata, dma_ch); + 
v->free_dma_channel(drvdata, data->dma_ch); return 0; } @@ -159,10 +147,7 @@ static int lpass_platform_pcmops_hw_params(struct snd_pcm_substream *substream, int bitwidth; int ret, dma_port = pcm_data->i2s_port + v->dmactl_audif_start; - if (dir == SNDRV_PCM_STREAM_PLAYBACK) - ch = pcm_data->rdma_ch; - else - ch = pcm_data->wrdma_ch; + ch = pcm_data->dma_ch; bitwidth = snd_pcm_format_width(format); if (bitwidth < 0) { @@ -249,11 +234,7 @@ static int lpass_platform_pcmops_hw_free(struct snd_pcm_substream *substream) unsigned int reg; int ret; - if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) - reg = LPAIF_RDMACTL_REG(v, pcm_data->rdma_ch); - else - reg = LPAIF_WRDMACTL_REG(v, pcm_data->wrdma_ch); - + reg = LPAIF_DMACTL_REG(v, pcm_data->dma_ch, substream->stream); ret = regmap_write(drvdata->lpaif_map, reg, 0); if (ret) dev_err(soc_runtime->dev, "%s() error writing to rdmactl reg: %d\n", @@ -273,10 +254,7 @@ static int lpass_platform_pcmops_prepare(struct snd_pcm_substream *substream) struct lpass_variant *v = drvdata->variant; int ret, ch, dir = substream->stream; - if (dir == SNDRV_PCM_STREAM_PLAYBACK) - ch = pcm_data->rdma_ch; - else - ch = pcm_data->wrdma_ch; + ch = pcm_data->dma_ch; ret = regmap_write(drvdata->lpaif_map, LPAIF_DMABASE_REG(v, ch, dir), @@ -328,10 +306,7 @@ static int lpass_platform_pcmops_trigger(struct snd_pcm_substream *substream, struct lpass_variant *v = drvdata->variant; int ret, ch, dir = substream->stream; - if (dir == SNDRV_PCM_STREAM_PLAYBACK) - ch = pcm_data->rdma_ch; - else - ch = pcm_data->wrdma_ch; + ch = pcm_data->dma_ch; switch (cmd) { case SNDRV_PCM_TRIGGER_START: @@ -406,10 +381,7 @@ static snd_pcm_uframes_t lpass_platform_pcmops_pointer( unsigned int base_addr, curr_addr; int ret, ch, dir = substream->stream; - if (dir == SNDRV_PCM_STREAM_PLAYBACK) - ch = pcm_data->rdma_ch; - else - ch = pcm_data->wrdma_ch; + ch = pcm_data->dma_ch; ret = regmap_read(drvdata->lpaif_map, LPAIF_DMABASE_REG(v, ch, dir), &base_addr); diff --git 
a/sound/soc/qcom/storm.c b/sound/soc/qcom/storm.c index 2d833bffdba0..8fcac2ac3aa6 100644 --- a/sound/soc/qcom/storm.c +++ b/sound/soc/qcom/storm.c @@ -58,7 +58,7 @@ static int storm_ops_hw_params(struct snd_pcm_substream *substream, return 0; } -static struct snd_soc_ops storm_soc_ops = { +static const struct snd_soc_ops storm_soc_ops = { .hw_params = storm_ops_hw_params, }; |