688 files changed, 22291 insertions, 3579 deletions
diff --git a/Documentation/devicetree/bindings/arm/msm/imem.txt b/Documentation/devicetree/bindings/arm/msm/imem.txt index 2989fbfe7972..440628d02630 100644 --- a/Documentation/devicetree/bindings/arm/msm/imem.txt +++ b/Documentation/devicetree/bindings/arm/msm/imem.txt @@ -73,6 +73,11 @@ USB Diag Cookies: Memory region used to store USB PID and serial numbers to be used by bootloader in download mode. +SSR Minidump Offset +------------------- +-Compatible: "qcom,msm-imem-minidump" +-reg: start address and size of ssr imem region + Required properties: -compatible: "qcom,msm-imem-diag-dload" -reg: start address and size of USB Diag download mode region in imem @@ -121,4 +126,9 @@ Example: compatible = "qcom,msm-imem-emergency_download_mode"; reg = <0xfe0 12>; }; + + ss_mdump@b88 { + compatible = "qcom,msm-imem-minidump"; + reg = <0xb88 28>; + }; }; diff --git a/Documentation/devicetree/bindings/arm/msm/msm.txt b/Documentation/devicetree/bindings/arm/msm/msm.txt index c264f9f65265..e4622558af55 100644 --- a/Documentation/devicetree/bindings/arm/msm/msm.txt +++ b/Documentation/devicetree/bindings/arm/msm/msm.txt @@ -86,9 +86,6 @@ SoCs: - MSM8998 compatible = "qcom,msm8998" -- MSM8998_9x55 - compatible = "qcom,msm8998-9x55" - - MSMHAMSTER compatible = "qcom,msmhamster" @@ -283,8 +280,6 @@ compatible = "qcom,msm8998-rumi" compatible = "qcom,msm8998-cdp" compatible = "qcom,msm8998-mtp" compatible = "qcom,msm8998-qrd" -compatible = "qcom,msm8998-9x55-cdp" -compatible = "qcom,msm8998-9x55-mtp" compatible = "qcom,msmhamster-rumi" compatible = "qcom,msmhamster-cdp" compatible = "qcom,msmhamster-mtp" diff --git a/Documentation/devicetree/bindings/display/msm/sde.txt b/Documentation/devicetree/bindings/display/msm/sde.txt index e14acdc6303e..1583da81c090 100644 --- a/Documentation/devicetree/bindings/display/msm/sde.txt +++ b/Documentation/devicetree/bindings/display/msm/sde.txt @@ -169,6 +169,7 @@ Optional properties: e.g. qcom,sde-sspp-vig-blocks -- qcom,sde-vig-csc-off: offset of CSC hardware -- qcom,sde-vig-qseed-off: offset of QSEED hardware + -- qcom,sde-vig-qseed-size: A u32 address range for qseed scaler. -- qcom,sde-vig-pcc: offset and version of PCC hardware -- qcom,sde-vig-hsic: offset and version of global PA adjustment -- qcom,sde-vig-memcolor: offset and version of PA memcolor hardware @@ -178,6 +179,7 @@ Optional properties: indicates that the SSPP RGB contains that feature hardware. e.g. qcom,sde-sspp-vig-blocks -- qcom,sde-rgb-scaler-off: offset of RGB scaler hardware + -- qcom,sde-rgb-scaler-size: A u32 address range for scaler. -- qcom,sde-rgb-pcc: offset and version of PCC hardware - qcom,sde-dspp-blocks: A node that lists the blocks inside the DSPP hardware. 
The block entries will contain the offset and version of each @@ -417,6 +419,7 @@ Example: qcom,sde-sspp-vig-blocks { qcom,sde-vig-csc-off = <0x320>; qcom,sde-vig-qseed-off = <0x200>; + qcom,sde-vig-qseed-size = <0x74>; /* Offset from vig top, version of HSIC */ qcom,sde-vig-hsic = <0x200 0x00010000>; qcom,sde-vig-memcolor = <0x200 0x00010000>; @@ -425,6 +428,7 @@ Example: qcom,sde-sspp-rgb-blocks { qcom,sde-rgb-scaler-off = <0x200>; + qcom,sde-rgb-scaler-size = <0x74>; qcom,sde-rgb-pcc = <0x380 0x00010000>; }; diff --git a/Documentation/devicetree/bindings/media/video/msm-cci.txt b/Documentation/devicetree/bindings/media/video/msm-cci.txt index bb413af4b54d..c5c82a89f662 100644 --- a/Documentation/devicetree/bindings/media/video/msm-cci.txt +++ b/Documentation/devicetree/bindings/media/video/msm-cci.txt @@ -205,6 +205,31 @@ Optional properties: (in the same order). - cam_vaf-supply : should contain regulator from which AF voltage is supplied +* Qualcomm Technologies, Inc. MSM LASER LED + +Required properties: +- cell-index : should contain unique identifier to differentiate + between multiple laser led modules +- reg : should contain i2c slave address of the laser led and length of + data field which is 0x0 +- compatible : + - "qcom,laser-led" +- qcom,cci-master : should contain i2c master id to be used for this camera + sensor + - 0 -> MASTER 0 + - 1 -> MASTER 1 + +Optional properties: +- qcom,cam-vreg-name : should contain names of all regulators needed by this + laser led +- qcom,cam-vreg-min-voltage : should contain minimum voltage level in microvolts + for regulators mentioned in qcom,cam-vreg-name property (in the same order) +- qcom,cam-vreg-max-voltage : should contain maximum voltage level in microvolts + for regulators mentioned in qcom,cam-vreg-name property (in the same order) +- qcom,cam-vreg-op-mode : should contain the maximum current in microamps + required from the regulators mentioned in the qcom,cam-vreg-name property + (in the same order). + * Qualcomm Technologies, Inc. MSM OIS Required properties: @@ -277,6 +302,13 @@ Example: qcom,cam-vreg-op-mode = <100000>; }; + laserled0: qcom,laserled@0 { + cell-index = <0>; + reg = <0x0>; + compatible = "qcom,laser-led"; + qcom,cci-master = <1>; + }; + qcom,camera@0 { cell-index = <0>; compatible = "qcom,camera"; diff --git a/Documentation/devicetree/bindings/net/can/k61-can.txt b/Documentation/devicetree/bindings/net/can/k61-can.txt new file mode 100644 index 000000000000..ea4a7b4ae035 --- /dev/null +++ b/Documentation/devicetree/bindings/net/can/k61-can.txt @@ -0,0 +1,34 @@ +* Kinetis K61 CAN * + +This driver implements SPI slave protocol for Freescale K61 CAN controller. + +Required properties: + - compatible: Should be "fsl,k61" or "nxp,mpc5746c". + - reg: Should contain SPI chip select. + - interrupt-parent: Should specify interrupt controller for the interrupt. + - interrupts: Should contain IRQ line for the CAN controller. + +Optional properties: + - reset-gpio: Reference to the GPIO connected to the reset input. + - pinctrl-names : Names corresponding to the numbered pinctrl states. + - pinctrl-0 : This explains the active state of the GPIO line. + - pinctrl-1 : This explains the suspend state of the GPIO line. + - bits-per-word: Indicate how many bits are in a SPI frame. e.g.: 8, 16, 32. + Default to 16. + - reset-delay-msec: Delay in milliseconds to be applied after resetting the chip. + Default to 1 ms. 
+ +Example: + + can-controller@0 { + compatible = "fsl,k61"; + reg = <0>; + interrupt-parent = <&tlmm_pinmux>; + interrupts = <25 0>; + reset-gpio = <&tlmm_pinmux 11 0x1>; + pinctrl-names = "active", "sleep"; + pinctrl-0 = <&can_rst_on>; + pinctrl-1 = <&can_rst_off>; + bits-per-word = <8>; + reset-delay-msec = <100>; + }; diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,wcn3990-wifi.txt b/Documentation/devicetree/bindings/net/wireless/qcom,wcn3990-wifi.txt index acc850773210..c1a8d1bd697d 100644 --- a/Documentation/devicetree/bindings/net/wireless/qcom,wcn3990-wifi.txt +++ b/Documentation/devicetree/bindings/net/wireless/qcom,wcn3990-wifi.txt @@ -11,13 +11,24 @@ Required properties: - compatible: "qcom,wcn3990-wifi"; - reg: Memory regions defined as starting address and size - reg-names: Names of the memory regions defined in reg entry + - clocks: List of clock phandles + - clock-names: List of clock names corresponding to the "clocks" property - interrupts: Copy engine interrupt table +Optional properties: + - <supply-name>-supply: phandle to the regulator device tree node + optional "supply-name" is "vdd-0.8-cx-mx". + - qcom,<supply>-config: Specifies voltage levels for supply. Should be + specified in pairs (min, max), units uV. There can + be optional load in uA and Regulator settle delay in + uS. Example: msm_ath10k_wlan: qcom,msm_ath10k_wlan@18800000 { compatible = "qcom,wcn3990-wifi"; reg = <0x18800000 0x800000>; reg-names = "membase"; + clocks = <&clock_gcc clk_aggre2_noc_clk>; + clock-names = "smmu_aggre2_noc_clk"; interrupts = <0 130 0 /* CE0 */ >, <0 131 0 /* CE1 */ >, @@ -31,4 +42,10 @@ Example: <0 139 0 /* CE9 */ >, <0 140 0 /* CE10 */ >, <0 141 0 /* CE11 */ >; + vdd-0.8-cx-mx-supply = <&pm8998_l5>; + vdd-1.8-xo-supply = <&pm8998_l7_pin_ctrl>; + vdd-1.3-rfa-supply = <&pm8998_l17_pin_ctrl>; + vdd-3.3-ch0-supply = <&pm8998_l25_pin_ctrl>; + qcom,vdd-0.8-cx-mx-config = <800000 800000>; + qcom,vdd-3.3-ch0-config = <3104000 3312000>; }; diff --git a/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt b/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt index 47a6fdd300ca..406920b7246e 100644 --- a/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt +++ b/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt @@ -88,6 +88,7 @@ Optional properties: - qcom,override-acc-1: Override the default ACC settings with this value if present. - qcom,cx-ipeak-vote: Boolean- Present if we need to set bit 5 of cxip_lm_vote_clear during modem shutdown +- qcom,minidump-id: Unique id for each subsystem One child node to represent the MBA image may be specified, when the MBA image needs to be loaded in a specifically carved out memory region. diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt index 012368275db3..4207b1f0615a 100644 --- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt +++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt @@ -271,6 +271,14 @@ First Level Node - FG Gen3 device Definition: A boolean property that when defined holds SOC at 100% when the battery is full. +- qcom,linearize-soc + Usage: optional + Value type: <empty> + Definition: A boolean property that when defined linearizes SOC when + the SOC drops after charge termination monotonically to + improve the user experience. This is applicable only if + "qcom,hold-soc-while-full" is specified. 
+ - qcom,ki-coeff-soc-dischg Usage: optional Value type: <prop-encoded-array> @@ -301,6 +309,13 @@ First Level Node - FG Gen3 device is specified to make it fully functional. Value has no unit. Allowed range is 0 to 62200 in micro units. +- qcom,ki-coeff-full-dischg + Usage: optional + Value type: <u32> + Definition: Ki coefficient full SOC value that will be applied during + discharging. If not specified, a value of 0 will be set. + Allowed range is from 245 to 62256. + - qcom,fg-rconn-mohms Usage: optional Value type: <u32> diff --git a/Documentation/devicetree/bindings/soc/qcom/guest_shm.txt b/Documentation/devicetree/bindings/soc/qcom/guest_shm.txt new file mode 100644 index 000000000000..9491344c7b9f --- /dev/null +++ b/Documentation/devicetree/bindings/soc/qcom/guest_shm.txt @@ -0,0 +1,19 @@ +QVM Guest Shared Memory + +guest_shm is a device that enables linux as a guest operating system +to allocate shared memory between virtual machines and send notifications +of updates to other virtual machines. + +Required properties: +- compatible: Must be "qvm,guest_shm". +- interrupt-parent: Parent interrupt controller. +- interrupts: Should contain QVM interrupt. +- reg: Physical address of the guest factory and length. + +Example: + qvm,guest_shm { + compatible = "qvm,guest_shm"; + interrupt-parent = <&gic>; + interrupts = <6 4>; + reg = <0x1c050000 0x1000>; + }; diff --git a/Documentation/devicetree/bindings/usb/msm-ssusb.txt b/Documentation/devicetree/bindings/usb/msm-ssusb.txt index 47fad8aa4a1a..54792335e67e 100644 --- a/Documentation/devicetree/bindings/usb/msm-ssusb.txt +++ b/Documentation/devicetree/bindings/usb/msm-ssusb.txt @@ -64,6 +64,8 @@ Optional properties : device provides both "USB" and "USB-HOST" events. - qcom,pm-qos-latency: This represents max tolerable CPU latency in microsecs, which is used as a vote by driver to get max performance in perf mode. +- qcom,no-wakeup-src-in-hostmode: If present then driver doesn't use wakeup_source APIs + in host mode. This allows PM suspend to happen irrespective of runtimePM state of host. Sub nodes: - Sub node for "DWC3- USB3 controller". diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt index e3bed3a961a3..bfaca0cf9adf 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.txt +++ b/Documentation/devicetree/bindings/vendor-prefixes.txt @@ -188,6 +188,7 @@ qcom Qualcomm Technologies, Inc qemu QEMU, a generic and open source machine emulator and virtualizer qi Qi Hardware qnap QNAP Systems, Inc. +qvm BlackBerry Ltd radxa Radxa raidsonic RaidSonic Technology GmbH ralink Mediatek/Ralink Technology Corp. 
diff --git a/Makefile b/Makefile
@@ -1,6 +1,6 @@ VERSION = 4 PATCHLEVEL = 4 -SUBLEVEL = 80 +SUBLEVEL = 85 EXTRAVERSION = NAME = Blurry Fish Butt diff --git a/android/configs/android-base.cfg b/android/configs/android-base.cfg index d1e3b0891a4e..419dca62542a 100644 --- a/android/configs/android-base.cfg +++ b/android/configs/android-base.cfg @@ -139,11 +139,6 @@ CONFIG_PPP_DEFLATE=y CONFIG_PPP_MPPE=y CONFIG_PREEMPT=y CONFIG_PROFILING=y -CONFIG_QFMT_V2=y -CONFIG_QUOTA=y -CONFIG_QUOTA_NETLINK_INTERFACE=y -CONFIG_QUOTA_TREE=y -CONFIG_QUOTACTL=y CONFIG_RANDOMIZE_BASE=y CONFIG_RTC_CLASS=y CONFIG_RT_GROUP_SCHED=y diff --git a/android/configs/android-recommended.cfg b/android/configs/android-recommended.cfg index 11df0892301b..6550d0423f50 100644 --- a/android/configs/android-recommended.cfg +++ b/android/configs/android-recommended.cfg @@ -110,6 +110,11 @@ CONFIG_POWER_SUPPLY=y CONFIG_PSTORE=y CONFIG_PSTORE_CONSOLE=y CONFIG_PSTORE_RAM=y +CONFIG_QFMT_V2=y +CONFIG_QUOTA=y +CONFIG_QUOTACTL=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_QUOTA_TREE=y CONFIG_SCHEDSTATS=y CONFIG_SMARTJOYPLUS_FF=y CONFIG_SND=y diff --git a/arch/Kconfig b/arch/Kconfig index 98f64ad1caf1..ed2539c590bf 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -225,8 +225,8 @@ config ARCH_INIT_TASK config ARCH_TASK_STRUCT_ALLOCATOR bool -# Select if arch has its private alloc_thread_info() function -config ARCH_THREAD_INFO_ALLOCATOR +# Select if arch has its private alloc_thread_stack() function +config ARCH_THREAD_STACK_ALLOCATOR bool # Select if arch wants to size task_struct dynamically via arch_task_struct_size: diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h index 210ef3e72332..0ddd7144c492 100644 --- a/arch/arc/include/asm/cache.h +++ b/arch/arc/include/asm/cache.h @@ -88,7 +88,9 @@ extern int ioc_exists; #define ARC_REG_SLC_FLUSH 0x904 #define ARC_REG_SLC_INVALIDATE 0x905 #define ARC_REG_SLC_RGN_START 0x914 +#define ARC_REG_SLC_RGN_START1 0x915 #define ARC_REG_SLC_RGN_END 0x916 +#define ARC_REG_SLC_RGN_END1 0x917 /* Bit val in SLC_CONTROL */ #define SLC_CTRL_IM 0x040 diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c index d81b6d7e11e7..9a84cbdd44b0 100644 --- a/arch/arc/mm/cache.c +++ b/arch/arc/mm/cache.c @@ -543,6 +543,7 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op) static DEFINE_SPINLOCK(lock); unsigned long flags; unsigned int ctrl; + phys_addr_t end; spin_lock_irqsave(&lock, flags); @@ -572,8 +573,16 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op) * END needs to be setup before START (latter triggers the operation) * END can't be same as START, so add (l2_line_sz - 1) to sz */ - write_aux_reg(ARC_REG_SLC_RGN_END, (paddr + sz + l2_line_sz - 1)); - write_aux_reg(ARC_REG_SLC_RGN_START, paddr); + end = paddr + sz + l2_line_sz - 1; + if (is_pae40_enabled()) + write_aux_reg(ARC_REG_SLC_RGN_END1, upper_32_bits(end)); + + write_aux_reg(ARC_REG_SLC_RGN_END, lower_32_bits(end)); + + if (is_pae40_enabled()) + write_aux_reg(ARC_REG_SLC_RGN_START1, upper_32_bits(paddr)); + + write_aux_reg(ARC_REG_SLC_RGN_START, lower_32_bits(paddr)); while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY); diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 588393412271..22b546e0f845 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1776,7 +1776,7 @@ source "mm/Kconfig" choice prompt "Virtual Memory Reclaim" - default NO_VM_RECLAIM + default ENABLE_VMALLOC_SAVING help Select the method of reclaiming virtual memory diff --git a/arch/arm/boot/dts/armada-388-gp.dts 
b/arch/arm/boot/dts/armada-388-gp.dts index cd316021d6ce..6c1b45c1af66 100644 --- a/arch/arm/boot/dts/armada-388-gp.dts +++ b/arch/arm/boot/dts/armada-388-gp.dts @@ -89,7 +89,7 @@ pinctrl-names = "default"; pinctrl-0 = <&pca0_pins>; interrupt-parent = <&gpio0>; - interrupts = <18 IRQ_TYPE_EDGE_FALLING>; + interrupts = <18 IRQ_TYPE_LEVEL_LOW>; gpio-controller; #gpio-cells = <2>; interrupt-controller; @@ -101,7 +101,7 @@ compatible = "nxp,pca9555"; pinctrl-names = "default"; interrupt-parent = <&gpio0>; - interrupts = <18 IRQ_TYPE_EDGE_FALLING>; + interrupts = <18 IRQ_TYPE_LEVEL_LOW>; gpio-controller; #gpio-cells = <2>; interrupt-controller; diff --git a/arch/arm/boot/dts/qcom/Makefile b/arch/arm/boot/dts/qcom/Makefile index faff7ea618d0..c944ecfa4eeb 100644 --- a/arch/arm/boot/dts/qcom/Makefile +++ b/arch/arm/boot/dts/qcom/Makefile @@ -172,10 +172,7 @@ dtb-$(CONFIG_ARCH_MSM8998) += msm8998-sim.dtb \ apq8098-v2.1-svr20.dtb \ msm8998-v2.1-interposer-sdm660-cdp.dtb \ msm8998-v2.1-interposer-sdm660-mtp.dtb \ - msm8998-v2.1-interposer-sdm660-qrd.dtb \ - msm8998-9x55-rcm.dtb \ - msm8998-9x55-cdp.dtb \ - msm8998-9x55-mtp.dtb + msm8998-v2.1-interposer-sdm660-qrd.dtb endif dtb-$(CONFIG_ARCH_MSMHAMSTER) += msmhamster-rumi.dtb diff --git a/arch/arm/boot/dts/qcom/fg-gen3-batterydata-ascent-3450mah.dtsi b/arch/arm/boot/dts/qcom/fg-gen3-batterydata-ascent-3450mah.dtsi index c7cecbca3929..7705d01570ac 100644 --- a/arch/arm/boot/dts/qcom/fg-gen3-batterydata-ascent-3450mah.dtsi +++ b/arch/arm/boot/dts/qcom/fg-gen3-batterydata-ascent-3450mah.dtsi @@ -11,71 +11,71 @@ */ qcom,ascent_3450mah { - /* Ascent_with_connector_3450mAh_averaged_MasterSlave_Jan6th2017 */ + /* Ascent_wConn_Aging_3450mAh_averaged_MasterSlave_Jul11th2017 */ qcom,max-voltage-uv = <4350000>; qcom,fg-cc-cv-threshold-mv = <4340>; qcom,fastchg-current-ma = <3450>; qcom,batt-id-kohm = <60>; qcom,battery-beta = <3435>; - qcom,battery-type = "ascent_3450mah_averaged_masterslave_jan6th2017"; - qcom,checksum = <0x96AC>; - qcom,gui-version = "PMI8998GUI - 2.0.0.54"; + qcom,battery-type = "ascent_3450mah_averaged_masterslave_jul11th2017"; + qcom,checksum = <0x7C33>; + qcom,gui-version = "PMI8998GUI - 2.0.0.58"; qcom,fg-profile-data = [ - 9C 1F 85 05 - 82 0A 73 FC - 2B 1D 72 EA - EE 03 66 0C - C8 17 F4 22 - E0 45 1F 52 - 5C 00 00 00 - 10 00 00 00 - 00 00 4A C4 - C7 BC 48 C2 - 0F 00 08 00 - E1 DA 5D ED - 8D FD B2 F3 - 96 E2 A7 12 - 7E F4 0E 3B - 24 06 09 20 - 27 00 14 00 - 83 1F EE 05 - 1F 0A 45 FD - 6B 1D 53 E5 - EC 0B 31 14 - 44 18 49 23 - 18 45 A6 53 - 55 00 00 00 - 0E 00 00 00 - 00 00 61 CC - B7 C3 0F BC - 0F 00 00 00 - 92 00 5D ED - E3 06 E0 00 - 75 FD 9C 03 - 47 DB B3 22 - CB 33 CC FF - 07 10 00 00 - 99 0D 99 45 - 0F 00 40 00 - AB 01 0A FA - FF 00 00 00 - 00 00 00 00 - 00 00 00 00 - 00 00 00 00 - 00 00 00 00 - 00 00 00 00 - 00 00 00 00 - 00 00 00 00 - 00 00 00 00 - 00 00 00 00 - 00 00 00 00 - 00 00 00 00 - 00 00 00 00 - 00 00 00 00 - 00 00 00 00 - 00 00 00 00 - 00 00 00 00 - 00 00 00 00 - 00 00 00 00 + 8F 1F 94 05 + 73 0A 4A 06 + 27 1D 21 EA + 16 0A 3B 0C + 07 18 97 22 + A5 3C EC 4A + 5C 00 00 00 + 10 00 00 00 + 00 00 92 BC + CD BD 02 B4 + 11 00 08 00 + 69 DA AD 07 + 4B FD 19 FA + 1D 0C B0 0C + EB F3 78 3B + 24 06 09 20 + 27 00 14 00 + 7E 1F F2 05 + 19 0A 55 FD + 6C 1D C6 ED + 1A 12 FF 1D + 6F 18 EB 22 + B9 45 6F 52 + 55 00 00 00 + 0E 00 00 00 + 00 00 A1 D5 + 34 BA A0 CA + 0F 00 00 00 + 93 00 AD 07 + 8D FD F6 00 + BA 0D 5C 04 + B3 FC F4 1B + C3 33 CC FF + 07 10 00 00 + A4 0D 99 45 + 0F 00 40 00 + A4 01 0A FA + FF 00 00 00 + 00 00 00 00 + 00 00 00 
00 + 00 00 00 00 + 00 00 00 00 + 00 00 00 00 + 00 00 00 00 + 00 00 00 00 + 00 00 00 00 + 00 00 00 00 + 00 00 00 00 + 00 00 00 00 + 00 00 00 00 + 00 00 00 00 + 00 00 00 00 + 00 00 00 00 + 00 00 00 00 + 00 00 00 00 + 00 00 00 00 ]; }; diff --git a/arch/arm/boot/dts/qcom/msm-audio.dtsi b/arch/arm/boot/dts/qcom/msm-audio.dtsi index 3a7514397139..75aea7280e6c 100644 --- a/arch/arm/boot/dts/qcom/msm-audio.dtsi +++ b/arch/arm/boot/dts/qcom/msm-audio.dtsi @@ -383,6 +383,7 @@ qcom,msm-cpudai-auxpcm-data = <0>, <0>; qcom,msm-cpudai-auxpcm-pcm-clk-rate = <2048000>, <2048000>; qcom,msm-auxpcm-interface = "primary"; + qcom,msm-cpudai-afe-clk-ver = <2>; }; dai_sec_auxpcm: qcom,msm-sec-auxpcm { diff --git a/arch/arm/boot/dts/qcom/msm-smb138x.dtsi b/arch/arm/boot/dts/qcom/msm-smb138x.dtsi index df7d30210c19..fa21dd7995eb 100644 --- a/arch/arm/boot/dts/qcom/msm-smb138x.dtsi +++ b/arch/arm/boot/dts/qcom/msm-smb138x.dtsi @@ -18,7 +18,7 @@ compatible = "qcom,i2c-pmic"; reg = <0x8>; #address-cells = <1>; - #size-cells = <1>; + #size-cells = <0>; interrupt-parent = <&spmi_bus>; interrupts = <0x0 0xd1 0x0 IRQ_TYPE_LEVEL_LOW>; interrupt_names = "smb138x"; @@ -88,7 +88,7 @@ }; }; - smb138x_parallel_slave: qcom,smb138x-parallel-slave@1000 { + smb1381_charger: qcom,smb1381-charger@1000 { compatible = "qcom,smb138x-parallel-slave"; qcom,pmic-revid = <&smb138x_revid>; reg = <0x1000 0x700>; @@ -129,7 +129,7 @@ }; }; -&smb138x_parallel_slave { +&smb1381_charger { smb138x_vbus: qcom,smb138x-vbus { status = "disabled"; regulator-name = "smb138x-vbus"; diff --git a/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi b/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi index 1283cdddc2db..c1728da49d5e 100644 --- a/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi @@ -1381,6 +1381,7 @@ &usb2s { status = "ok"; + qcom,no-wakeup-src-in-hostmode; }; &usb3 { @@ -1388,6 +1389,7 @@ vbus_dwc3-supply = <&usb_otg_switch>; vdda33-supply = <&pm8994_l24>; vdda18-supply = <&pm8994_l12>; + qcom,no-wakeup-src-in-hostmode; }; &blsp1_uart2 { @@ -1557,5 +1559,9 @@ reg = <0 0xb3fff000 0 0x800000>; label = "early_camera_mem"; }; + early_audio_mem: early_audio_mem@0xb5fff000 { + reg = <0x0 0xb5fff000 0x0 0x3FFFFC>; + label = "early_audio_mem"; + }; }; }; diff --git a/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi b/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi index c3b986786034..db822dae6e7f 100644 --- a/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi @@ -1331,6 +1331,10 @@ reg = <0 0xb3fff000 0 0x800000>; label = "early_camera_mem"; }; + early_audio_mem: early_audio_mem@0xb5fff000 { + reg = <0x0 0xb5fff000 0x0 0x3FFFFC>; + label = "early_audio_mem"; + }; }; }; diff --git a/arch/arm/boot/dts/qcom/msm8996-cv2x.dtsi b/arch/arm/boot/dts/qcom/msm8996-cv2x.dtsi index d18344eb3daf..f1cf3136dbd0 100644 --- a/arch/arm/boot/dts/qcom/msm8996-cv2x.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996-cv2x.dtsi @@ -44,6 +44,14 @@ gpios = <&tlmm 22 0>; status = "okay"; }; + + gpio_fan { + /* Based on 5v 75mA MC30100V2 */ + compatible = "gpio-fan"; + gpios = <&tlmm 43 GPIO_ACTIVE_LOW>; + gpio-fan,speed-map = <0 1>, + <8000 0>; + }; }; &soc { @@ -216,11 +224,18 @@ &spi_9 { status = "okay"; - /* CAN controller */ - spi@0 { - compatible = "nxp,mpc57xx"; + can-controller@0 { + compatible = "nxp,mpc5746c"; reg = <0>; - spi-max-frequency = <19200000>; + spi-max-frequency = <9600000>; + interrupt-parent = <&tlmm>; + interrupts = <78 0>; + reset-gpio = <&tlmm 71 GPIO_ACTIVE_LOW>; + bits-per-word = 
<8>; + reset-delay-msec = <100>; + pinctrl-names = "active", "sleep"; + pinctrl-0 = <&can_rst_on>; + pinctrl-1 = <&can_rst_off>; }; }; @@ -400,12 +415,6 @@ status = "okay"; }; -&tlmm { - /* Set these up as hogs */ - pinctrl-names = "default"; - pinctrl-0 = <&can_reset_gpio>; -}; - &pm8994_gpios { gpio@c700 { /* GPIO 8 - WLAN_EN */ qcom,mode = <1>; /* Digital output*/ diff --git a/arch/arm/boot/dts/qcom/msm8996-pinctrl.dtsi b/arch/arm/boot/dts/qcom/msm8996-pinctrl.dtsi index d800fdaae3de..244901bd5cef 100644 --- a/arch/arm/boot/dts/qcom/msm8996-pinctrl.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996-pinctrl.dtsi @@ -2745,17 +2745,32 @@ }; }; - can_reset_gpio: can_reset_gpio { - mux { - pins = "gpio71"; - function = "gpio"; + can_reset { + can_rst_on: rst_on { + mux { + pins = "gpio71"; + function = "gpio"; + }; + + config { + pins = "gpio71"; + drive-strength = <2>; /* 2 mA */ + bias-pull-up; + }; }; - config { - pins = "gpio71"; - drive-strength = <2>; - output-high; - bias-pull-up; + can_rst_off: rst_off { + mux { + pins = "gpio71"; + function = "gpio"; + }; + + config { + pins = "gpio71"; + drive-strength = <2>; /* 2 mA */ + bias-pull-up; + output-high; + }; }; }; }; diff --git a/arch/arm/boot/dts/qcom/msm8996-regulator.dtsi b/arch/arm/boot/dts/qcom/msm8996-regulator.dtsi index 936dfd4d1cb2..1800a31dda9a 100644 --- a/arch/arm/boot/dts/qcom/msm8996-regulator.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996-regulator.dtsi @@ -1986,5 +1986,6 @@ maxim,vrange-sel = <0>; maxim,soft-start-slew-rate = <5500>; maxim,dvs-slew-rate = <5500>; + status = "disabled"; }; }; diff --git a/arch/arm/boot/dts/qcom/msm8996-sde.dtsi b/arch/arm/boot/dts/qcom/msm8996-sde.dtsi index b0688668e667..11c45606f6c2 100644 --- a/arch/arm/boot/dts/qcom/msm8996-sde.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996-sde.dtsi @@ -183,7 +183,7 @@ }; smmu_kms_unsec: qcom,smmu_kms_unsec_cb { - compatible = "qcom,smmu_kms_unsec"; + compatible = "qcom,smmu_sde_unsec"; iommus = <&mdp_smmu 0>; }; diff --git a/arch/arm/boot/dts/qcom/msm8996-v3.dtsi b/arch/arm/boot/dts/qcom/msm8996-v3.dtsi index 7e5fa8a495c9..8e46ce5277b3 100644 --- a/arch/arm/boot/dts/qcom/msm8996-v3.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996-v3.dtsi @@ -259,6 +259,71 @@ }; }; + qcom,gpu-pwrlevels-2 { + #address-cells = <1>; + #size-cells = <0>; + + qcom,speed-bin = <2>; + + qcom,initial-pwrlevel = <4>; + + qcom,gpu-pwrlevel@0 { + reg = <0>; + qcom,gpu-freq = <560000000>; + qcom,bus-freq = <11>; + qcom,bus-min = <11>; + qcom,bus-max = <11>; + }; + + qcom,gpu-pwrlevel@1 { + reg = <1>; + qcom,gpu-freq = <510000000>; + qcom,bus-freq = <9>; + qcom,bus-min = <8>; + qcom,bus-max = <10>; + }; + + qcom,gpu-pwrlevel@2 { + reg = <2>; + qcom,gpu-freq = <401800000>; + qcom,bus-freq = <8>; + qcom,bus-min = <7>; + qcom,bus-max = <9>; + }; + + qcom,gpu-pwrlevel@3 { + reg = <3>; + qcom,gpu-freq = <315000000>; + qcom,bus-freq = <6>; + qcom,bus-min = <5>; + qcom,bus-max = <7>; + }; + + qcom,gpu-pwrlevel@4 { + reg = <4>; + qcom,gpu-freq = <214000000>; + qcom,bus-freq = <4>; + qcom,bus-min = <3>; + qcom,bus-max = <5>; + }; + + qcom,gpu-pwrlevel@5 { + reg = <5>; + qcom,gpu-freq = <133000000>; + qcom,bus-freq = <3>; + qcom,bus-min = <2>; + qcom,bus-max = <4>; + }; + + qcom,gpu-pwrlevel@6 { + reg = <6>; + qcom,gpu-freq = <27000000>; + qcom,bus-freq = <0>; + qcom,bus-min = <0>; + qcom,bus-max = <0>; + + }; + }; }; }; diff --git a/arch/arm/boot/dts/qcom/msm8996.dtsi b/arch/arm/boot/dts/qcom/msm8996.dtsi index 505e325db1f5..b3c355481238 100644 --- a/arch/arm/boot/dts/qcom/msm8996.dtsi +++ 
b/arch/arm/boot/dts/qcom/msm8996.dtsi @@ -186,17 +186,9 @@ dev = "/dev/block/platform/soc/7464900.sdhci/by-name/vendor"; type = "ext4"; mnt_flags = "ro,barrier=1,discard"; - fsmgr_flags = "wait,verify"; + fsmgr_flags = "wait,slotselect"; status = "ok"; }; - system { - compatible = "android,system"; - dev = "/dev/block/platform/soc/7464900.sdhci/by-name/system"; - type = "ext4"; - mnt_flags = "ro,barrier=1,discard"; - fsmgr_flags = "wait,verify"; - status = "ok"; - }; }; }; }; diff --git a/arch/arm/boot/dts/qcom/msm8996pro-auto-adp-lite.dts b/arch/arm/boot/dts/qcom/msm8996pro-auto-adp-lite.dts index f5c33063643d..0126081e4b03 100644 --- a/arch/arm/boot/dts/qcom/msm8996pro-auto-adp-lite.dts +++ b/arch/arm/boot/dts/qcom/msm8996pro-auto-adp-lite.dts @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -42,9 +42,6 @@ i2c@75b6000 { /* BLSP8 */ /* ADV7533 HDMI Bridge Chip removed on ADP Lite */ - adv7533@3d { - status = "disabled"; - }; adv7533@39 { status = "disabled"; }; @@ -59,6 +56,14 @@ }; }; +&dsi_adv_7533_2 { + /delete-property/ qcom,dsi-display-active; +}; + +&sde_kms { + connectors = <&sde_hdmi_tx &sde_hdmi &dsi_adv_7533_1>; +}; + &pil_modem { pinctrl-names = "default"; pinctrl-0 = <&modem_mux>; diff --git a/arch/arm/boot/dts/qcom/msm8996pro-auto.dtsi b/arch/arm/boot/dts/qcom/msm8996pro-auto.dtsi index 15295639e361..f0fade10633e 100644 --- a/arch/arm/boot/dts/qcom/msm8996pro-auto.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996pro-auto.dtsi @@ -466,5 +466,11 @@ qcom,gpu-pwrlevels-0 { qcom,initial-pwrlevel = <1>; }; + + qcom,gpu-pwrlevels-2 { + qcom,initial-pwrlevel = <2>; + + }; + }; }; diff --git a/arch/arm/boot/dts/qcom/msm8998-9x55-mtp.dts b/arch/arm/boot/dts/qcom/msm8998-9x55-mtp.dts deleted file mode 100644 index a95e9e4f272f..000000000000 --- a/arch/arm/boot/dts/qcom/msm8998-9x55-mtp.dts +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright (c) 2017, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -/dts-v1/; - -#include "msm8998-9x55.dtsi" -#include "msm8998-mdss-panels.dtsi" -#include "msm8998-mtp.dtsi" - -/ { - model = "Qualcomm Technologies, Inc. MSM8998-9x55 MTP"; - compatible = "qcom,msm8998-9x55-mtp", "qcom,msm8998-9x55", "qcom,mtp"; - qcom,board-id= <8 6>; -}; diff --git a/arch/arm/boot/dts/qcom/msm8998-9x55-rcm.dts b/arch/arm/boot/dts/qcom/msm8998-9x55-rcm.dts deleted file mode 100644 index 094ecbc50061..000000000000 --- a/arch/arm/boot/dts/qcom/msm8998-9x55-rcm.dts +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright (c) 2017, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -/dts-v1/; - -#include "msm8998-9x55.dtsi" -#include "msm8998-mdss-panels.dtsi" -#include "msm8998-cdp.dtsi" - -/ { - model = "Qualcomm Technologies, Inc. MSM8998-9x55 RCM"; - compatible = "qcom,msm8998-9x55-cdp", "qcom,msm8998-9x55", "qcom,cdp"; - qcom,board-id= <0x21 2>; -}; diff --git a/arch/arm/boot/dts/qcom/msm8998-9x55.dtsi b/arch/arm/boot/dts/qcom/msm8998-9x55.dtsi deleted file mode 100644 index be947507e398..000000000000 --- a/arch/arm/boot/dts/qcom/msm8998-9x55.dtsi +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2017, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - - -#include "skeleton64.dtsi" -#include "msm8998-v2.1.dtsi" - -/ { - model = "Qualcomm Technologies, Inc. MSM8998-9x55"; - compatible = "qcom,msm8998-9x55"; - qcom,msm-id = <292 0x0>; - interrupt-parent = <&intc>; - - soc: soc { }; -}; diff --git a/arch/arm/boot/dts/qcom/msm8998-camera-sensor-mtp.dtsi b/arch/arm/boot/dts/qcom/msm8998-camera-sensor-mtp.dtsi index 2095b4e07069..86b68b2440a9 100644 --- a/arch/arm/boot/dts/qcom/msm8998-camera-sensor-mtp.dtsi +++ b/arch/arm/boot/dts/qcom/msm8998-camera-sensor-mtp.dtsi @@ -54,6 +54,13 @@ qcom,cam-vreg-op-mode = <0>; }; + laserled0: qcom,laserled@0 { + cell-index = <0>; + reg = <0x0>; + compatible = "qcom,laser-led"; + qcom,cci-master = <1>; + }; + actuator1: qcom,actuator@1 { cell-index = <1>; reg = <0x1>; @@ -322,6 +329,7 @@ qcom,eeprom-src = <&eeprom2>; qcom,led-flash-src = <&led_flash1>; qcom,actuator-src = <&actuator1>; + qcom,laserled-src = <&laserled0>; cam_vio-supply = <&pm8998_lvs1>; cam_vana-supply = <&pm8998_l22>; cam_vdig-supply = <&pm8998_s3>; diff --git a/arch/arm/boot/dts/qcom/msm8998-coresight.dtsi b/arch/arm/boot/dts/qcom/msm8998-coresight.dtsi index 4b81d2754255..24c91c6102a4 100644 --- a/arch/arm/boot/dts/qcom/msm8998-coresight.dtsi +++ b/arch/arm/boot/dts/qcom/msm8998-coresight.dtsi @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1412,6 +1412,14 @@ <&tpda_spss_out_funnel_spss>; }; }; + port@2 { + reg = <1>; + funnel_spss_in_spss_etm0: endpoint { + slave-mode; + remote-endpoint = + <&spss_etm0_out_funnel_spss>; + }; + }; }; }; @@ -1598,4 +1606,17 @@ }; }; }; + + dummy-spss-etm0 { + compatible = "qcom,coresight-dummy"; + + coresight-name = "coresight-spss-etm0"; + + port{ + spss_etm0_out_funnel_spss: endpoint { + remote-endpoint = + <&funnel_spss_in_spss_etm0>; + }; + }; + }; }; diff --git a/arch/arm/boot/dts/qcom/msm8998-pinctrl.dtsi b/arch/arm/boot/dts/qcom/msm8998-pinctrl.dtsi index ed1259918620..1abb28897fbd 100644 --- a/arch/arm/boot/dts/qcom/msm8998-pinctrl.dtsi +++ b/arch/arm/boot/dts/qcom/msm8998-pinctrl.dtsi @@ -582,7 +582,7 @@ config { pins = "gpio37"; drive-strength = <2>; - bias-pull-down; + bias-pull-up; }; }; @@ -595,7 +595,7 @@ config { pins = "gpio37"; drive-strength = <2>; - bias-disable; + bias-pull-up; }; }; }; diff --git a/arch/arm/boot/dts/qcom/msm8998-sde.dtsi b/arch/arm/boot/dts/qcom/msm8998-sde.dtsi index 354ac830e0fa..f95ef2b84e2c 100644 --- a/arch/arm/boot/dts/qcom/msm8998-sde.dtsi +++ b/arch/arm/boot/dts/qcom/msm8998-sde.dtsi @@ -14,7 +14,7 @@ sde_kms: qcom,sde_kms@c900000 { compatible = "qcom,sde-kms"; reg = <0x0c900000 0x90000>, - <0x0c9b0000 0x1040>; + <0x0c9b0000 0x2008>; reg-names = "mdp_phys", "vbif_phys"; /* clock and supply entries */ @@ -52,23 +52,44 @@ /* hw blocks */ qcom,sde-off = <0x1000>; + qcom,sde-len = <0x458>; + qcom,sde-ctl-off = <0x2000 0x2200 0x2400 0x2600 0x2800>; + qcom,sde-ctl-size = <0x94>; + qcom,sde-mixer-off = <0x45000 0x46000 0x47000 0x48000 0x49000 0x4a000>; + qcom,sde-mixer-size = <0x31c>; + qcom,sde-dspp-off = <0x55000 0x57000>; + qcom,sde-dspp-size = <0x17e0>; + qcom,sde-wb-off = <0x66000>; + qcom,sde-wb-size = <0x2dc>; + qcom,sde-wb-id = <2>; qcom,sde-wb-xin-id = <6>; qcom,sde-wb-clk-ctrl = <0x2bc 0x10>; qcom,sde-intf-off = <0x6b000 0x6b800 0x6c000 0x6c800>; + qcom,sde-intf-size = <0x280>; + qcom,sde-intf-type = "dp", "dsi", "dsi", "hdmi"; + qcom,sde-pp-off = <0x71000 0x71800 - 0x72000 0x72800>; - qcom,sde-te2-off = <0x2000 0x2000 0x0 0x0>; + 0x72000 0x72800 0x73000>; + qcom,sde-pp-slave = <0x0 0x0 0x0 0x0 0x1>; + + qcom,sde-pp-size = <0xd4>; + + qcom,sde-te2-off = <0x2000 0x2000 0x0 0x0 0x0>; qcom,sde-cdm-off = <0x7a200>; - qcom,sde-dsc-off = <0x10000 0x10000 0x0 0x0>; + qcom,sde-cdm-size = <0x224>; + + qcom,sde-dsc-off = <0x81000 0x81400>; + qcom,sde-dsc-size = <0x140>; + qcom,sde-intf-max-prefetch-lines = <0x15 0x15 0x15 0x15>; qcom,sde-sspp-type = "vig", "vig", "vig", "vig", @@ -78,6 +99,7 @@ qcom,sde-sspp-off = <0x5000 0x7000 0x9000 0xb000 0x25000 0x27000 0x29000 0x2b000 0x35000 0x37000>; + qcom,sde-sspp-src-size = <0x1ac>; qcom,sde-sspp-xin-id = <0 4 8 12 1 5 9 13 2 10>; @@ -113,6 +135,7 @@ qcom,sde-sspp-vig-blocks { qcom,sde-vig-csc-off = <0x1a00>; qcom,sde-vig-qseed-off = <0xa00>; + qcom,sde-vig-qseed-size = <0xa0>; }; qcom,platform-supply-entries { diff --git a/arch/arm/boot/dts/qcom/msm8998-v2.dtsi b/arch/arm/boot/dts/qcom/msm8998-v2.dtsi index acdd4bdcd95b..abc0247b4475 100644 --- a/arch/arm/boot/dts/qcom/msm8998-v2.dtsi +++ b/arch/arm/boot/dts/qcom/msm8998-v2.dtsi @@ -293,7 +293,8 @@ < 1900800 1525 >; cpu-to-dev-map-4 = < 2112000 1525 >, - < 2496000 5195 >; + < 2342400 5195 >, + < 2496000 13763 >; }; }; diff --git a/arch/arm/boot/dts/qcom/msm8998.dtsi b/arch/arm/boot/dts/qcom/msm8998.dtsi index 
eafa6b841c17..76e3282d327e 100644 --- a/arch/arm/boot/dts/qcom/msm8998.dtsi +++ b/arch/arm/boot/dts/qcom/msm8998.dtsi @@ -3106,6 +3106,8 @@ compatible = "qcom,wcn3990-wifi"; reg = <0x18800000 0x800000>; reg-names = "membase"; + clocks = <&clock_gcc clk_rf_clk2_pin>; + clock-names = "cxo_ref_clk_pin"; interrupts = <0 413 0 /* CE0 */ >, <0 414 0 /* CE1 */ >, @@ -3119,6 +3121,12 @@ <0 423 0 /* CE9 */ >, <0 424 0 /* CE10 */ >, <0 425 0 /* CE11 */ >; + vdd-0.8-cx-mx-supply = <&pm8998_l5>; + vdd-1.8-xo-supply = <&pm8998_l7_pin_ctrl>; + vdd-1.3-rfa-supply = <&pm8998_l17_pin_ctrl>; + vdd-3.3-ch0-supply = <&pm8998_l25_pin_ctrl>; + qcom,vdd-0.8-cx-mx-config = <800000 800000>; + qcom,vdd-3.3-ch0-config = <3104000 3312000>; }; qcom,icnss@18800000 { diff --git a/arch/arm/boot/dts/qcom/sda630-cdp.dts b/arch/arm/boot/dts/qcom/sda630-cdp.dts index 665fa94d9713..beefb2de1ce9 100644 --- a/arch/arm/boot/dts/qcom/sda630-cdp.dts +++ b/arch/arm/boot/dts/qcom/sda630-cdp.dts @@ -22,7 +22,8 @@ compatible = "qcom,sda630-cdp", "qcom,sda630", "qcom,cdp"; qcom,board-id = <1 0>; qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>, - <0x0001001b 0x0201011a 0x0 0x0>; + <0x0001001b 0x0201011a 0x0 0x0>, + <0x0001001b 0x0102001a 0x0 0x0>; }; &tavil_snd { diff --git a/arch/arm/boot/dts/qcom/sda630-mtp.dts b/arch/arm/boot/dts/qcom/sda630-mtp.dts index 08a996ddb709..41afa720c389 100644 --- a/arch/arm/boot/dts/qcom/sda630-mtp.dts +++ b/arch/arm/boot/dts/qcom/sda630-mtp.dts @@ -22,7 +22,8 @@ compatible = "qcom,sda630-mtp", "qcom,sda630", "qcom,mtp"; qcom,board-id = <8 0>; qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>, - <0x0001001b 0x0201011a 0x0 0x0>; + <0x0001001b 0x0201011a 0x0 0x0>, + <0x0001001b 0x0102001a 0x0 0x0>; }; &tavil_snd { diff --git a/arch/arm/boot/dts/qcom/sda630-pm660a-cdp.dts b/arch/arm/boot/dts/qcom/sda630-pm660a-cdp.dts index 6094d22c1c92..01455d054e03 100644 --- a/arch/arm/boot/dts/qcom/sda630-pm660a-cdp.dts +++ b/arch/arm/boot/dts/qcom/sda630-pm660a-cdp.dts @@ -22,7 +22,9 @@ model = "Qualcomm Technologies, Inc. SDA 630 PM660 + PM660A CDP"; compatible = "qcom,sda630-cdp", "qcom,sda630", "qcom,cdp"; qcom,board-id = <1 0>; - qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>; + qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>, + <0x0001001b 0x0002001a 0x0 0x0>, + <0x0001001b 0x0202001a 0x0 0x0>; }; &tavil_snd { diff --git a/arch/arm/boot/dts/qcom/sda630-pm660a-mtp.dts b/arch/arm/boot/dts/qcom/sda630-pm660a-mtp.dts index 49c10129aada..3dd15d889afd 100644 --- a/arch/arm/boot/dts/qcom/sda630-pm660a-mtp.dts +++ b/arch/arm/boot/dts/qcom/sda630-pm660a-mtp.dts @@ -22,7 +22,9 @@ model = "Qualcomm Technologies, Inc. SDA 630 PM660 + PM660A MTP"; compatible = "qcom,sda630-mtp", "qcom,sda630", "qcom,mtp"; qcom,board-id = <8 0>; - qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>; + qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>, + <0x0001001b 0x0002001a 0x0 0x0>, + <0x0001001b 0x0202001a 0x0 0x0>; }; &tavil_snd { diff --git a/arch/arm/boot/dts/qcom/sda630-pm660a-qrd-hdk.dts b/arch/arm/boot/dts/qcom/sda630-pm660a-qrd-hdk.dts index 4c4c758daa29..155099b5be9d 100644 --- a/arch/arm/boot/dts/qcom/sda630-pm660a-qrd-hdk.dts +++ b/arch/arm/boot/dts/qcom/sda630-pm660a-qrd-hdk.dts @@ -22,7 +22,9 @@ model = "Qualcomm Technologies, Inc. 
SDA 630 PM660 + PM660A QRD HDK630"; compatible = "qcom,sda630-qrd", "qcom,sda630", "qcom,qrd"; qcom,board-id = <0x0016000b 0x00>; - qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>; + qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>, + <0x0001001b 0x0002001a 0x0 0x0>, + <0x0001001b 0x0202001a 0x0 0x0>; }; &pm660a_oledb { diff --git a/arch/arm/boot/dts/qcom/sda636-cdp.dts b/arch/arm/boot/dts/qcom/sda636-cdp.dts index a4cc4b3f4662..2962a6bc3691 100644 --- a/arch/arm/boot/dts/qcom/sda636-cdp.dts +++ b/arch/arm/boot/dts/qcom/sda636-cdp.dts @@ -22,7 +22,8 @@ compatible = "qcom,sda636-cdp", "qcom,sda636", "qcom,cdp"; qcom,board-id = <1 0>; qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>, - <0x0001001b 0x0201011a 0x0 0x0>; + <0x0001001b 0x0201011a 0x0 0x0>, + <0x0001001b 0x0102001a 0x0 0x0>; }; &tavil_snd { diff --git a/arch/arm/boot/dts/qcom/sda636-mtp.dts b/arch/arm/boot/dts/qcom/sda636-mtp.dts index 3fd19437b3eb..237d27791b94 100644 --- a/arch/arm/boot/dts/qcom/sda636-mtp.dts +++ b/arch/arm/boot/dts/qcom/sda636-mtp.dts @@ -22,7 +22,8 @@ compatible = "qcom,sda636-mtp", "qcom,sda636", "qcom,mtp"; qcom,board-id = <8 0>; qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>, - <0x0001001b 0x0201011a 0x0 0x0>; + <0x0001001b 0x0201011a 0x0 0x0>, + <0x0001001b 0x0102001a 0x0 0x0>; }; &tavil_snd { diff --git a/arch/arm/boot/dts/qcom/sda636-pm660a-cdp.dts b/arch/arm/boot/dts/qcom/sda636-pm660a-cdp.dts index 72db1be68c83..5f0e5275e828 100644 --- a/arch/arm/boot/dts/qcom/sda636-pm660a-cdp.dts +++ b/arch/arm/boot/dts/qcom/sda636-pm660a-cdp.dts @@ -22,7 +22,9 @@ model = "Qualcomm Technologies, Inc. SDA 636 PM660 + PM660A CDP"; compatible = "qcom,sda636-cdp", "qcom,sda636", "qcom,cdp"; qcom,board-id = <1 0>; - qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>; + qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>, + <0x0001001b 0x0002001a 0x0 0x0>, + <0x0001001b 0x0202001a 0x0 0x0>; }; &tavil_snd { diff --git a/arch/arm/boot/dts/qcom/sda636-pm660a-mtp.dts b/arch/arm/boot/dts/qcom/sda636-pm660a-mtp.dts index fd5b94420102..c15bc159dffd 100644 --- a/arch/arm/boot/dts/qcom/sda636-pm660a-mtp.dts +++ b/arch/arm/boot/dts/qcom/sda636-pm660a-mtp.dts @@ -22,7 +22,9 @@ model = "Qualcomm Technologies, Inc. SDA 636 PM660 + PM660A MTP"; compatible = "qcom,sda636-mtp", "qcom,sda636", "qcom,mtp"; qcom,board-id = <8 0>; - qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>; + qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>, + <0x0001001b 0x0002001a 0x0 0x0>, + <0x0001001b 0x0202001a 0x0 0x0>; }; &tavil_snd { diff --git a/arch/arm/boot/dts/qcom/sda636-pm660a-qrd-hdk.dts b/arch/arm/boot/dts/qcom/sda636-pm660a-qrd-hdk.dts index 3535c54f5dd0..ccc1be75f39b 100644 --- a/arch/arm/boot/dts/qcom/sda636-pm660a-qrd-hdk.dts +++ b/arch/arm/boot/dts/qcom/sda636-pm660a-qrd-hdk.dts @@ -98,7 +98,7 @@ }; }; - smb138x_parallel_slave: qcom,smb138x-parallel-slave@1000 { + smb1381_charger: qcom,smb1381-charger@1000 { compatible = "qcom,smb138x-parallel-slave"; qcom,pmic-revid = <&smb138x_revid>; reg = <0x1000 0x700>; @@ -158,7 +158,9 @@ model = "Qualcomm Technologies, Inc. 
SDA 636 PM660 + PM660A QRD HDK636"; compatible = "qcom,sda636-qrd", "qcom,sda636", "qcom,qrd"; qcom,board-id = <0x0016000b 0>; - qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>; + qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>, + <0x0001001b 0x0002001a 0x0 0x0>, + <0x0001001b 0x0202001a 0x0 0x0>; }; &pm660a_oledb { diff --git a/arch/arm/boot/dts/qcom/sda658-cdp.dts b/arch/arm/boot/dts/qcom/sda658-cdp.dts index 9992963b8705..5db30e379a2d 100644 --- a/arch/arm/boot/dts/qcom/sda658-cdp.dts +++ b/arch/arm/boot/dts/qcom/sda658-cdp.dts @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -21,5 +21,6 @@ compatible = "qcom,sda658-cdp", "qcom,sda658", "qcom,cdp"; qcom,board-id = <1 0>; qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>, - <0x0001001b 0x0201011a 0x0 0x0>; + <0x0001001b 0x0201011a 0x0 0x0>, + <0x0001001b 0x0102001a 0x0 0x0>; }; diff --git a/arch/arm/boot/dts/qcom/sda658-mtp.dts b/arch/arm/boot/dts/qcom/sda658-mtp.dts index f4322ecfd701..138c6fdc74df 100644 --- a/arch/arm/boot/dts/qcom/sda658-mtp.dts +++ b/arch/arm/boot/dts/qcom/sda658-mtp.dts @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -21,5 +21,6 @@ compatible = "qcom,sda658-mtp", "qcom,sda658", "qcom,mtp"; qcom,board-id = <8 0>; qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>, - <0x0001001b 0x0201011a 0x0 0x0>; + <0x0001001b 0x0201011a 0x0 0x0>, + <0x0001001b 0x0102001a 0x0 0x0>; }; diff --git a/arch/arm/boot/dts/qcom/sda658-pm660a-cdp.dts b/arch/arm/boot/dts/qcom/sda658-pm660a-cdp.dts index c280c4afda51..b4e7996e0bb6 100644 --- a/arch/arm/boot/dts/qcom/sda658-pm660a-cdp.dts +++ b/arch/arm/boot/dts/qcom/sda658-pm660a-cdp.dts @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -21,5 +21,7 @@ model = "Qualcomm Technologies, Inc. SDA 658 PM660 + PM660A CDP"; compatible = "qcom,sda658-cdp", "qcom,sda658", "qcom,cdp"; qcom,board-id = <1 0>; - qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>; + qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>, + <0x0001001b 0x0002001a 0x0 0x0>, + <0x0001001b 0x0202001a 0x0 0x0>; }; diff --git a/arch/arm/boot/dts/qcom/sda658-pm660a-mtp.dts b/arch/arm/boot/dts/qcom/sda658-pm660a-mtp.dts index ba8741e2a068..52820a557ecf 100644 --- a/arch/arm/boot/dts/qcom/sda658-pm660a-mtp.dts +++ b/arch/arm/boot/dts/qcom/sda658-pm660a-mtp.dts @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -21,5 +21,7 @@ model = "Qualcomm Technologies, Inc. 
SDA 658 PM660 + PM660A MTP"; compatible = "qcom,sda658-mtp", "qcom,sda658", "qcom,mtp"; qcom,board-id = <8 0>; - qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>; + qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>, + <0x0001001b 0x0002001a 0x0 0x0>, + <0x0001001b 0x0202001a 0x0 0x0>; }; diff --git a/arch/arm/boot/dts/qcom/sda660-cdp.dts b/arch/arm/boot/dts/qcom/sda660-cdp.dts index 92097729087b..4299a0957455 100644 --- a/arch/arm/boot/dts/qcom/sda660-cdp.dts +++ b/arch/arm/boot/dts/qcom/sda660-cdp.dts @@ -22,7 +22,8 @@ compatible = "qcom,sda660-cdp", "qcom,sda660", "qcom,cdp"; qcom,board-id = <1 0>; qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>, - <0x0001001b 0x0201011a 0x0 0x0>; + <0x0001001b 0x0201011a 0x0 0x0>, + <0x0001001b 0x0102001a 0x0 0x0>; }; &tavil_snd { diff --git a/arch/arm/boot/dts/qcom/sda660-mtp.dts b/arch/arm/boot/dts/qcom/sda660-mtp.dts index 027137e54c0e..71263375b1c0 100644 --- a/arch/arm/boot/dts/qcom/sda660-mtp.dts +++ b/arch/arm/boot/dts/qcom/sda660-mtp.dts @@ -22,7 +22,8 @@ compatible = "qcom,sda660-mtp", "qcom,sda660", "qcom,mtp"; qcom,board-id = <8 0>; qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>, - <0x0001001b 0x0201011a 0x0 0x0>; + <0x0001001b 0x0201011a 0x0 0x0>, + <0x0001001b 0x0102001a 0x0 0x0>; }; &tavil_snd { diff --git a/arch/arm/boot/dts/qcom/sda660-pm660a-cdp.dts b/arch/arm/boot/dts/qcom/sda660-pm660a-cdp.dts index a46083a00298..38900dab7596 100644 --- a/arch/arm/boot/dts/qcom/sda660-pm660a-cdp.dts +++ b/arch/arm/boot/dts/qcom/sda660-pm660a-cdp.dts @@ -22,7 +22,9 @@ model = "Qualcomm Technologies, Inc. SDA 660 PM660 + PM660A CDP"; compatible = "qcom,sda660-cdp", "qcom,sda660", "qcom,cdp"; qcom,board-id = <1 0>; - qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>; + qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>, + <0x0001001b 0x0002001a 0x0 0x0>, + <0x0001001b 0x0202001a 0x0 0x0>; }; &tavil_snd { diff --git a/arch/arm/boot/dts/qcom/sda660-pm660a-mtp.dts b/arch/arm/boot/dts/qcom/sda660-pm660a-mtp.dts index d94cf8ea1eb5..98ab098d6cdc 100644 --- a/arch/arm/boot/dts/qcom/sda660-pm660a-mtp.dts +++ b/arch/arm/boot/dts/qcom/sda660-pm660a-mtp.dts @@ -22,7 +22,9 @@ model = "Qualcomm Technologies, Inc. SDA 660 PM660 + PM660A MTP"; compatible = "qcom,sda660-mtp", "qcom,sda660", "qcom,mtp"; qcom,board-id = <8 0>; - qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>; + qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>, + <0x0001001b 0x0002001a 0x0 0x0>, + <0x0001001b 0x0202001a 0x0 0x0>; }; &tavil_snd { diff --git a/arch/arm/boot/dts/qcom/sda660-pm660a-qrd-hdk.dts b/arch/arm/boot/dts/qcom/sda660-pm660a-qrd-hdk.dts index 0f4b462fd57b..0d7b6c0341b5 100644 --- a/arch/arm/boot/dts/qcom/sda660-pm660a-qrd-hdk.dts +++ b/arch/arm/boot/dts/qcom/sda660-pm660a-qrd-hdk.dts @@ -98,7 +98,7 @@ }; }; - smb138x_parallel_slave: qcom,smb138x-parallel-slave@1000 { + smb1381_charger: qcom,smb1381-charger@1000 { compatible = "qcom,smb138x-parallel-slave"; qcom,pmic-revid = <&smb138x_revid>; reg = <0x1000 0x700>; @@ -158,7 +158,9 @@ model = "Qualcomm Technologies, Inc. 
SDA 660 PM660 + PM660A QRD HDK660"; compatible = "qcom,sda660-qrd", "qcom,sda660", "qcom,qrd"; qcom,board-id = <0x0016000b 0>; - qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>; + qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>, + <0x0001001b 0x0002001a 0x0 0x0>, + <0x0001001b 0x0202001a 0x0 0x0>; }; &pm660a_oledb { diff --git a/arch/arm/boot/dts/qcom/sdm630-cdp.dts b/arch/arm/boot/dts/qcom/sdm630-cdp.dts index 973df0df3be5..8da3dd6726e6 100644 --- a/arch/arm/boot/dts/qcom/sdm630-cdp.dts +++ b/arch/arm/boot/dts/qcom/sdm630-cdp.dts @@ -22,7 +22,8 @@ compatible = "qcom,sdm630-cdp", "qcom,sdm630", "qcom,cdp"; qcom,board-id = <1 0>; qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>, - <0x0001001b 0x0201011a 0x0 0x0>; + <0x0001001b 0x0201011a 0x0 0x0>, + <0x0001001b 0x0102001a 0x0 0x0>; }; &tavil_snd { diff --git a/arch/arm/boot/dts/qcom/sdm630-headset-jacktype-no-cdp.dts b/arch/arm/boot/dts/qcom/sdm630-headset-jacktype-no-cdp.dts index 4db377dc755a..27ef94e8a29a 100644 --- a/arch/arm/boot/dts/qcom/sdm630-headset-jacktype-no-cdp.dts +++ b/arch/arm/boot/dts/qcom/sdm630-headset-jacktype-no-cdp.dts @@ -22,5 +22,6 @@ compatible = "qcom,sdm630-cdp", "qcom,sdm630", "qcom,cdp"; qcom,board-id = <1 2>; qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>, - <0x0001001b 0x0201011a 0x0 0x0>; + <0x0001001b 0x0201011a 0x0 0x0>, + <0x0001001b 0x0102001a 0x0 0x0>; }; diff --git a/arch/arm/boot/dts/qcom/sdm630-internal-codec-cdp.dts b/arch/arm/boot/dts/qcom/sdm630-internal-codec-cdp.dts index baa55fa15160..39099ed1e97e 100644 --- a/arch/arm/boot/dts/qcom/sdm630-internal-codec-cdp.dts +++ b/arch/arm/boot/dts/qcom/sdm630-internal-codec-cdp.dts @@ -22,5 +22,6 @@ compatible = "qcom,sdm630-cdp", "qcom,sdm630", "qcom,cdp"; qcom,board-id = <1 1>; qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>, - <0x0001001b 0x0201011a 0x0 0x0>; + <0x0001001b 0x0201011a 0x0 0x0>, + <0x0001001b 0x0102001a 0x0 0x0>; }; diff --git a/arch/arm/boot/dts/qcom/sdm630-internal-codec-mtp.dts b/arch/arm/boot/dts/qcom/sdm630-internal-codec-mtp.dts index b469a59d7818..d9f30b41ed8e 100644 --- a/arch/arm/boot/dts/qcom/sdm630-internal-codec-mtp.dts +++ b/arch/arm/boot/dts/qcom/sdm630-internal-codec-mtp.dts @@ -22,7 +22,8 @@ compatible = "qcom,sdm630-mtp", "qcom,sdm630", "qcom,mtp"; qcom,board-id = <8 1>; qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>, - <0x0001001b 0x0201011a 0x0 0x0>; + <0x0001001b 0x0201011a 0x0 0x0>, + <0x0001001b 0x0102001a 0x0 0x0>; }; &int_codec { diff --git a/arch/arm/boot/dts/qcom/sdm630-internal-codec-pm660a-cdp.dts b/arch/arm/boot/dts/qcom/sdm630-internal-codec-pm660a-cdp.dts index c4e71835b701..0ec2e17f10a5 100644 --- a/arch/arm/boot/dts/qcom/sdm630-internal-codec-pm660a-cdp.dts +++ b/arch/arm/boot/dts/qcom/sdm630-internal-codec-pm660a-cdp.dts @@ -22,5 +22,7 @@ model = "Qualcomm Technologies, Inc. SDM 630 PM660 + PM660A Int. Audio Codec CDP"; compatible = "qcom,sdm630-cdp", "qcom,sdm630", "qcom,cdp"; qcom,board-id = <1 1>; - qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>; + qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>, + <0x0001001b 0x0002001a 0x0 0x0>, + <0x0001001b 0x0202001a 0x0 0x0>; }; diff --git a/arch/arm/boot/dts/qcom/sdm630-internal-codec-pm660a-mtp.dts b/arch/arm/boot/dts/qcom/sdm630-internal-codec-pm660a-mtp.dts index e11cdfbed668..d4f1214dc191 100644 --- a/arch/arm/boot/dts/qcom/sdm630-internal-codec-pm660a-mtp.dts +++ b/arch/arm/boot/dts/qcom/sdm630-internal-codec-pm660a-mtp.dts @@ -22,7 +22,9 @@ model = "Qualcomm Technologies, Inc. SDM 630 PM660 + PM660A Int. 
Audio Codec MTP"; compatible = "qcom,sdm630-mtp", "qcom,sdm630", "qcom,mtp"; qcom,board-id = <8 1>; - qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>; + qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>, + <0x0001001b 0x0002001a 0x0 0x0>, + <0x0001001b 0x0202001a 0x0 0x0>; }; &int_codec { diff --git a/arch/arm/boot/dts/qcom/sdm630-mtp.dts b/arch/arm/boot/dts/qcom/sdm630-mtp.dts index b1a9bb86149d..3604ce486633 100644 --- a/arch/arm/boot/dts/qcom/sdm630-mtp.dts +++ b/arch/arm/boot/dts/qcom/sdm630-mtp.dts @@ -22,7 +22,8 @@ compatible = "qcom,sdm630-mtp", "qcom,sdm630", "qcom,mtp"; qcom,board-id = <8 0>; qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>, - <0x0001001b 0x0201011a 0x0 0x0>; + <0x0001001b 0x0201011a 0x0 0x0>, + <0x0001001b 0x0102001a 0x0 0x0>; }; &tavil_snd { diff --git a/arch/arm/boot/dts/qcom/sdm630-pm660a-cdp.dts b/arch/arm/boot/dts/qcom/sdm630-pm660a-cdp.dts index 7e3e9a0cca59..a6f01eaa2a43 100644 --- a/arch/arm/boot/dts/qcom/sdm630-pm660a-cdp.dts +++ b/arch/arm/boot/dts/qcom/sdm630-pm660a-cdp.dts @@ -22,7 +22,9 @@ model = "Qualcomm Technologies, Inc. SDM 630 PM660 + PM660A CDP"; compatible = "qcom,sdm630-cdp", "qcom,sdm630", "qcom,cdp"; qcom,board-id = <1 0>; - qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>; + qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>, + <0x0001001b 0x0002001a 0x0 0x0>, + <0x0001001b 0x0202001a 0x0 0x0>; }; &tavil_snd { diff --git a/arch/arm/boot/dts/qcom/sdm630-pm660a-headset-jacktype-no-cdp.dts b/arch/arm/boot/dts/qcom/sdm630-pm660a-headset-jacktype-no-cdp.dts index 15936f47da7b..1596f7533581 100644 --- a/arch/arm/boot/dts/qcom/sdm630-pm660a-headset-jacktype-no-cdp.dts +++ b/arch/arm/boot/dts/qcom/sdm630-pm660a-headset-jacktype-no-cdp.dts @@ -22,5 +22,7 @@ model = "Qualcomm Technologies, Inc. SDM 630 PM660 + PM660A, Headset Jacktype NO, CDP"; compatible = "qcom,sdm630-cdp", "qcom,sdm630", "qcom,cdp"; qcom,board-id = <1 2>; - qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>; + qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>, + <0x0001001b 0x0002001a 0x0 0x0>, + <0x0001001b 0x0202001a 0x0 0x0>; }; diff --git a/arch/arm/boot/dts/qcom/sdm630-pm660a-mtp.dts b/arch/arm/boot/dts/qcom/sdm630-pm660a-mtp.dts index a522b7ad1d5f..418b4a1e819f 100644 --- a/arch/arm/boot/dts/qcom/sdm630-pm660a-mtp.dts +++ b/arch/arm/boot/dts/qcom/sdm630-pm660a-mtp.dts @@ -22,7 +22,9 @@ model = "Qualcomm Technologies, Inc. SDM 630 PM660 + PM660A MTP"; compatible = "qcom,sdm630-mtp", "qcom,sdm630", "qcom,mtp"; qcom,board-id = <8 0>; - qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>; + qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>, + <0x0001001b 0x0002001a 0x0 0x0>, + <0x0001001b 0x0202001a 0x0 0x0>; }; &tavil_snd { diff --git a/arch/arm/boot/dts/qcom/sdm630-pm660a-qrd.dts b/arch/arm/boot/dts/qcom/sdm630-pm660a-qrd.dts index deb10b591444..95b03f68b30f 100644 --- a/arch/arm/boot/dts/qcom/sdm630-pm660a-qrd.dts +++ b/arch/arm/boot/dts/qcom/sdm630-pm660a-qrd.dts @@ -22,7 +22,9 @@ model = "Qualcomm Technologies, Inc. 
SDM 630 PM660 + PM660A QRD"; compatible = "qcom,sdm630-qrd", "qcom,sdm630", "qcom,qrd"; qcom,board-id = <0x0002000b 0x00>; - qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>; + qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>, + <0x0001001b 0x0002001a 0x0 0x0>, + <0x0001001b 0x0202001a 0x0 0x0>; }; &int_codec { diff --git a/arch/arm/boot/dts/qcom/sdm630-qrd.dtsi b/arch/arm/boot/dts/qcom/sdm630-qrd.dtsi index c24a41656f3a..384e24d221c4 100644 --- a/arch/arm/boot/dts/qcom/sdm630-qrd.dtsi +++ b/arch/arm/boot/dts/qcom/sdm630-qrd.dtsi @@ -92,7 +92,7 @@ }; }; - smb138x_parallel_slave: qcom,smb138x-parallel-slave@1000 { + smb1381_charger: qcom,smb1381-charger@1000 { compatible = "qcom,smb138x-parallel-slave"; qcom,pmic-revid = <&smb138x_revid>; reg = <0x1000 0x700>; @@ -399,9 +399,9 @@ &qusb_phy0 { qcom,qusb-phy-init-seq = <0xf8 0x80 - 0x80 0x84 + 0x83 0x84 0x83 0x88 - 0xc7 0x8c + 0xc3 0x8c 0x30 0x08 0x79 0x0c 0x21 0x10 diff --git a/arch/arm/boot/dts/qcom/sdm630-usbc-audio-mtp.dts b/arch/arm/boot/dts/qcom/sdm630-usbc-audio-mtp.dts index eb089b524fef..309e9c6730a9 100644 --- a/arch/arm/boot/dts/qcom/sdm630-usbc-audio-mtp.dts +++ b/arch/arm/boot/dts/qcom/sdm630-usbc-audio-mtp.dts @@ -22,7 +22,8 @@ compatible = "qcom,sdm630-mtp", "qcom,sdm630", "qcom,mtp"; qcom,board-id = <8 2>; qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>, - <0x0001001b 0x0201011a 0x0 0x0>; + <0x0001001b 0x0201011a 0x0 0x0>, + <0x0001001b 0x0102001a 0x0 0x0>; }; &tavil_snd { diff --git a/arch/arm/boot/dts/qcom/sdm630.dtsi b/arch/arm/boot/dts/qcom/sdm630.dtsi index 5618f02e34f2..e918864a3df7 100644 --- a/arch/arm/boot/dts/qcom/sdm630.dtsi +++ b/arch/arm/boot/dts/qcom/sdm630.dtsi @@ -1366,7 +1366,6 @@ qcom,up-timer = <1000 1000>; qcom,down-timer = <1000 1000>; - qcom,pc-override-index = <0 0>; qcom,set-ret-inactive; qcom,enable-llm-freq-vote; qcom,llm-freq-up-timer = <327675 327675>; diff --git a/arch/arm/boot/dts/qcom/sdm636-camera-sensor-mtp.dtsi b/arch/arm/boot/dts/qcom/sdm636-camera-sensor-mtp.dtsi new file mode 100644 index 000000000000..ab6a7aebd6b9 --- /dev/null +++ b/arch/arm/boot/dts/qcom/sdm636-camera-sensor-mtp.dtsi @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +&cci { + actuator0: qcom,actuator@0 { + cell-index = <0>; + reg = <0x0>; + compatible = "qcom,actuator"; + qcom,cci-master = <0>; + cam_vaf-supply = <&pm660l_l8>; + qcom,cam-vreg-name = "cam_vaf"; + qcom,cam-vreg-min-voltage = <2800000>; + qcom,cam-vreg-max-voltage = <3400000>; + qcom,cam-vreg-op-mode = <100000>; + }; + + actuator1: qcom,actuator@1 { + cell-index = <1>; + reg = <0x1>; + compatible = "qcom,actuator"; + qcom,cci-master = <1>; + cam_vaf-supply = <&pm660l_l8>; + qcom,cam-vreg-name = "cam_vaf"; + qcom,cam-vreg-min-voltage = <2800000>; + qcom,cam-vreg-max-voltage = <3400000>; + qcom,cam-vreg-op-mode = <100000>; + }; + + actuator2: qcom,actuator@2 { + cell-index = <2>; + reg = <0x2>; + compatible = "qcom,actuator"; + qcom,cci-master = <1>; + cam_vaf-supply = <&pm660l_l8>; + qcom,cam-vreg-name = "cam_vaf"; + qcom,cam-vreg-min-voltage = <2800000>; + qcom,cam-vreg-max-voltage = <3400000>; + qcom,cam-vreg-op-mode = <100000>; + }; +}; diff --git a/arch/arm/boot/dts/qcom/sdm636-cdp.dts b/arch/arm/boot/dts/qcom/sdm636-cdp.dts index 47f5eba8f491..ad1cac9c0c90 100644 --- a/arch/arm/boot/dts/qcom/sdm636-cdp.dts +++ b/arch/arm/boot/dts/qcom/sdm636-cdp.dts @@ -22,7 +22,8 @@ compatible = "qcom,sdm636-cdp", "qcom,sdm636", "qcom,cdp"; qcom,board-id = <1 0>; qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>, - <0x0001001b 0x0201011a 0x0 0x0>; + <0x0001001b 0x0201011a 0x0 0x0>, + <0x0001001b 0x0102001a 0x0 0x0>; }; &tavil_snd { diff --git a/arch/arm/boot/dts/qcom/sdm636-cdp.dtsi b/arch/arm/boot/dts/qcom/sdm636-cdp.dtsi index 89d42209b34c..279a542be7e4 100644 --- a/arch/arm/boot/dts/qcom/sdm636-cdp.dtsi +++ b/arch/arm/boot/dts/qcom/sdm636-cdp.dtsi @@ -14,3 +14,14 @@ / { }; +&mdss_dsi { + hw-config = "single_dsi"; +}; + +&mdss_dsi0 { + qcom,dsi-pref-prim-pan = <&dsi_nt35695b_truly_fhd_video>; +}; + +&mdss_dsi1 { + status = "disabled"; +}; diff --git a/arch/arm/boot/dts/qcom/sdm636-headset-jacktype-no-cdp.dts b/arch/arm/boot/dts/qcom/sdm636-headset-jacktype-no-cdp.dts index 4d47ac1ef517..c87ace6938bd 100644 --- a/arch/arm/boot/dts/qcom/sdm636-headset-jacktype-no-cdp.dts +++ b/arch/arm/boot/dts/qcom/sdm636-headset-jacktype-no-cdp.dts @@ -22,5 +22,6 @@ compatible = "qcom,sdm636-cdp", "qcom,sdm636", "qcom,cdp"; qcom,board-id = <1 2>; qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>, - <0x0001001b 0x0201011a 0x0 0x0>; + <0x0001001b 0x0201011a 0x0 0x0>, + <0x0001001b 0x0102001a 0x0 0x0>; }; diff --git a/arch/arm/boot/dts/qcom/sdm636-internal-codec-cdp.dts b/arch/arm/boot/dts/qcom/sdm636-internal-codec-cdp.dts index 725e9739a487..1de6c4c13300 100644 --- a/arch/arm/boot/dts/qcom/sdm636-internal-codec-cdp.dts +++ b/arch/arm/boot/dts/qcom/sdm636-internal-codec-cdp.dts @@ -22,5 +22,6 @@ compatible = "qcom,sdm636-cdp", "qcom,sdm636", "qcom,cdp"; qcom,board-id = <1 1>; qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>, - <0x0001001b 0x0201011a 0x0 0x0>; + <0x0001001b 0x0201011a 0x0 0x0>, + <0x0001001b 0x0102001a 0x0 0x0>; }; diff --git a/arch/arm/boot/dts/qcom/sdm636-internal-codec-mtp.dts b/arch/arm/boot/dts/qcom/sdm636-internal-codec-mtp.dts index 3630e329b13d..3522e67cf87e 100644 --- a/arch/arm/boot/dts/qcom/sdm636-internal-codec-mtp.dts +++ b/arch/arm/boot/dts/qcom/sdm636-internal-codec-mtp.dts @@ -22,7 +22,8 @@ compatible = "qcom,sdm636-mtp", "qcom,sdm636", "qcom,mtp"; qcom,board-id = <8 1>; qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>, - <0x0001001b 0x0201011a 0x0 0x0>; + <0x0001001b 0x0201011a 0x0 0x0>, + <0x0001001b 0x0102001a 0x0 0x0>; }; &int_codec { diff --git 
a/arch/arm/boot/dts/qcom/sdm636-internal-codec-pm660a-cdp.dts b/arch/arm/boot/dts/qcom/sdm636-internal-codec-pm660a-cdp.dts index 416cd3754a10..e51c2e4709d2 100644 --- a/arch/arm/boot/dts/qcom/sdm636-internal-codec-pm660a-cdp.dts +++ b/arch/arm/boot/dts/qcom/sdm636-internal-codec-pm660a-cdp.dts @@ -22,5 +22,7 @@ model = "Qualcomm Technologies, Inc. SDM 636 PM660 + PM660A Int. Audio Codec CDP"; compatible = "qcom,sdm636-cdp", "qcom,sdm636", "qcom,cdp"; qcom,board-id = <1 1>; - qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>; + qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>, + <0x0001001b 0x0002001a 0x0 0x0>, + <0x0001001b 0x0202001a 0x0 0x0>; }; diff --git a/arch/arm/boot/dts/qcom/sdm636-internal-codec-pm660a-mtp.dts b/arch/arm/boot/dts/qcom/sdm636-internal-codec-pm660a-mtp.dts index e9ad9048c6e9..e4d980a15a0d 100644 --- a/arch/arm/boot/dts/qcom/sdm636-internal-codec-pm660a-mtp.dts +++ b/arch/arm/boot/dts/qcom/sdm636-internal-codec-pm660a-mtp.dts @@ -22,7 +22,9 @@ model = "Qualcomm Technologies, Inc. SDM 636 PM660 + PM660A Int. Audio Codec MTP"; compatible = "qcom,sdm636-mtp", "qcom,sdm636", "qcom,mtp"; qcom,board-id = <8 1>; - qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>; + qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>, + <0x0001001b 0x0002001a 0x0 0x0>, + <0x0001001b 0x0202001a 0x0 0x0>; }; &int_codec { diff --git a/arch/arm/boot/dts/qcom/sdm636-mtp.dts b/arch/arm/boot/dts/qcom/sdm636-mtp.dts index 7f08e44148b5..8ee5c46c804a 100644 --- a/arch/arm/boot/dts/qcom/sdm636-mtp.dts +++ b/arch/arm/boot/dts/qcom/sdm636-mtp.dts @@ -22,7 +22,8 @@ compatible = "qcom,sdm636-mtp", "qcom,sdm636", "qcom,mtp"; qcom,board-id = <8 0>; qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>, - <0x0001001b 0x0201011a 0x0 0x0>; + <0x0001001b 0x0201011a 0x0 0x0>, + <0x0001001b 0x0102001a 0x0 0x0>; }; &tavil_snd { diff --git a/arch/arm/boot/dts/qcom/sdm636-mtp.dtsi b/arch/arm/boot/dts/qcom/sdm636-mtp.dtsi index 4364db2bc724..30174df3bd6a 100644 --- a/arch/arm/boot/dts/qcom/sdm636-mtp.dtsi +++ b/arch/arm/boot/dts/qcom/sdm636-mtp.dtsi @@ -11,6 +11,18 @@ */ #include "sdm660-mtp.dtsi" +#include "sdm636-camera-sensor-mtp.dtsi" / { }; +&mdss_dsi { + hw-config = "single_dsi"; +}; + +&mdss_dsi0 { + qcom,dsi-pref-prim-pan = <&dsi_nt35695b_truly_fhd_video>; +}; + +&mdss_dsi1 { + status = "disabled"; +}; diff --git a/arch/arm/boot/dts/qcom/sdm636-pm660a-cdp.dts b/arch/arm/boot/dts/qcom/sdm636-pm660a-cdp.dts index 84a28770d5a8..88d19d0c7bb1 100644 --- a/arch/arm/boot/dts/qcom/sdm636-pm660a-cdp.dts +++ b/arch/arm/boot/dts/qcom/sdm636-pm660a-cdp.dts @@ -22,7 +22,9 @@ model = "Qualcomm Technologies, Inc. SDM 636 PM660 + PM660A CDP"; compatible = "qcom,sdm636-cdp", "qcom,sdm636", "qcom,cdp"; qcom,board-id = <1 0>; - qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>; + qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>, + <0x0001001b 0x0002001a 0x0 0x0>, + <0x0001001b 0x0202001a 0x0 0x0>; }; &mdss_dsi { diff --git a/arch/arm/boot/dts/qcom/sdm636-pm660a-headset-jacktype-no-cdp.dts b/arch/arm/boot/dts/qcom/sdm636-pm660a-headset-jacktype-no-cdp.dts index b2517e32ae26..b60fdc773941 100644 --- a/arch/arm/boot/dts/qcom/sdm636-pm660a-headset-jacktype-no-cdp.dts +++ b/arch/arm/boot/dts/qcom/sdm636-pm660a-headset-jacktype-no-cdp.dts @@ -22,7 +22,9 @@ model = "Qualcomm Technologies, Inc. 
SDM 636 PM660 + PM660A, Headset Jacktype NO, CDP"; compatible = "qcom,sdm636-cdp", "qcom,sdm636", "qcom,cdp"; qcom,board-id = <1 2>; - qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>; + qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>, + <0x0001001b 0x0002001a 0x0 0x0>, + <0x0001001b 0x0202001a 0x0 0x0>; }; &mdss_dsi0 { diff --git a/arch/arm/boot/dts/qcom/sdm636-pm660a-mtp.dts b/arch/arm/boot/dts/qcom/sdm636-pm660a-mtp.dts index 966a00af7216..6aaa8d070b42 100644 --- a/arch/arm/boot/dts/qcom/sdm636-pm660a-mtp.dts +++ b/arch/arm/boot/dts/qcom/sdm636-pm660a-mtp.dts @@ -22,7 +22,9 @@ model = "Qualcomm Technologies, Inc. SDM 636 PM660 + PM660A MTP"; compatible = "qcom,sdm636-mtp", "qcom,sdm636", "qcom,mtp"; qcom,board-id = <8 0>; - qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>; + qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>, + <0x0001001b 0x0002001a 0x0 0x0>, + <0x0001001b 0x0202001a 0x0 0x0>; }; &mdss_dsi { diff --git a/arch/arm/boot/dts/qcom/sdm636-pm660a-qrd.dts b/arch/arm/boot/dts/qcom/sdm636-pm660a-qrd.dts index d8b6c744a204..68734c9cd8c6 100644 --- a/arch/arm/boot/dts/qcom/sdm636-pm660a-qrd.dts +++ b/arch/arm/boot/dts/qcom/sdm636-pm660a-qrd.dts @@ -21,7 +21,9 @@ model = "Qualcomm Technologies, Inc. SDM 636 PM660 + PM660A QRD"; compatible = "qcom,sdm636-qrd", "qcom,sdm636", "qcom,qrd"; qcom,board-id = <0x0012000b 0>; - qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>; + qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>, + <0x0001001b 0x0002001a 0x0 0x0>, + <0x0001001b 0x0202001a 0x0 0x0>; }; &pm660a_oledb { diff --git a/arch/arm/boot/dts/qcom/sdm636-qrd.dts b/arch/arm/boot/dts/qcom/sdm636-qrd.dts index a2d22f281a5d..b1977cdc50bc 100644 --- a/arch/arm/boot/dts/qcom/sdm636-qrd.dts +++ b/arch/arm/boot/dts/qcom/sdm636-qrd.dts @@ -21,7 +21,8 @@ compatible = "qcom,sdm636-qrd", "qcom,sdm636", "qcom,qrd"; qcom,board-id = <0x1000b 0>; qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>, - <0x0001001b 0x0201011a 0x0 0x0>; + <0x0001001b 0x0201011a 0x0 0x0>, + <0x0001001b 0x0102001a 0x0 0x0>; }; &mdss_mdp { diff --git a/arch/arm/boot/dts/qcom/sdm636-usbc-audio-mtp.dts b/arch/arm/boot/dts/qcom/sdm636-usbc-audio-mtp.dts index 90879cdf35cc..8519a4891446 100644 --- a/arch/arm/boot/dts/qcom/sdm636-usbc-audio-mtp.dts +++ b/arch/arm/boot/dts/qcom/sdm636-usbc-audio-mtp.dts @@ -22,7 +22,8 @@ compatible = "qcom,sdm636-mtp", "qcom,sdm636", "qcom,mtp"; qcom,board-id = <8 2>; qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>, - <0x0001001b 0x0201011a 0x0 0x0>; + <0x0001001b 0x0201011a 0x0 0x0>, + <0x0001001b 0x0102001a 0x0 0x0>; }; &tavil_snd { diff --git a/arch/arm/boot/dts/qcom/sdm636.dtsi b/arch/arm/boot/dts/qcom/sdm636.dtsi index 227e92fb3893..8024eee279cb 100644 --- a/arch/arm/boot/dts/qcom/sdm636.dtsi +++ b/arch/arm/boot/dts/qcom/sdm636.dtsi @@ -31,6 +31,14 @@ &soc { /delete-node/ qcom,turing@1a300000; + /delete-node/ cti@7068000; + /delete-node/ turing_etm0; + funnel@6042000 { + ports { + /delete-node/ port@4; + }; + }; + devfreq_memlat_4: qcom,arm-memlat-mon-4 { qcom,core-dev-table = < 1113600 762 >, diff --git a/arch/arm/boot/dts/qcom/sdm658-cdp.dts b/arch/arm/boot/dts/qcom/sdm658-cdp.dts index 8569af157049..223177aece50 100644 --- a/arch/arm/boot/dts/qcom/sdm658-cdp.dts +++ b/arch/arm/boot/dts/qcom/sdm658-cdp.dts @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -21,5 +21,6 @@ compatible = "qcom,sdm658-cdp", "qcom,sdm658", "qcom,cdp"; qcom,board-id = <1 0>; qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>, - <0x0001001b 0x0201011a 0x0 0x0>; + <0x0001001b 0x0201011a 0x0 0x0>, + <0x0001001b 0x0102001a 0x0 0x0>; }; diff --git a/arch/arm/boot/dts/qcom/sdm658-internal-codec-cdp.dts b/arch/arm/boot/dts/qcom/sdm658-internal-codec-cdp.dts index d0f5c14223ff..c6cdf6e797af 100644 --- a/arch/arm/boot/dts/qcom/sdm658-internal-codec-cdp.dts +++ b/arch/arm/boot/dts/qcom/sdm658-internal-codec-cdp.dts @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -21,7 +21,8 @@ compatible = "qcom,sdm658-cdp", "qcom,sdm658", "qcom,cdp"; qcom,board-id = <1 1>; qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>, - <0x0001001b 0x0201011a 0x0 0x0>; + <0x0001001b 0x0201011a 0x0 0x0>, + <0x0001001b 0x0102001a 0x0 0x0>; }; &slim_aud { diff --git a/arch/arm/boot/dts/qcom/sdm658-internal-codec-mtp.dts b/arch/arm/boot/dts/qcom/sdm658-internal-codec-mtp.dts index acec15e0615f..32203c9ccaee 100644 --- a/arch/arm/boot/dts/qcom/sdm658-internal-codec-mtp.dts +++ b/arch/arm/boot/dts/qcom/sdm658-internal-codec-mtp.dts @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -21,7 +21,8 @@ compatible = "qcom,sdm658-mtp", "qcom,sdm658", "qcom,mtp"; qcom,board-id = <8 1>; qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>, - <0x0001001b 0x0201011a 0x0 0x0>; + <0x0001001b 0x0201011a 0x0 0x0>, + <0x0001001b 0x0102001a 0x0 0x0>; }; &slim_aud { diff --git a/arch/arm/boot/dts/qcom/sdm658-internal-codec-pm660a-cdp.dts b/arch/arm/boot/dts/qcom/sdm658-internal-codec-pm660a-cdp.dts index b7f2e70ce962..a8d3de6f8962 100644 --- a/arch/arm/boot/dts/qcom/sdm658-internal-codec-pm660a-cdp.dts +++ b/arch/arm/boot/dts/qcom/sdm658-internal-codec-pm660a-cdp.dts @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -21,5 +21,7 @@ model = "Qualcomm Technologies, Inc. SDM 658 PM660 + PM660A Int. Audio Codec CDP"; compatible = "qcom,sdm658-cdp", "qcom,sdm658", "qcom,cdp"; qcom,board-id = <1 1>; - qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>; + qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>, + <0x0001001b 0x0002001a 0x0 0x0>, + <0x0001001b 0x0202001a 0x0 0x0>; }; diff --git a/arch/arm/boot/dts/qcom/sdm658-internal-codec-pm660a-mtp.dts b/arch/arm/boot/dts/qcom/sdm658-internal-codec-pm660a-mtp.dts index 949d7ae7faa5..2689dc099f12 100644 --- a/arch/arm/boot/dts/qcom/sdm658-internal-codec-pm660a-mtp.dts +++ b/arch/arm/boot/dts/qcom/sdm658-internal-codec-pm660a-mtp.dts @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -21,5 +21,7 @@ model = "Qualcomm Technologies, Inc. SDM 658 PM660 + PM660A Int. Audio Codec MTP"; compatible = "qcom,sdm658-mtp", "qcom,sdm658", "qcom,mtp"; qcom,board-id = <8 1>; - qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>; + qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>, + <0x0001001b 0x0002001a 0x0 0x0>, + <0x0001001b 0x0202001a 0x0 0x0>; }; diff --git a/arch/arm/boot/dts/qcom/sdm658-mtp.dts b/arch/arm/boot/dts/qcom/sdm658-mtp.dts index 2fbe9b0a6201..a57bba3b22af 100644 --- a/arch/arm/boot/dts/qcom/sdm658-mtp.dts +++ b/arch/arm/boot/dts/qcom/sdm658-mtp.dts @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -21,5 +21,6 @@ compatible = "qcom,sdm658-mtp", "qcom,sdm658", "qcom,mtp"; qcom,board-id = <8 0>; qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>, - <0x0001001b 0x0201011a 0x0 0x0>; + <0x0001001b 0x0201011a 0x0 0x0>, + <0x0001001b 0x0102001a 0x0 0x0>; }; diff --git a/arch/arm/boot/dts/qcom/sdm658-pm660a-cdp.dts b/arch/arm/boot/dts/qcom/sdm658-pm660a-cdp.dts index 39e2df958347..c719d76613b0 100644 --- a/arch/arm/boot/dts/qcom/sdm658-pm660a-cdp.dts +++ b/arch/arm/boot/dts/qcom/sdm658-pm660a-cdp.dts @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -21,5 +21,7 @@ model = "Qualcomm Technologies, Inc. SDM 658 PM660 + PM660A CDP"; compatible = "qcom,sdm658-cdp", "qcom,sdm658", "qcom,cdp"; qcom,board-id = <1 0>; - qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>; + qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>, + <0x0001001b 0x0002001a 0x0 0x0>, + <0x0001001b 0x0202001a 0x0 0x0>; }; diff --git a/arch/arm/boot/dts/qcom/sdm658-pm660a-mtp.dts b/arch/arm/boot/dts/qcom/sdm658-pm660a-mtp.dts index 4d205ef403f4..4b6b7fa23710 100644 --- a/arch/arm/boot/dts/qcom/sdm658-pm660a-mtp.dts +++ b/arch/arm/boot/dts/qcom/sdm658-pm660a-mtp.dts @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -21,5 +21,7 @@ model = "Qualcomm Technologies, Inc. SDM 658 PM660 + PM660A MTP"; compatible = "qcom,sdm658-mtp", "qcom,sdm658", "qcom,mtp"; qcom,board-id = <8 0>; - qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>; + qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>, + <0x0001001b 0x0002001a 0x0 0x0>, + <0x0001001b 0x0202001a 0x0 0x0>; }; diff --git a/arch/arm/boot/dts/qcom/sdm658-pm660a-qrd.dts b/arch/arm/boot/dts/qcom/sdm658-pm660a-qrd.dts index f1edf6b244a8..cd51a74ba18c 100644 --- a/arch/arm/boot/dts/qcom/sdm658-pm660a-qrd.dts +++ b/arch/arm/boot/dts/qcom/sdm658-pm660a-qrd.dts @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -21,5 +21,7 @@ model = "Qualcomm Technologies, Inc. SDM 658 PM660 + PM660A QRD"; compatible = "qcom,sdm658-qrd", "qcom,sdm658", "qcom,qrd"; qcom,board-id = <0x1000b 0>; - qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>; + qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>, + <0x0001001b 0x0002001a 0x0 0x0>, + <0x0001001b 0x0202001a 0x0 0x0>; }; diff --git a/arch/arm/boot/dts/qcom/sdm658-qrd.dts b/arch/arm/boot/dts/qcom/sdm658-qrd.dts index bd7d76ee1f6c..155b730d52c3 100644 --- a/arch/arm/boot/dts/qcom/sdm658-qrd.dts +++ b/arch/arm/boot/dts/qcom/sdm658-qrd.dts @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -21,5 +21,6 @@ compatible = "qcom,sdm658-qrd", "qcom,sdm658", "qcom,qrd"; qcom,board-id = <0x1000b 0>; qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>, - <0x0001001b 0x0201011a 0x0 0x0>; + <0x0001001b 0x0201011a 0x0 0x0>, + <0x0001001b 0x0102001a 0x0 0x0>; }; diff --git a/arch/arm/boot/dts/qcom/sdm660-cdp.dts b/arch/arm/boot/dts/qcom/sdm660-cdp.dts index 7b4b68af6188..1b634d7aaf56 100644 --- a/arch/arm/boot/dts/qcom/sdm660-cdp.dts +++ b/arch/arm/boot/dts/qcom/sdm660-cdp.dts @@ -22,7 +22,8 @@ compatible = "qcom,sdm660-cdp", "qcom,sdm660", "qcom,cdp"; qcom,board-id = <1 0>; qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>, - <0x0001001b 0x0201011a 0x0 0x0>; + <0x0001001b 0x0201011a 0x0 0x0>, + <0x0001001b 0x0102001a 0x0 0x0>; }; &tavil_snd { diff --git a/arch/arm/boot/dts/qcom/sdm660-gpu.dtsi b/arch/arm/boot/dts/qcom/sdm660-gpu.dtsi index fecb86dcfdeb..2194cf606d29 100644 --- a/arch/arm/boot/dts/qcom/sdm660-gpu.dtsi +++ b/arch/arm/boot/dts/qcom/sdm660-gpu.dtsi @@ -506,6 +506,51 @@ qcom,bus-max = <0>; }; }; + + qcom,gpu-pwrlevels-4 { + #address-cells = <1>; + #size-cells = <0>; + + qcom,speed-bin = <78>; + + qcom,initial-pwrlevel = <1>; + + /* SVS */ + qcom,gpu-pwrlevel@0 { + reg = <0>; + qcom,gpu-freq = <370000000>; + qcom,bus-freq = <8>; + qcom,bus-min = <6>; + qcom,bus-max = <11>; + }; + + /* Low SVS */ + qcom,gpu-pwrlevel@1 { + reg = <1>; + qcom,gpu-freq = <266000000>; + qcom,bus-freq = <3>; + qcom,bus-min = <3>; + qcom,bus-max = <6>; + }; + + /* Min SVS */ + qcom,gpu-pwrlevel@2 { + reg = <2>; + qcom,gpu-freq = <160000000>; + qcom,bus-freq = <3>; + qcom,bus-min = <3>; + qcom,bus-max = <5>; + }; + + /* XO */ + qcom,gpu-pwrlevel@3 { + reg = <3>; + qcom,gpu-freq = <19200000>; + qcom,bus-freq = <0>; + qcom,bus-min = <0>; + qcom,bus-max = <0>; + }; + }; }; }; diff --git a/arch/arm/boot/dts/qcom/sdm660-headset-jacktype-no-cdp.dts b/arch/arm/boot/dts/qcom/sdm660-headset-jacktype-no-cdp.dts index 48cfefb4cdd0..76755d99f6df 100644 --- a/arch/arm/boot/dts/qcom/sdm660-headset-jacktype-no-cdp.dts +++ b/arch/arm/boot/dts/qcom/sdm660-headset-jacktype-no-cdp.dts @@ -22,5 +22,6 @@ compatible = "qcom,sdm660-cdp", "qcom,sdm660", "qcom,cdp"; qcom,board-id = <1 2>; qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>, - <0x0001001b 0x0201011a 0x0 0x0>; + <0x0001001b 0x0201011a 0x0 0x0>, + <0x0001001b 0x0102001a 0x0 0x0>; }; diff --git a/arch/arm/boot/dts/qcom/sdm660-internal-codec-cdp.dts b/arch/arm/boot/dts/qcom/sdm660-internal-codec-cdp.dts index 6755385313b1..a7020158c3a5 100644 --- 
a/arch/arm/boot/dts/qcom/sdm660-internal-codec-cdp.dts +++ b/arch/arm/boot/dts/qcom/sdm660-internal-codec-cdp.dts @@ -22,5 +22,6 @@ compatible = "qcom,sdm660-cdp", "qcom,sdm660", "qcom,cdp"; qcom,board-id = <1 1>; qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>, - <0x0001001b 0x0201011a 0x0 0x0>; + <0x0001001b 0x0201011a 0x0 0x0>, + <0x0001001b 0x0102001a 0x0 0x0>; }; diff --git a/arch/arm/boot/dts/qcom/sdm660-internal-codec-mtp.dts b/arch/arm/boot/dts/qcom/sdm660-internal-codec-mtp.dts index 39da13e7565b..4aeb561e7755 100644 --- a/arch/arm/boot/dts/qcom/sdm660-internal-codec-mtp.dts +++ b/arch/arm/boot/dts/qcom/sdm660-internal-codec-mtp.dts @@ -22,7 +22,8 @@ compatible = "qcom,sdm660-mtp", "qcom,sdm660", "qcom,mtp"; qcom,board-id = <8 1>; qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>, - <0x0001001b 0x0201011a 0x0 0x0>; + <0x0001001b 0x0201011a 0x0 0x0>, + <0x0001001b 0x0102001a 0x0 0x0>; }; &int_codec { diff --git a/arch/arm/boot/dts/qcom/sdm660-internal-codec-pm660a-cdp.dts b/arch/arm/boot/dts/qcom/sdm660-internal-codec-pm660a-cdp.dts index caf8af514237..780330c80565 100644 --- a/arch/arm/boot/dts/qcom/sdm660-internal-codec-pm660a-cdp.dts +++ b/arch/arm/boot/dts/qcom/sdm660-internal-codec-pm660a-cdp.dts @@ -22,5 +22,7 @@ model = "Qualcomm Technologies, Inc. SDM 660 PM660 + PM660A Int. Audio Codec CDP"; compatible = "qcom,sdm660-cdp", "qcom,sdm660", "qcom,cdp"; qcom,board-id = <1 1>; - qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>; + qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>, + <0x0001001b 0x0002001a 0x0 0x0>, + <0x0001001b 0x0202001a 0x0 0x0>; }; diff --git a/arch/arm/boot/dts/qcom/sdm660-internal-codec-pm660a-mtp.dts b/arch/arm/boot/dts/qcom/sdm660-internal-codec-pm660a-mtp.dts index d2ae22879ef4..b9c8c900a973 100644 --- a/arch/arm/boot/dts/qcom/sdm660-internal-codec-pm660a-mtp.dts +++ b/arch/arm/boot/dts/qcom/sdm660-internal-codec-pm660a-mtp.dts @@ -22,7 +22,9 @@ model = "Qualcomm Technologies, Inc. SDM 660 PM660 + PM660A Int. Audio Codec MTP"; compatible = "qcom,sdm660-mtp", "qcom,sdm660", "qcom,mtp"; qcom,board-id = <8 1>; - qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>; + qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>, + <0x0001001b 0x0002001a 0x0 0x0>, + <0x0001001b 0x0202001a 0x0 0x0>; }; &int_codec { diff --git a/arch/arm/boot/dts/qcom/sdm660-mtp.dts b/arch/arm/boot/dts/qcom/sdm660-mtp.dts index 72bfa20b2bf9..32b294ee6883 100644 --- a/arch/arm/boot/dts/qcom/sdm660-mtp.dts +++ b/arch/arm/boot/dts/qcom/sdm660-mtp.dts @@ -22,7 +22,8 @@ compatible = "qcom,sdm660-mtp", "qcom,sdm660", "qcom,mtp"; qcom,board-id = <8 0>; qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>, - <0x0001001b 0x0201011a 0x0 0x0>; + <0x0001001b 0x0201011a 0x0 0x0>, + <0x0001001b 0x0102001a 0x0 0x0>; }; &tavil_snd { diff --git a/arch/arm/boot/dts/qcom/sdm660-pm660a-cdp.dts b/arch/arm/boot/dts/qcom/sdm660-pm660a-cdp.dts index c27f76d3027b..c20318c26373 100644 --- a/arch/arm/boot/dts/qcom/sdm660-pm660a-cdp.dts +++ b/arch/arm/boot/dts/qcom/sdm660-pm660a-cdp.dts @@ -22,7 +22,9 @@ model = "Qualcomm Technologies, Inc. 
SDM 660 PM660 + PM660A CDP"; compatible = "qcom,sdm660-cdp", "qcom,sdm660", "qcom,cdp"; qcom,board-id = <1 0>; - qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>; + qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>, + <0x0001001b 0x0002001a 0x0 0x0>, + <0x0001001b 0x0202001a 0x0 0x0>; }; &mdss_dsi { diff --git a/arch/arm/boot/dts/qcom/sdm660-pm660a-headset-jacktype-no-cdp.dts b/arch/arm/boot/dts/qcom/sdm660-pm660a-headset-jacktype-no-cdp.dts index 281af3b1768e..5af607c0e1cc 100644 --- a/arch/arm/boot/dts/qcom/sdm660-pm660a-headset-jacktype-no-cdp.dts +++ b/arch/arm/boot/dts/qcom/sdm660-pm660a-headset-jacktype-no-cdp.dts @@ -22,7 +22,9 @@ model = "Qualcomm Technologies, Inc. SDM 660 PM660 + PM660A, Headset Jacktype NO, CDP"; compatible = "qcom,sdm660-cdp", "qcom,sdm660", "qcom,cdp"; qcom,board-id = <1 2>; - qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>; + qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>, + <0x0001001b 0x0002001a 0x0 0x0>, + <0x0001001b 0x0202001a 0x0 0x0>; }; &mdss_dsi0 { diff --git a/arch/arm/boot/dts/qcom/sdm660-pm660a-mtp.dts b/arch/arm/boot/dts/qcom/sdm660-pm660a-mtp.dts index eb5e4999fb67..3aaa839e18be 100644 --- a/arch/arm/boot/dts/qcom/sdm660-pm660a-mtp.dts +++ b/arch/arm/boot/dts/qcom/sdm660-pm660a-mtp.dts @@ -22,7 +22,9 @@ model = "Qualcomm Technologies, Inc. SDM 660 PM660 + PM660A MTP"; compatible = "qcom,sdm660-mtp", "qcom,sdm660", "qcom,mtp"; qcom,board-id = <8 0>; - qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>; + qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>, + <0x0001001b 0x0002001a 0x0 0x0>, + <0x0001001b 0x0202001a 0x0 0x0>; }; &mdss_dsi { diff --git a/arch/arm/boot/dts/qcom/sdm660-pm660a-qrd.dts b/arch/arm/boot/dts/qcom/sdm660-pm660a-qrd.dts index d9d74ea31d3d..848be11dc0a6 100644 --- a/arch/arm/boot/dts/qcom/sdm660-pm660a-qrd.dts +++ b/arch/arm/boot/dts/qcom/sdm660-pm660a-qrd.dts @@ -21,7 +21,9 @@ model = "Qualcomm Technologies, Inc. 
SDM 660 PM660 + PM660A QRD"; compatible = "qcom,sdm660-qrd", "qcom,sdm660", "qcom,qrd"; qcom,board-id = <0x0012000b 0>; - qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>; + qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>, + <0x0001001b 0x0002001a 0x0 0x0>, + <0x0001001b 0x0202001a 0x0 0x0>; }; &pm660a_oledb { diff --git a/arch/arm/boot/dts/qcom/sdm660-qrd.dts b/arch/arm/boot/dts/qcom/sdm660-qrd.dts index 4d120e83cb9b..3284e805a093 100644 --- a/arch/arm/boot/dts/qcom/sdm660-qrd.dts +++ b/arch/arm/boot/dts/qcom/sdm660-qrd.dts @@ -21,7 +21,8 @@ compatible = "qcom,sdm660-qrd", "qcom,sdm660", "qcom,qrd"; qcom,board-id = <0x1000b 0>; qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>, - <0x0001001b 0x0201011a 0x0 0x0>; + <0x0001001b 0x0201011a 0x0 0x0>, + <0x0001001b 0x0102001a 0x0 0x0>; }; &mdss_mdp { diff --git a/arch/arm/boot/dts/qcom/sdm660-qrd.dtsi b/arch/arm/boot/dts/qcom/sdm660-qrd.dtsi index e78c2474df4d..b1408cc295e8 100644 --- a/arch/arm/boot/dts/qcom/sdm660-qrd.dtsi +++ b/arch/arm/boot/dts/qcom/sdm660-qrd.dtsi @@ -121,9 +121,9 @@ &qusb_phy0 { qcom,qusb-phy-init-seq = <0xf8 0x80 - 0x80 0x84 + 0x83 0x84 0x83 0x88 - 0xc7 0x8c + 0xc3 0x8c 0x30 0x08 0x79 0x0c 0x21 0x10 diff --git a/arch/arm/boot/dts/qcom/sdm660-regulator.dtsi b/arch/arm/boot/dts/qcom/sdm660-regulator.dtsi index a4111f6d1b94..6556c986ae75 100644 --- a/arch/arm/boot/dts/qcom/sdm660-regulator.dtsi +++ b/arch/arm/boot/dts/qcom/sdm660-regulator.dtsi @@ -700,11 +700,33 @@ regulator-max-microvolt = <8>; qcom,cpr-fuse-corners = <5>; - qcom,cpr-fuse-combos = <16>; - qcom,cpr-speed-bins = <2>; - qcom,cpr-speed-bin-corners = <8 8>; - qcom,cpr-corners = <8>; - qcom,cpr-corner-fmax-map = <2 3 4 5 8>; + qcom,cpr-fuse-combos = <32>; + qcom,cpr-speed-bins = <4>; + qcom,cpr-speed-bin-corners = <8 8 0 8>; + qcom,cpr-corners = + /* Speed bin 0 */ + <8 8 8 8 8 8 8 8>, + + /* Speed bin 1 */ + <8 8 8 8 8 8 8 8>, + + /* Speed bin 2 */ + <0 0 0 0 0 0 0 0>, + + /* Speed bin 3 */ + <8 8 8 8 8 8 8 8>; + qcom,cpr-corner-fmax-map = + /* Speed bin 0 */ + <2 3 4 5 8>, + + /* Speed bin 1 */ + <2 3 4 5 8>, + + /* Speed bin 2 */ + <0 0 0 0 0>, + + /* Speed bin 3 */ + <2 3 4 5 8>; qcom,cpr-voltage-ceiling = < 724000 724000 724000 788000 868000 @@ -715,9 +737,20 @@ 744000 784000 844000>; qcom,corner-frequencies = + /* Speed bin 0 */ + <300000000 633600000 902400000 + 1113600000 1401600000 1536000000 + 1747200000 1843200000>, + + /* Speed bin 1 */ + <300000000 633600000 902400000 + 1113600000 1401600000 1536000000 + 1747200000 1843200000>, + + /* Speed bin 3 */ <300000000 633600000 902400000 1113600000 1401600000 1536000000 - 1747200000 1843200000>; + 1612800000 1843200000>; qcom,allow-voltage-interpolation; qcom,allow-quotient-interpolation; @@ -806,11 +839,34 @@ regulator-max-microvolt = <7>; qcom,cpr-fuse-corners = <5>; - qcom,cpr-fuse-combos = <16>; - qcom,cpr-speed-bins = <2>; - qcom,cpr-speed-bin-corners = <7 7>; - qcom,cpr-corners = <7>; - qcom,cpr-corner-fmax-map = <2 3 4 6 7>; + qcom,cpr-fuse-combos = <32>; + qcom,cpr-speed-bins = <4>; + qcom,cpr-speed-bin-corners = <7 7 0 7>; + qcom,cpr-corners = + /* Speed-bin 0 */ + <7 7 7 7 7 7 7 7>, + + /* Speed-bin 1 */ + <7 7 7 7 7 7 7 7>, + + /* Speed-bin 1 */ + <0 0 0 0 0 0 0 0>, + + /* Speed-bin 3 */ + <7 7 7 7 7 7 7 7>; + + qcom,cpr-corner-fmax-map = + /* Speed-bin 0 */ + <2 3 4 6 7>, + + /* Speed-bin 1 */ + <2 3 4 6 7>, + + /* Speed-bin 2 */ + <0 0 0 0 0>, + + /* Speed-bin 3 */ + <2 3 4 6 7>; qcom,cpr-voltage-ceiling = <724000 724000 788000 868000 @@ -829,6 +885,11 @@ /* Speed bin 1 */ <300000000 1113600000 
1401600000 1747200000 1958400000 2150400000 + 2208000000>, + + /* Speed bin 3 */ + <300000000 1113600000 1401600000 + 1747200000 1804800000 2150400000 2208000000>; qcom,allow-voltage-interpolation; diff --git a/arch/arm/boot/dts/qcom/sdm660-usbc-audio-mtp.dts b/arch/arm/boot/dts/qcom/sdm660-usbc-audio-mtp.dts index dff55d8e9cf8..b1a752b46a41 100644 --- a/arch/arm/boot/dts/qcom/sdm660-usbc-audio-mtp.dts +++ b/arch/arm/boot/dts/qcom/sdm660-usbc-audio-mtp.dts @@ -22,7 +22,8 @@ compatible = "qcom,sdm660-mtp", "qcom,sdm660", "qcom,mtp"; qcom,board-id = <8 2>; qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>, - <0x0001001b 0x0201011a 0x0 0x0>; + <0x0001001b 0x0201011a 0x0 0x0>, + <0x0001001b 0x0102001a 0x0 0x0>; }; &tavil_snd { diff --git a/arch/arm/boot/dts/qcom/sdm660.dtsi b/arch/arm/boot/dts/qcom/sdm660.dtsi index ba860e4e80a5..c626698ffd51 100644 --- a/arch/arm/boot/dts/qcom/sdm660.dtsi +++ b/arch/arm/boot/dts/qcom/sdm660.dtsi @@ -1254,10 +1254,11 @@ compatible = "qcom,clk-cpu-osm"; reg = <0x179c0000 0x4000>, <0x17916000 0x1000>, <0x17816000 0x1000>, <0x179d1000 0x1000>, - <0x00784130 0x8>, <0x17914800 0x800>; + <0x00784130 0x8>, <0x00784130 0x8>, + <0x17914800 0x800>; reg-names = "osm", "pwrcl_pll", "perfcl_pll", - "apcs_common", "perfcl_efuse", - "pwrcl_acd"; + "apcs_common", "pwrcl_efuse", + "perfcl_efuse", "pwrcl_acd"; qcom,acdtd-val = <0x0000a111 0x0000a111>; qcom,acdcr-val = <0x002c5ffd 0x002c5ffd>; @@ -1283,6 +1284,25 @@ < 1747200000 0x0404005b 0x09480048 0x2 7 >, < 1843200000 0x04040060 0x094c004c 0x3 8 >; + qcom,pwrcl-speedbin1-v0 = + < 300000000 0x0004000f 0x01200020 0x1 1 >, + < 633600000 0x05040021 0x03200020 0x1 2 >, + < 902400000 0x0404002f 0x04260026 0x1 3 >, + < 1113600000 0x0404003a 0x052e002e 0x2 4 >, + < 1401600000 0x04040049 0x073a003a 0x2 5 >, + < 1536000000 0x04040050 0x08400040 0x2 6 >, + < 1747200000 0x0404005b 0x09480048 0x2 7 >, + < 1843200000 0x04040060 0x094c004c 0x3 8 >; + + qcom,pwrcl-speedbin3-v0 = + < 300000000 0x0004000f 0x01200020 0x1 1 >, + < 633600000 0x05040021 0x03200020 0x1 2 >, + < 902400000 0x0404002f 0x04260026 0x1 3 >, + < 1113600000 0x0404003a 0x052e002e 0x2 4 >, + < 1401600000 0x04040049 0x073a003a 0x2 5 >, + < 1536000000 0x04040050 0x08400040 0x2 6 >, + < 1612800000 0x04040054 0x09430043 0x2 7 >; + qcom,perfcl-speedbin0-v0 = < 300000000 0x0004000f 0x01200020 0x1 1 >, < 1113600000 0x0404003a 0x052e002e 0x1 2 >, @@ -1301,6 +1321,13 @@ < 2150400000 0x04040070 0x0b590059 0x2 6 >, < 2208000000 0x04040073 0x0b5c005c 0x3 7 >; + qcom,perfcl-speedbin3-v0 = + < 300000000 0x0004000f 0x01200020 0x1 1 >, + < 1113600000 0x0404003a 0x052e002e 0x1 2 >, + < 1401600000 0x04040049 0x073a003a 0x2 3 >, + < 1747200000 0x0404005b 0x09480048 0x2 4 >, + < 1804800000 0x0404005e 0x094b004b 0x2 5 >; + qcom,up-timer = <1000 1000>; qcom,down-timer = <1000 1000>; qcom,pc-override-index = <0 0>; @@ -1354,6 +1381,7 @@ < 1113600 >, < 1401600 >, < 1536000 >, + < 1612800 >, < 1747200 >, < 1843200 >; @@ -1361,6 +1389,7 @@ < 1113600 >, < 1401600 >, < 1747200 >, + < 1804800 >, < 1958400 >, < 2150400 >, < 2208000 >, @@ -1577,6 +1606,7 @@ qcom,msm_fastrpc { compatible = "qcom,msm-fastrpc-adsp"; qcom,fastrpc-glink; + qcom,fastrpc-vmid-heap-shared; qcom,msm_fastrpc_compute_cb1 { compatible = "qcom,msm-fastrpc-compute-cb"; @@ -2055,6 +2085,7 @@ qcom,firmware-name = "modem"; qcom,pil-self-auth; qcom,sysmon-id = <0>; + qcom,minidump-id = <0>; qcom,ssctl-instance-id = <0x12>; qcom,qdsp6v62-1-5; memory-region = <&modem_fw_mem>; @@ -2129,6 +2160,11 @@ compatible = "qcom,msm-imem-diag-dload"; reg 
= <0xc8 200>; }; + + ss_mdump@b88 { + compatible = "qcom,msm-imem-minidump"; + reg = <0xb88 28>; + }; }; qcom,ghd { diff --git a/arch/arm/boot/dts/qcom/vplatform-lfv-agave.dtsi b/arch/arm/boot/dts/qcom/vplatform-lfv-agave.dtsi new file mode 100644 index 000000000000..fddffee703d1 --- /dev/null +++ b/arch/arm/boot/dts/qcom/vplatform-lfv-agave.dtsi @@ -0,0 +1,50 @@ +/* Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +&soc { + qcom,msm-dai-mi2s { + dai_mi2s_sec: qcom,msm-dai-q6-mi2s-sec { + qcom,msm-mi2s-rx-lines = <2>; + qcom,msm-mi2s-tx-lines = <1>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&sec_mi2s_active &sec_mi2s_sd0_active + &sec_mi2s_sd1_active>; + pinctrl-1 = <&sec_mi2s_sleep &sec_mi2s_sd0_sleep + &sec_mi2s_sd1_sleep>; + }; + + dai_mi2s: qcom,msm-dai-q6-mi2s-tert { + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&tert_mi2s_active &tert_mi2s_sd0_active>; + pinctrl-1 = <&tert_mi2s_sleep &tert_mi2s_sd0_sleep>; + }; + + dai_mi2s_quat: qcom,msm-dai-q6-mi2s-quat { + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&quat_mi2s_active &quat_mi2s_sd0_active>; + pinctrl-1 = <&quat_mi2s_sleep &quat_mi2s_sd0_sleep>; + }; + }; + + qcom,msm-dai-tdm-tert-rx { + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&tert_tdm_dout_active>; + pinctrl-1 = <&tert_tdm_dout_sleep>; + }; + + qcom,msm-dai-tdm-quat-rx { + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&quat_tdm_dout_active>; + pinctrl-1 = <&quat_tdm_dout_sleep>; + }; +}; + diff --git a/arch/arm/boot/dts/qcom/vplatform-lfv-ion.dtsi b/arch/arm/boot/dts/qcom/vplatform-lfv-ion.dtsi index 1176b54835b1..045cc44b2d4c 100644 --- a/arch/arm/boot/dts/qcom/vplatform-lfv-ion.dtsi +++ b/arch/arm/boot/dts/qcom/vplatform-lfv-ion.dtsi @@ -22,6 +22,12 @@ qcom,ion-heap-type = "CARVEOUT"; }; + qcom,ion-heap@27 { /* QSEECOM HEAP */ + reg = <27>; + memory-region = <&qseecom_mem>; + qcom,ion-heap-type = "DMA"; + }; + qcom,ion-heap@28 { /* Audio Heap */ reg = <28>; memory-region = <&ion_audio>; diff --git a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-pinctrl.dtsi b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-pinctrl.dtsi new file mode 100644 index 000000000000..ce7741f75b24 --- /dev/null +++ b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-pinctrl.dtsi @@ -0,0 +1,538 @@ +/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +&soc { + tlmm: pinctrl@01010000 { + compatible = "qcom,msm8996-pinctrl"; + reg = <0x01010000 0x300000>; + interrupts = <0 208 0>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + + blsp1_uart2_active: blsp1_uart2_active { + mux { + pins = "gpio41", "gpio42", "gpio43", "gpio44"; + function = "blsp_uart2"; + }; + + config { + pins = "gpio41", "gpio42", "gpio43", "gpio44"; + drive-strength = <2>; + bias-disable; + }; + }; + + blsp1_uart2_sleep: blsp1_uart2_sleep { + mux { + pins = "gpio41", "gpio42", "gpio43", "gpio44"; + function = "gpio"; + }; + + config { + pins = "gpio41", "gpio42", "gpio43", "gpio44"; + drive-strength = <2>; + bias-disable; + }; + }; + + usb_hub_reset_active: usb_hub_reset_active { + usb_hub_reset_active { + pins = "gpio103"; + drive-strength = <8>; /* 8 mA */ + bias-pull-up; /* pull up */ + output-high; + }; + }; + + usb_hub_reset_suspend: usb_hub_reset_suspend { + usb_hub_reset_suspend { + pins = "gpio103"; + drive-strength = <2>; /* 2 mA */ + bias-disable= <0>; /* no pull */ + }; + }; + + i2c_6 { + i2c_6_active: i2c_6_active { + mux { + pins = "gpio27", "gpio28"; + function = "blsp_i2c6"; + }; + + config { + pins = "gpio27", "gpio28"; + drive-strength = <2>; + bias-disable; + }; + }; + + i2c_6_sleep: i2c_6_sleep { + mux { + pins = "gpio27", "gpio28"; + function = "blsp_i2c6"; + }; + + config { + pins = "gpio27", "gpio28"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + i2c_8 { + i2c_8_active: i2c_8_active { + mux { + pins = "gpio6", "gpio7"; + function = "blsp_i2c8"; + }; + + config { + pins = "gpio6", "gpio7"; + drive-strength = <4>; + bias-disable; + }; + }; + + i2c_8_sleep: i2c_8_sleep { + mux { + pins = "gpio6", "gpio7"; + function = "blsp_i2c8"; + }; + + config { + pins = "gpio6", "gpio7"; + drive-strength = <4>; + bias-pull-up; + }; + }; + }; + + spi_9 { + spi_9_active: spi_9_active { + mux { + pins = "gpio49", "gpio50", "gpio51", + "gpio52"; + function = "blsp_spi9"; + }; + + config { + pins = "gpio49", "gpio50", "gpio51", + "gpio52"; + drive-strength = <6>; + bias-disable; + }; + }; + + spi_9_sleep: spi_9_sleep { + mux { + pins = "gpio49", "gpio50", "gpio51", + "gpio52"; + function = "blsp_spi9"; + }; + + config { + pins = "gpio49", "gpio50", "gpio51", + "gpio52"; + drive-strength = <6>; + bias-disable; + }; + }; + }; + + cnss_pins { + cnss_bootstrap_active: cnss_bootstrap_active { + mux { + pins = "gpio46"; + function = "gpio"; + }; + + config { + pins = "gpio46"; + drive-strength = <16>; + output-high; + bias-pull-up; + }; + }; + cnss_bootstrap_sleep: cnss_bootstrap_sleep { + mux { + pins = "gpio46"; + function = "gpio"; + }; + + config { + pins = "gpio46"; + drive-strength = <2>; + output-low; + bias-pull-down; + }; + }; + }; + + sec_mi2s { + sec_mi2s_sleep: sec_mi2s_sleep { + mux { + pins = "gpio80", "gpio81"; + function = "sec_mi2s"; + }; + + config { + pins = "gpio80", "gpio81"; + drive-strength = <2>; /* 2 mA */ + bias-disable; /* NO PULL */ + }; + }; + sec_mi2s_active: sec_mi2s_active { + mux { + pins = "gpio80", "gpio81"; + function = "sec_mi2s"; + }; + + config { + pins = "gpio80", "gpio81"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + }; + }; + }; + + sec_mi2s_sd0 { + sec_mi2s_sd0_sleep: sec_mi2s_sd0_sleep { + mux { + pins = "gpio82"; + function = "sec_mi2s"; + }; + + config { + pins = "gpio82"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + }; + }; + sec_mi2s_sd0_active: sec_mi2s_sd0_active { + mux { + pins = "gpio82"; + function = 
"sec_mi2s"; + }; + + config { + pins = "gpio82"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + }; + }; + }; + + sec_mi2s_sd1 { + sec_mi2s_sd1_sleep: sec_mi2s_sd1_sleep { + mux { + pins = "gpio83"; + function = "sec_mi2s"; + }; + + config { + pins = "gpio83"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + }; + }; + sec_mi2s_sd1_active: sec_mi2s_sd1_active { + mux { + pins = "gpio83"; + function = "sec_mi2s"; + }; + + config { + pins = "gpio83"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + }; + }; + }; + + tert_mi2s { + tert_mi2s_sleep: tert_mi2s_sleep { + mux { + pins = "gpio75", "gpio76"; + function = "ter_mi2s"; + }; + + config { + pins = "gpio75", "gpio76"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + }; + }; + tert_mi2s_active: tert_mi2s_active { + mux { + pins = "gpio75", "gpio76"; + function = "ter_mi2s"; + }; + + config { + pins = "gpio75", "gpio76"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + output-high; + }; + }; + }; + + tert_mi2s_sd0 { + tert_mi2s_sd0_sleep: tert_mi2s_sd0_sleep { + mux { + pins = "gpio77"; + function = "ter_mi2s"; + }; + + config { + pins = "gpio77"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + }; + }; + tert_mi2s_sd0_active: tert_mi2s_sd0_active { + mux { + pins = "gpio77"; + function = "ter_mi2s"; + }; + + config { + pins = "gpio77"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + }; + }; + }; + + quat_mi2s { + quat_mi2s_sleep: quat_mi2s_sleep { + mux { + pins = "gpio58", "gpio59"; + function = "qua_mi2s"; + }; + + config { + pins = "gpio58", "gpio59"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + }; + }; + quat_mi2s_active: quat_mi2s_active { + mux { + pins = "gpio58", "gpio59"; + function = "qua_mi2s"; + }; + + config { + pins = "gpio58", "gpio59"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + output-high; + }; + }; + }; + + quat_mi2s_sd0 { + quat_mi2s_sd0_sleep: quat_mi2s_sd0_sleep { + mux { + pins = "gpio60"; + function = "qua_mi2s"; + }; + + config { + pins = "gpio60"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + }; + }; + quat_mi2s_sd0_active: quat_mi2s_sd0_active { + mux { + pins = "gpio60"; + function = "qua_mi2s"; + }; + + config { + pins = "gpio60"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + }; + }; + }; + + tert_tdm { + tert_tdm_sleep: tert_tdm_sleep { + mux { + pins = "gpio75", "gpio76"; + function = "ter_mi2s"; + }; + + config { + pins = "gpio75", "gpio76"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + }; + }; + tert_tdm_active: tert_tdm_active { + mux { + pins = "gpio75", "gpio76"; + function = "ter_mi2s"; + }; + + config { + pins = "gpio75", "gpio76"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + output-high; + }; + }; + }; + + tert_tdm_din { + tert_tdm_din_sleep: tert_tdm_din_sleep { + mux { + pins = "gpio77"; + function = "ter_mi2s"; + }; + + config { + pins = "gpio77"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + }; + }; + tert_tdm_din_active: tert_tdm_din_active { + mux { + pins = "gpio77"; + function = "ter_mi2s"; + }; + + config { + pins = "gpio77"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + }; + }; + }; + + tert_tdm_dout { + tert_tdm_dout_sleep: tert_tdm_dout_sleep { + mux { + pins = "gpio78"; + function = "ter_mi2s"; + }; + + config { + pins = "gpio78"; + drive-strength = <2>; /* 2 mA */ + 
bias-pull-down; /* PULL DOWN */ + }; + }; + tert_tdm_dout_active: tert_tdm_dout_active { + mux { + pins = "gpio78"; + function = "ter_mi2s"; + }; + + config { + pins = "gpio78"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + }; + }; + }; + + quat_tdm { + quat_tdm_sleep: quat_tdm_sleep { + mux { + pins = "gpio58", "gpio59"; + function = "qua_mi2s"; + }; + + config { + pins = "gpio58", "gpio59"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + }; + }; + quat_tdm_active: quat_tdm_active { + mux { + pins = "gpio58", "gpio59"; + function = "qua_mi2s"; + }; + + config { + pins = "gpio58", "gpio59"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + output-high; + }; + }; + }; + + quat_tdm_din { + quat_tdm_din_sleep: quat_tdm_din_sleep { + mux { + pins = "gpio60"; + function = "qua_mi2s"; + }; + + config { + pins = "gpio60"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + }; + }; + quat_tdm_din_active: quat_tdm_din_active { + mux { + pins = "gpio60"; + function = "qua_mi2s"; + }; + + config { + pins = "gpio60"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + }; + }; + }; + + quat_tdm_dout { + quat_tdm_dout_sleep: quat_tdm_dout_sleep { + mux { + pins = "gpio61"; + function = "qua_mi2s"; + }; + + config { + pins = "gpio61"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* PULL DOWN */ + }; + }; + quat_tdm_dout_active: quat_tdm_dout_active { + mux { + pins = "gpio61"; + function = "qua_mi2s"; + }; + + config { + pins = "gpio61"; + drive-strength = <8>; /* 8 mA */ + bias-disable; /* NO PULL */ + }; + }; + }; + }; +}; diff --git a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dts b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dts index 5c0bbbccafea..e6d9f7b7d2f2 100644 --- a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dts +++ b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dts @@ -13,12 +13,20 @@ /dts-v1/; #include "skeleton64.dtsi" +#include <dt-bindings/clock/msm-clocks-8996.h> +#include <dt-bindings/interrupt-controller/arm-gic.h> / { model = "Qualcomm Technologies, Inc. 
MSM 8996"; compatible = "qcom,msm8996"; qcom,msm-id = <246 0x0>; + aliases { + spi9 = &spi_9; + i2c6 = &i2c_6; + i2c8 = &i2c_8; + }; + soc: soc { }; psci { @@ -53,14 +61,33 @@ reg = <0 0xc8000000 0 0x00400000>; label = "ion_audio_mem"; }; + modem_mem: modem_region@88800000 { + compatible = "removed-dma-pool"; + no-map; + reg = <0 0x88800000 0 0x6200000>; + }; + peripheral_mem: peripheral_region@8ea00000 { + compatible = "removed-dma-pool"; + no-map; + reg = <0 0x8ea00000 0 0x2b00000>; + }; + adsp_mem: adsp_region { + compatible = "shared-dma-pool"; + alloc-ranges = <0 0x00000000 0 0xffffffff>; + reusable; + alignment = <0 0x100000>; + size = <0 0x400000>; + }; }; }; #include "vplatform-lfv-ion.dtsi" +#include "vplatform-lfv-smmu.dtsi" &soc { #address-cells = <1>; #size-cells = <1>; + virtual-interrupt-parent = "gic"; ranges = <0 0 0 0xffffffff>; compatible = "simple-bus"; @@ -125,11 +152,17 @@ asoc-codec = <&stub_codec>; asoc-codec-names = "msm-stub-codec.1"; }; + qcom,msm-adsp-loader { + status = "ok"; + compatible = "qcom,adsp-loader"; + qcom,adsp-state = <0>; + }; qcom,msm-audio-ion { compatible = "qcom,msm-audio-ion"; + qcom,smmu-version = <2>; qcom,smmu-enabled; - qcom,smmu-sid = <1>; + iommus = <&lpass_q6_smmu 1>; }; pcm0: qcom,msm-pcm { @@ -587,4 +620,580 @@ hostless: qcom,msm-pcm-hostless { compatible = "qcom,msm-pcm-hostless"; }; + + qcom,msm-adsprpc-mem { + compatible = "qcom,msm-adsprpc-mem-region"; + memory-region = <&adsp_mem>; + }; + + qcom,msm_fastrpc { + compatible = "qcom,msm-fastrpc-adsp"; + + qcom,msm_fastrpc_compute_cb1 { + compatible = "qcom,msm-fastrpc-compute-cb"; + label = "adsprpc-smd"; + iommus = <&lpass_q6_smmu 8>; + }; + qcom,msm_fastrpc_compute_cb2 { + compatible = "qcom,msm-fastrpc-compute-cb"; + label = "adsprpc-smd"; + iommus = <&lpass_q6_smmu 9>; + }; + qcom,msm_fastrpc_compute_cb3 { + compatible = "qcom,msm-fastrpc-compute-cb"; + label = "adsprpc-smd"; + iommus = <&lpass_q6_smmu 10>; + }; + qcom,msm_fastrpc_compute_cb4 { + compatible = "qcom,msm-fastrpc-compute-cb"; + label = "adsprpc-smd"; + iommus = <&lpass_q6_smmu 11>; + }; + qcom,msm_fastrpc_compute_cb5 { + compatible = "qcom,msm-fastrpc-compute-cb"; + label = "adsprpc-smd"; + iommus = <&lpass_q6_smmu 12>; + }; + qcom,msm_fastrpc_compute_cb6 { + compatible = "qcom,msm-fastrpc-compute-cb"; + label = "adsprpc-smd"; + iommus = <&lpass_q6_smmu 5>; + }; + qcom,msm_fastrpc_compute_cb7 { + compatible = "qcom,msm-fastrpc-compute-cb"; + label = "adsprpc-smd"; + iommus = <&lpass_q6_smmu 6>; + }; + qcom,msm_fastrpc_compute_cb8 { + compatible = "qcom,msm-fastrpc-compute-cb"; + label = "adsprpc-smd"; + iommus = <&lpass_q6_smmu 7>; + }; + }; +}; + +#include "vplatform-lfv-msm8996-pinctrl.dtsi" +#include "msm8996-smp2p.dtsi" + +&soc { + qcom,ipc-spinlock@740000 { + compatible = "qcom,ipc-spinlock-sfpb"; + reg = <0x740000 0x8000>; + qcom,num-locks = <8>; + }; + + qcom,smem@86000000 { + compatible = "qcom,smem"; + reg = <0x86000000 0x200000>, + <0x9820010 0x4>, + <0x7b4000 0x8>; + reg-names = "smem", "irq-reg-base", + "smem_targ_info_reg"; + qcom,mpu-enabled; + + qcom,smd-modem { + compatible = "qcom,smd"; + qcom,smd-edge = <0>; + qcom,smd-irq-offset = <0x0>; + qcom,smd-irq-bitmask = <0x1000>; + interrupts = <0 449 1>; + label = "modem"; + qcom,not-loadable; + }; + + qcom,smd-adsp { + compatible = "qcom,smd"; + qcom,smd-edge = <0x1>; + qcom,smd-irq-offset = <0x0>; + qcom,smd-irq-bitmask = <0x100>; + interrupts = <0x0 0x9c 0x1>; + label = "adsp"; + }; + }; + + qcom,rmtfs_sharedmem@0 { + compatible = 
"qcom,sharedmem-uio"; + reg = <0x85e00000 0x00200000>; + reg-names = "rmtfs"; + qcom,client-id = <0x00000001>; + }; + + qcom,glink-smem-native-xprt-modem@86000000 { + compatible = "qcom,glink-smem-native-xprt"; + reg = <0x86000000 0x200000>, + <0x9820010 0x4>; + reg-names = "smem", "irq-reg-base"; + qcom,irq-mask = <0x8000>; + interrupts = <0 452 1>; + label = "mpss"; + }; + + qcom,glink-smem-native-xprt-adsp@86000000 { + compatible = "qcom,glink-smem-native-xprt"; + reg = <0x86000000 0x200000>, + <0x9820010 0x4>; + reg-names = "smem", "irq-reg-base"; + qcom,irq-mask = <0x200>; + interrupts = <0 157 1>; + label = "lpass"; + qcom,qos-config = <0x1b8>; + qcom,ramp-time = <0xaf>; + }; + + qcom,glink-qos-config-adsp { + compatible = "qcom,glink-qos-config"; + qcom,flow-info = <0x3c 0x0 0x3c 0x0 0x3c 0x0 0x3c 0x0>; + qcom,mtu-size = <0x800>; + qcom,tput-stats-cycle = <0xa>; + linux,phandle = <0x1b8>; + phandle = <0x1b8>; + }; + + /* IPC router */ + qcom,ipc_router { + compatible = "qcom,ipc_router"; + qcom,node-id = <1>; + }; + + qcom,ipc_router_modem_xprt { + compatible = "qcom,ipc_router_glink_xprt"; + qcom,ch-name = "IPCRTR"; + qcom,xprt-remote = "mpss"; + qcom,glink-xprt = "smd_trans"; + qcom,xprt-linkid = <1>; + qcom,xprt-version = <1>; + qcom,fragmented-data; + }; + + qcom,ipc_router_q6_xprt { + compatible = "qcom,ipc_router_glink_xprt"; + qcom,ch-name = "IPCRTR"; + qcom,xprt-remote = "lpass"; + qcom,glink-xprt = "smd_trans"; + qcom,xprt-linkid = <1>; + qcom,xprt-version = <1>; + qcom,fragmented-data; + }; + + /* IPA including NDP-BAM */ + ipa_hw: qcom,ipa@680000 { + compatible = "qcom,ipa"; + reg = <0x680000 0x4effc>, + <0x684000 0x26934>; + reg-names = "ipa-base", "bam-base"; + interrupts = <0 333 0>, + <0 432 0>; + interrupt-names = "ipa-irq", "bam-irq"; + qcom,ipa-hw-ver = <5>; /* IPA core version = IPAv2.5 */ + qcom,ipa-hw-mode = <0>; + qcom,ee = <0>; + qcom,use-ipa-tethering-bridge; + qcom,ipa-bam-remote-mode; + qcom,modem-cfg-emb-pipe-flt; + clocks = <&clock_gcc clk_ipa_clk>; + clock-names = "core_clk"; + qcom,use-dma-zone; + qcom,msm-bus,name = "ipa"; + qcom,msm-bus,num-cases = <3>; + qcom,msm-bus,num-paths = <2>; + qcom,msm-bus,vectors-KBps = + <90 512 0 0>, <90 585 0 0>, /* No vote */ + <90 512 80000 640000>, <90 585 80000 640000>, /* SVS */ + <90 512 206000 960000>, <90 585 206000 960000>; /* PERF */ + qcom,bus-vector-names = "MIN", "SVS", "PERF"; + }; + + /* rmnet over IPA */ + qcom,rmnet-ipa { + compatible = "qcom,rmnet-ipa"; + qcom,rmnet-ipa-ssr; + qcom,ipa-loaduC; + qcom,ipa-advertise-sg-support; + }; + + /* SPS */ + qcom,sps { + compatible = "qcom,msm_sps_4k"; + qcom,device-type = <3>; + qcom,pipe-attr-ee; + }; + + clock_gcc: qcom,gcc@300000 { + compatible = "qcom,dummycc"; + #clock-cells = <1>; + #reset-cells = <1>; + }; + + clock_mmss: qcom,mmsscc@8c0000 { + compatible = "qcom,dummycc"; + #clock-cells = <1>; + #reset-cells = <1>; + }; + + clock_gpu: qcom,gpucc@8c0000 { + compatible = "qcom,dummycc"; + #clock-cells = <1>; + #reset-cells = <1>; + }; + + clock_debug: qcom,cc-debug@362000 { + compatible = "qcom,dummycc"; + #clock-cells = <1>; + #reset-cells = <1>; + }; + + clock_cpu: qcom,cpu-clock-8996@ { + compatible = "qcom,dummycc"; + #clock-cells = <1>; + #reset-cells = <1>; + }; + + pil_modem: qcom,mss@2080000 { + compatible = "qcom,pil-q6v55-mss"; + reg = <0x2080000 0x100>, + <0x0763000 0x008>, + <0x0765000 0x008>, + <0x0764000 0x008>, + <0x2180000 0x020>, + <0x038f008 0x004>; + reg-names = "qdsp6_base", "halt_q6", "halt_modem", + "halt_nc", "rmb_base", "restart_reg"; 
+ + clocks = <&clock_gcc clk_cxo_clk_src>, + <&clock_gcc clk_gcc_mss_cfg_ahb_clk>, + <&clock_gcc clk_pnoc_clk>, + <&clock_gcc clk_gcc_mss_q6_bimc_axi_clk>, + <&clock_gcc clk_gcc_boot_rom_ahb_clk>, + <&clock_gcc clk_gpll0_out_msscc>, + <&clock_gcc clk_gcc_mss_snoc_axi_clk>, + <&clock_gcc clk_gcc_mss_mnoc_bimc_axi_clk>, + <&clock_gcc clk_qdss_clk>; + clock-names = "xo", "iface_clk", "pnoc_clk", "bus_clk", + "mem_clk", "gpll0_mss_clk", "snoc_axi_clk", + "mnoc_axi_clk", "qdss_clk"; + qcom,proxy-clock-names = "xo", "pnoc_clk", "qdss_clk"; + qcom,active-clock-names = "iface_clk", "bus_clk", "mem_clk", + "gpll0_mss_clk", "snoc_axi_clk", + "mnoc_axi_clk"; + + interrupts = <0 448 1>; + vdd_cx-supply = <&pm8994_s1_corner>; + vdd_cx-voltage = <7>; + vdd_mx-supply = <&pm8994_s2_corner>; + vdd_mx-uV = <6>; + vdd_pll-supply = <&pm8994_l12>; + qcom,vdd_pll = <1800000>; + qcom,firmware-name = "modem"; + qcom,pil-self-auth; + qcom,sysmon-id = <0>; + qcom,ssctl-instance-id = <0x12>; + qcom,override-acc; + qcom,ahb-clk-vote; + qcom,pnoc-clk-vote; + qcom,qdsp6v56-1-5; + qcom,mx-spike-wa; + memory-region = <&modem_mem>; + qcom,mem-protect-id = <0xF>; + + /* GPIO inputs from mss */ + qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_1_in 0 0>; + qcom,gpio-err-ready = <&smp2pgpio_ssr_smp2p_1_in 1 0>; + qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_1_in 2 0>; + qcom,gpio-stop-ack = <&smp2pgpio_ssr_smp2p_1_in 3 0>; + qcom,gpio-shutdown-ack = <&smp2pgpio_ssr_smp2p_1_in 7 0>; + + /* GPIO output to mss */ + qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_1_out 0 0>; + status = "ok"; + }; + + pm8994_s1_corner: regulator-s1-corner { + compatible = "qcom,stub-regulator"; + regulator-name = "pm8994_s1_corner"; + regulator-min-microvolt = <1>; + regulator-max-microvolt = <7>; + }; + + pm8994_s1_floor_corner: regulator-s1-floor-corner { + compatible = "qcom,stub-regulator"; + regulator-name = "pm8994_s1_floor_corner"; + }; + + pm8994_s1_corner_ao: regulator-s1-corner-ao { + compatible = "qcom,stub-regulator"; + regulator-name = "pm8994_s1_corner_ao"; + }; + + pm8994_s2_corner: regulator-s2-corner { + compatible = "qcom,stub-regulator"; + regulator-name = "pm8994_s2_corner"; + regulator-min-microvolt = <1>; + regulator-max-microvolt = <7>; + }; + + pm8994_s2_corner_ao: regulator-s2-corner-ao { + compatible = "qcom,stub-regulator"; + regulator-name = "pm8994_s2_corner_ao"; + }; + + pm8994_l12: regulator-l12 { + compatible = "qcom,stub-regulator"; + regulator-name = "pm8994_l12"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + }; + + pm8994_l30: regulator-l30 { + compatible = "qcom,stub-regulator"; + regulator-name = "pm8994_l30"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + qcom,init-voltage = <1800000>; + }; + + pm8994_l18_pin_ctrl: regulator-l18-pin-ctrl { + compatible = "qcom,stub-regulator"; + regulator-name = "pm8994_l18_pin_ctrl"; + regulator-min-microvolt = <2700000>; + regulator-max-microvolt = <2900000>; + qcom,init-voltage = <2700000>; + }; + + pm8994_l26_corner: regulator-l26-corner { + compatible = "qcom,stub-regulator"; + regulator-name = "pm8994_l26_corner"; + }; + + pm8994_l26_floor_corner: regulator-l26-floor-corner { + compatible = "qcom,stub-regulator"; + regulator-name = "pm8994_l26_floor_corner"; + }; + + pmi8994_boost_pin_ctrl: regulator-bst-pin-ctrl { + compatible = "qcom,stub-regulator"; + regulator-name = "pmi8994_boost_pin_ctrl"; + }; + + pm8994_s11: spm-regulator@3200 { + compatible = "qcom,stub-regulator"; + regulator-name = "pm8994_s11"; + 
}; + + pmi8994_s2: regulator@1700 { + compatible = "qcom,stub-regulator"; + regulator-name = "pmi8994_s2"; + }; + + pm8994_s3: regulator-s3 { + compatible = "qcom,stub-regulator"; + regulator-name = "pm8994_s3"; + regulator-min-microvolt = <1300000>; + regulator-max-microvolt = <1300000>; + qcom,init-voltage = <1300000>; + }; + + pm8994_s4: regulator-s4 { + compatible = "qcom,stub-regulator"; + regulator-name = "pm8994_s4"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + qcom,init-voltage = <1800000>; + }; + + pm8004_s2: regulator@1700 { + compatible = "qcom,stub-regulator"; + regulator-name = "pm8004_s2"; + }; + + spi_eth_vreg: spi_eth_phy_vreg { + compatible = "qcom,stub-regulator"; + regulator-name = "ethernet_phy"; + }; + + usb_otg_switch: usb-otg-switch { + compatible = "qcom,stub-regulator"; + regulator-name = "usb_otg_vreg"; + }; + + rome_vreg: rome_vreg { + compatible = "qcom,stub-regulator"; + regulator-name = "rome_vreg"; + }; + + wlan_en_vreg: wlan_en_vreg { + compatible = "qcom,stub-regulator"; + regulator-name = "wlan_en_vreg"; + }; + + hl7509_en_vreg: hl7509_en_vreg { + compatible = "qcom,stub-regulator"; + regulator-name = "hl7509_en_vreg"; + }; + + gdsc_mmagic_camss: qcom,gdsc@8c3c4c { + compatible = "qcom,stub-regulator"; + regulator-name = "gdsc_mmagic_camss"; + regulator-min-microvolt = <1>; + regulator-max-microvolt = <7>; + }; + + gdsc_hlos1_vote_lpass_adsp: qcom,gdsc@37d034 { + compatible = "qcom,stub-regulator"; + regulator-name = "gdsc_hlos1_vote_lpass_adsp"; + regulator-min-microvolt = <1>; + regulator-max-microvolt = <7>; + }; + + spi_9: spi@75B7000 { /* BLSP2 QUP3 */ + compatible = "qcom,spi-qup-v2"; + #address-cells = <1>; + #size-cells = <0>; + reg-names = "spi_physical"; + reg = <0x075B7000 0x600>; + interrupt-names = "spi_irq"; + interrupts = <0 103 0>; + spi-max-frequency = <19200000>; + qcom,infinite-mode = <0>; + qcom,ver-reg-exists; + qcom,master-id = <84>; + qcom,use-pinctrl; + pinctrl-names = "spi_default", "spi_sleep"; + pinctrl-0 = <&spi_9_active>; + pinctrl-1 = <&spi_9_sleep>; + clock-names = "iface_clk", "core_clk"; + clocks = <&clock_gcc clk_gcc_blsp2_ahb_clk>, + <&clock_gcc clk_gcc_blsp2_qup3_spi_apps_clk>; + }; + + i2c_6: i2c@757a000 { /* BLSP1 QUP6 */ + compatible = "qcom,i2c-msm-v2"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x757a000 0x1000>; + reg-names = "qup_phys_addr"; + interrupt-names = "qup_irq"; + interrupts = <0 100 0>; + qcom,disable-dma; + qcom,master-id = <86>; + qcom,clk-freq-out = <400000>; + qcom,clk-freq-in = <19200000>; + clock-names = "iface_clk", "core_clk"; + clocks = <&clock_gcc clk_gcc_blsp1_ahb_clk>, + <&clock_gcc clk_gcc_blsp1_qup6_i2c_apps_clk>; + pinctrl-names = "i2c_active", "i2c_sleep"; + pinctrl-0 = <&i2c_6_active>; + pinctrl-1 = <&i2c_6_sleep>; + }; + + i2c_8: i2c@75b6000 { /* BLSP2 QUP2 */ + compatible = "qcom,i2c-msm-v2"; + #address-cells = <1>; + #size-cells = <0>; + reg-names = "qup_phys_addr"; + reg = <0x75b6000 0x1000>; + interrupt-names = "qup_irq"; + interrupts = <0 102 0>; + qcom,disable-dma; + qcom,master-id = <84>; + qcom,clk-freq-out = <400000>; + qcom,clk-freq-in = <19200000>; + clock-names = "iface_clk", "core_clk"; + clocks = <&clock_gcc clk_gcc_blsp2_ahb_clk>, + <&clock_gcc clk_gcc_blsp2_qup2_i2c_apps_clk>; + pinctrl-names = "i2c_active", "i2c_sleep"; + pinctrl-0 = <&i2c_8_active>; + pinctrl-1 = <&i2c_8_sleep>; + }; + + blsp1_uart2: uart@07570000 { /* BLSP1 UART2 */ + compatible = "qcom,msm-hsuart-v14"; + reg = <0x07570000 0x1000>, + <0x7544000 0x2b000>; 
+ reg-names = "core_mem", "bam_mem"; + interrupt-names = "core_irq", "bam_irq", "wakeup_irq"; + interrupts = <0 108 0>, <0 238 0>, <0 810 0>; + #address-cells = <0>; + + qcom,inject-rx-on-wakeup; + qcom,rx-char-to-inject = <0xFD>; + + qcom,bam-tx-ep-pipe-index = <2>; + qcom,bam-rx-ep-pipe-index = <3>; + qcom,master-id = <86>; + clock-names = "core_clk", "iface_clk"; + clocks = <&clock_gcc clk_gcc_blsp1_uart2_apps_clk>, + <&clock_gcc clk_gcc_blsp1_ahb_clk>; + pinctrl-names = "sleep", "default"; + pinctrl-0 = <&blsp1_uart2_sleep>; + pinctrl-1 = <&blsp1_uart2_active>; + + qcom,msm-bus,name = "buart2"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + <86 512 0 0>, + <86 512 500 800>; + }; + + qcom,lpass@9300000 { + compatible = "qcom,pil-tz-generic"; + reg = <0x9300000 0x00100>; + interrupts = <0 162 1>; + + vdd_cx-supply = <&pm8994_s1_corner>; + qcom,proxy-reg-names = "vdd_cx"; + qcom,vdd_cx-uV-uA = <7 100000>; + + clocks = <&clock_gcc clk_cxo_pil_lpass_clk>; + clock-names = "xo"; + qcom,proxy-clock-names = "xo"; + + qcom,pas-id = <1>; + qcom,proxy-timeout-ms = <10000>; + qcom,smem-id = <423>; + qcom,sysmon-id = <1>; + qcom,ssctl-instance-id = <0x14>; + qcom,firmware-name = "adsp"; + qcom,edge = "lpass"; + memory-region = <&peripheral_mem>; + + /* GPIO inputs from lpass */ + qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_2_in 0 0>; + qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_2_in 2 0>; + qcom,gpio-err-ready = <&smp2pgpio_ssr_smp2p_2_in 1 0>; + qcom,gpio-stop-ack = <&smp2pgpio_ssr_smp2p_2_in 3 0>; + + /* GPIO output to lpass */ + qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_2_out 0 0>; + }; + + qcom,cnss { + compatible = "qcom,cnss"; + wlan-bootstrap-gpio = <&tlmm 46 0>; + vdd-wlan-en-supply = <&wlan_en_vreg>; + vdd-wlan-supply = <&rome_vreg>; + vdd-wlan-io-supply = <&pm8994_s4>; + vdd-wlan-xtal-supply = <&pm8994_l30>; + vdd-wlan-core-supply = <&pm8994_s3>; + wlan-ant-switch-supply = <&pm8994_l18_pin_ctrl>; + qcom,wlan-en-vreg-support; + qcom,notify-modem-status; + pinctrl-names = "bootstrap_active", "bootstrap_sleep"; + pinctrl-0 = <&cnss_bootstrap_active>; + pinctrl-1 = <&cnss_bootstrap_sleep>; + + qcom,msm-bus,name = "msm-cnss"; + qcom,msm-bus,num-cases = <4>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + /* No vote */ + <45 512 0 0>, + /* Up to 200 Mbps */ + <45 512 41421 1520000>, + /* Up to 400 Mbps */ + <45 512 96650 1520000>, + /* Up to 800 Mbps */ + <45 512 207108 14432000>; + }; }; +#include "vplatform-lfv-agave.dtsi" diff --git a/arch/arm/boot/dts/qcom/vplatform-lfv-smmu.dtsi b/arch/arm/boot/dts/qcom/vplatform-lfv-smmu.dtsi new file mode 100644 index 000000000000..65eaa0c5aef9 --- /dev/null +++ b/arch/arm/boot/dts/qcom/vplatform-lfv-smmu.dtsi @@ -0,0 +1,75 @@ +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "msm-arm-smmu.dtsi" +#include <dt-bindings/msm/msm-bus-ids.h> +#include <dt-bindings/interrupt-controller/arm-gic.h> + +&lpass_q6_smmu { + status = "ok"; + qcom,register-save; + qcom,skip-init; + #global-interrupts = <1>; + interrupts = <GIC_SPI 404 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 393 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 394 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 396 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 398 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 399 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 401 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH>; + vdd-supply = <&gdsc_hlos1_vote_lpass_adsp>; + clocks = <&clock_gcc clk_hlos1_vote_lpass_adsp_smmu_clk>; + clock-names = "lpass_q6_smmu_clocks"; + #clock-cells = <1>; +}; + +&cpp_fd_smmu { + status = "ok"; + qcom,register-save; + qcom,skip-init; + qcom,fatal-asf; + #global-interrupts = <1>; + interrupts = <GIC_SPI 264 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 263 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 267 IRQ_TYPE_LEVEL_HIGH>; + vdd-supply = <&gdsc_mmagic_camss>; + clocks = <&clock_mmss clk_mmss_mmagic_ahb_clk>, + <&clock_mmss clk_mmss_mmagic_cfg_ahb_clk>, + <&clock_mmss clk_smmu_cpp_ahb_clk>, + <&clock_mmss clk_smmu_cpp_axi_clk>, + <&clock_mmss clk_mmagic_camss_axi_clk>; + clock-names = "mmagic_ahb_clk", "mmagic_cfg_ahb_clk", + "cpp_ahb_clk", "cpp_axi_clk", + "mmagic_camss_axi_clk"; + #clock-cells = <1>; + qcom,bus-master-id = <MSM_BUS_MASTER_CPP>; +}; + +&soc { + iommu_test_device { + compatible = "iommu-debug-test"; + /* + * 42 shouldn't be used by anyone on the cpp_fd_smmu. We just + * need _something_ here to get this node recognized by the + * SMMU driver. Our test uses ATOS, which doesn't use SIDs + * anyways, so using a dummy value is ok. 
+ */ + iommus = <&cpp_fd_smmu 42>; + }; +}; diff --git a/arch/arm/configs/sdm660-perf_defconfig b/arch/arm/configs/sdm660-perf_defconfig index 878e720a927b..fbd36cd00ea0 100644 --- a/arch/arm/configs/sdm660-perf_defconfig +++ b/arch/arm/configs/sdm660-perf_defconfig @@ -93,6 +93,7 @@ CONFIG_XFRM_USER=y CONFIG_XFRM_STATISTICS=y CONFIG_NET_KEY=y CONFIG_INET=y +CONFIG_IP_MULTICAST=y CONFIG_IP_ADVANCED_ROUTER=y CONFIG_IP_MULTIPLE_TABLES=y CONFIG_IP_ROUTE_VERBOSE=y @@ -615,6 +616,9 @@ CONFIG_EXT4_ENCRYPTION=y CONFIG_EXT4_FS_ENCRYPTION=y CONFIG_EXT4_FS_ICE_ENCRYPTION=y CONFIG_EXT4_DEBUG=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_QFMT_V2=y CONFIG_FUSE_FS=y CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y @@ -655,6 +659,7 @@ CONFIG_CORESIGHT_TPDM=y CONFIG_CORESIGHT_QPDI=y CONFIG_CORESIGHT_SOURCE_DUMMY=y CONFIG_PFK=y +CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y CONFIG_SECURITY_SELINUX=y diff --git a/arch/arm/configs/sdm660_defconfig b/arch/arm/configs/sdm660_defconfig index 524abcf83e77..af5adfeb1a41 100644 --- a/arch/arm/configs/sdm660_defconfig +++ b/arch/arm/configs/sdm660_defconfig @@ -91,6 +91,7 @@ CONFIG_XFRM_USER=y CONFIG_XFRM_STATISTICS=y CONFIG_NET_KEY=y CONFIG_INET=y +CONFIG_IP_MULTICAST=y CONFIG_IP_ADVANCED_ROUTER=y CONFIG_IP_MULTIPLE_TABLES=y CONFIG_IP_ROUTE_VERBOSE=y @@ -617,6 +618,9 @@ CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y CONFIG_EXT4_FS_ENCRYPTION=y CONFIG_EXT4_FS_ICE_ENCRYPTION=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_QFMT_V2=y CONFIG_FUSE_FS=y CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y @@ -673,6 +677,7 @@ CONFIG_IRQSOFF_TRACER=y CONFIG_PREEMPT_TRACER=y CONFIG_BLK_DEV_IO_TRACE=y CONFIG_CPU_FREQ_SWITCH_PROFILER=y +CONFIG_LKDTM=y CONFIG_MEMTEST=y CONFIG_PANIC_ON_DATA_CORRUPTION=y CONFIG_FREE_PAGES_RDONLY=y @@ -693,6 +698,7 @@ CONFIG_CORESIGHT_TPDM=y CONFIG_CORESIGHT_QPDI=y CONFIG_CORESIGHT_SOURCE_DUMMY=y CONFIG_PFK=y +CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y CONFIG_SECURITY_SELINUX=y diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h index bfe2a2f5a644..22b73112b75f 100644 --- a/arch/arm/include/asm/ftrace.h +++ b/arch/arm/include/asm/ftrace.h @@ -54,6 +54,24 @@ static inline void *return_address(unsigned int level) #define ftrace_return_address(n) return_address(n) +#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME + +static inline bool arch_syscall_match_sym_name(const char *sym, + const char *name) +{ + if (!strcmp(sym, "sys_mmap2")) + sym = "sys_mmap_pgoff"; + else if (!strcmp(sym, "sys_statfs64_wrapper")) + sym = "sys_statfs64"; + else if (!strcmp(sym, "sys_fstatfs64_wrapper")) + sym = "sys_fstatfs64"; + else if (!strcmp(sym, "sys_arm_fadvise64_64")) + sym = "sys_fadvise64_64"; + + /* Ignore case since sym may start with "SyS" instead of "sys" */ + return !strcasecmp(sym, name); +} + #endif /* ifndef __ASSEMBLY__ */ #endif /* _ASM_ARM_FTRACE */ diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index e4a774f7aba1..360cea172b06 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -1636,12 +1636,16 @@ static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data) int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end) { + if (!kvm->arch.pgd) + return 0; trace_kvm_age_hva(start, end); return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL); } int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) { + if (!kvm->arch.pgd) + return 0; trace_kvm_test_age_hva(hva); return handle_hva_to_gpa(kvm, hva, hva, 
kvm_test_age_hva_handler, NULL); } diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 0c369a5d59f9..d61f3ae80e15 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -103,6 +103,7 @@ config ARM64 select SYSCTL_EXCEPTION_TRACE select HAVE_CONTEXT_TRACKING select HAVE_ARM_SMCCC + select THREAD_INFO_IN_TASK help ARM 64-bit (AArch64) Linux support. @@ -247,6 +248,15 @@ config PGTABLE_LEVELS default 3 if ARM64_16K_PAGES && ARM64_VA_BITS_47 default 4 if !ARM64_64K_PAGES && ARM64_VA_BITS_48 +config MSM_GVM_QUIN + bool "Enable virtualization Support for MSM kernel required for QUIN platform" + help + This enables support for MSM Kernel based virtual + machine for QUIN platform. + This helps to enable virtual driver support. + This should work on 64bit machine. + If you don't know what to do here, say N. + source "init/Kconfig" source "kernel/Kconfig.freezer" diff --git a/arch/arm64/configs/msm-auto-gvm-perf_defconfig b/arch/arm64/configs/msm-auto-gvm-perf_defconfig new file mode 100644 index 000000000000..2e551218af2d --- /dev/null +++ b/arch/arm64/configs/msm-auto-gvm-perf_defconfig @@ -0,0 +1,286 @@ +CONFIG_LOCALVERSION="-perf" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_SYSVIPC=y +CONFIG_AUDIT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_CPU_MAX_BUF_SHIFT=15 +CONFIG_CGROUPS=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_SCHED=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_SCHED_HMP=y +CONFIG_NAMESPACES=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_KALLSYMS_ALL=y +CONFIG_EMBEDDED=y +CONFIG_PROFILING=y +CONFIG_CC_STACKPROTECTOR_REGULAR=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SIG=y +CONFIG_MODULE_SIG_FORCE=y +CONFIG_MODULE_SIG_SHA512=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_ARCH_QCOM=y +CONFIG_PCI=y +CONFIG_PCI_HOST_GENERIC=y +CONFIG_SCHED_MC=y +CONFIG_NR_CPUS=8 +CONFIG_PREEMPT=y +CONFIG_ARM64_REG_REBALANCE_ON_CTX_SW=y +CONFIG_ZSMALLOC=y +CONFIG_BALANCE_ANON_FILE_RECLAIM=y +CONFIG_FORCE_ALLOC_FROM_DMA_ZONE=y +CONFIG_SECCOMP=y +CONFIG_ARMV8_DEPRECATED=y +CONFIG_SWP_EMULATION=y +CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y +CONFIG_COMPAT=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=y +CONFIG_XFRM_STATISTICS=y +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_INET_AH=y +CONFIG_INET_ESP=y +CONFIG_INET_IPCOMP=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_LOG=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_NOTRACK=y +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y 
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_DSCP=y +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QTAGUID=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_NAT=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +CONFIG_IP_NF_MANGLE=y +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_SECURITY=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +CONFIG_NF_CONNTRACK_IPV6=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +CONFIG_BRIDGE_NF_EBTABLES=y +CONFIG_BRIDGE_EBT_BROUTE=y +CONFIG_BRIDGE=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=y +CONFIG_NET_SCH_PRIO=y +CONFIG_NET_CLS_FW=y +CONFIG_NET_CLS_U32=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=y +CONFIG_NET_EMATCH_NBYTE=y +CONFIG_NET_EMATCH_U32=y +CONFIG_NET_EMATCH_META=y +CONFIG_NET_EMATCH_TEXT=y +CONFIG_NET_CLS_ACT=y +CONFIG_RMNET_DATA=y +CONFIG_RMNET_DATA_FC=y +CONFIG_RMNET_DATA_DEBUG_PKT=y +CONFIG_CAN=y +CONFIG_CAN_RH850=y +CONFIG_IPC_ROUTER=y +CONFIG_IPC_ROUTER_SECURITY=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_ZRAM=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_VIRTIO_BLK=y +CONFIG_SRAM=y +CONFIG_MD=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_UEVENT=y +CONFIG_DM_VERITY=y +CONFIG_DM_VERITY_FEC=y +CONFIG_NETDEVICES=y +CONFIG_DUMMY=y +CONFIG_TUN=y +CONFIG_VIRTIO_NET=y +CONFIG_PPP=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_DEFLATE=y +CONFIG_PPPOE=y +CONFIG_PPP_ASYNC=y +CONFIG_PPP_SYNC_TTY=y +CONFIG_INPUT_EVDEV=y +CONFIG_INPUT_EVBUG=m +CONFIG_INPUT_JOYSTICK=y +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_INPUT_MISC=y +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_GPIO=m +CONFIG_SERIO_AMBAKMI=y +CONFIG_SERIAL_MSM_HS=y +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MSM_V2=y +CONFIG_SPI=y +CONFIG_SPI_QUP=y +CONFIG_SPI_SPIDEV=y +CONFIG_PINCTRL_MSM8996=y +CONFIG_GPIOLIB=y +CONFIG_GPIO_SYSFS=y +CONFIG_POWER_RESET_QCOM=y +CONFIG_WATCHDOG=y +CONFIG_REGULATOR=y +CONFIG_REGULATOR_STUB=y +CONFIG_MEDIA_SUPPORT=y +CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_MEDIA_CONTROLLER=y +CONFIG_VIDEO_V4L2_SUBDEV_API=y +CONFIG_V4L_PLATFORM_DRIVERS=y +CONFIG_DRM=y +# CONFIG_DRM_MSM is not set +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_SND_SOC=y +CONFIG_HIDRAW=y +CONFIG_UHID=y +CONFIG_HID_APPLE=y +CONFIG_HID_ELECOM=y +CONFIG_HID_MAGICMOUSE=y +CONFIG_HID_MICROSOFT=y +CONFIG_UIO=y +CONFIG_UIO_MSM_SHAREDMEM=y +CONFIG_VIRTIO_INPUT=y +CONFIG_VIRTIO_MMIO=y +CONFIG_STAGING=y +CONFIG_ASHMEM=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +CONFIG_SYNC=y +CONFIG_SW_SYNC=y +CONFIG_SW_SYNC_USER=y +CONFIG_SPS=y 
+CONFIG_SPS_SUPPORT_NDP_BAM=y +CONFIG_IPA=y +CONFIG_RMNET_IPA=y +CONFIG_COMMON_CLK_MSM=y +CONFIG_MSM_CLK_CONTROLLER_V2=y +CONFIG_REMOTE_SPINLOCK_MSM=y +CONFIG_IOMMU_IO_PGTABLE_FAST=y +CONFIG_ARM_SMMU=y +CONFIG_IOMMU_DEBUG=y +CONFIG_IOMMU_TESTS=y +CONFIG_MSM_SMEM=y +CONFIG_MSM_SMD=y +CONFIG_MSM_GLINK=y +CONFIG_MSM_GLINK_LOOPBACK_SERVER=y +CONFIG_MSM_GLINK_SMD_XPRT=y +CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y +CONFIG_MSM_SMEM_LOGGING=y +CONFIG_MSM_SMP2P=y +CONFIG_MSM_SMP2P_TEST=y +CONFIG_MSM_QMI_INTERFACE=y +CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y +CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y +CONFIG_MSM_SUBSYSTEM_RESTART=y +CONFIG_MSM_PIL=y +CONFIG_MSM_PIL_MSS_QDSP6V5=y +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +CONFIG_EXT3_FS=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_FUSE_FS=y +CONFIG_VFAT_FS=y +CONFIG_TMPFS=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +CONFIG_PRINTK_TIME=y +CONFIG_DEBUG_INFO=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_SCHEDSTATS=y +CONFIG_IPC_LOGGING=y +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_CRYPTO_XCBC=y +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_ARC4=y +CONFIG_CRYPTO_TWOFISH=y +CONFIG_QMI_ENCDEC=y diff --git a/arch/arm64/configs/msm-auto-gvm_defconfig b/arch/arm64/configs/msm-auto-gvm_defconfig new file mode 100644 index 000000000000..a6d36c314a4a --- /dev/null +++ b/arch/arm64/configs/msm-auto-gvm_defconfig @@ -0,0 +1,316 @@ +CONFIG_SYSVIPC=y +CONFIG_AUDIT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_CPU_MAX_BUF_SHIFT=15 +CONFIG_CGROUPS=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_SCHED=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_SCHED_HMP=y +CONFIG_NAMESPACES=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_KALLSYMS_ALL=y +CONFIG_EMBEDDED=y +CONFIG_PROFILING=y +CONFIG_CC_STACKPROTECTOR_REGULAR=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SIG=y +CONFIG_MODULE_SIG_FORCE=y +CONFIG_MODULE_SIG_SHA512=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_ARCH_QCOM=y +CONFIG_PCI=y +CONFIG_PCI_HOST_GENERIC=y +CONFIG_SCHED_MC=y +CONFIG_NR_CPUS=8 +CONFIG_PREEMPT=y +CONFIG_ARM64_REG_REBALANCE_ON_CTX_SW=y +CONFIG_ZSMALLOC=y +CONFIG_BALANCE_ANON_FILE_RECLAIM=y +CONFIG_FORCE_ALLOC_FROM_DMA_ZONE=y +CONFIG_SECCOMP=y +CONFIG_ARMV8_DEPRECATED=y +CONFIG_SWP_EMULATION=y +CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y +CONFIG_COMPAT=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=y +CONFIG_XFRM_STATISTICS=y +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_INET_AH=y +CONFIG_INET_ESP=y +CONFIG_INET_IPCOMP=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_LOG=y 
+CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_NOTRACK=y +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_DSCP=y +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QTAGUID=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_NAT=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +CONFIG_IP_NF_MANGLE=y +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_SECURITY=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +CONFIG_NF_CONNTRACK_IPV6=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +CONFIG_BRIDGE_NF_EBTABLES=y +CONFIG_BRIDGE_EBT_BROUTE=y +CONFIG_BRIDGE=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=y +CONFIG_NET_SCH_PRIO=y +CONFIG_NET_CLS_FW=y +CONFIG_NET_CLS_U32=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=y +CONFIG_NET_EMATCH_NBYTE=y +CONFIG_NET_EMATCH_U32=y +CONFIG_NET_EMATCH_META=y +CONFIG_NET_EMATCH_TEXT=y +CONFIG_NET_CLS_ACT=y +CONFIG_RMNET_DATA=y +CONFIG_RMNET_DATA_FC=y +CONFIG_RMNET_DATA_DEBUG_PKT=y +CONFIG_CAN=y +CONFIG_CAN_RH850=y +CONFIG_IPC_ROUTER=y +CONFIG_IPC_ROUTER_SECURITY=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_ZRAM=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_VIRTIO_BLK=y +CONFIG_SRAM=y +CONFIG_MD=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_UEVENT=y +CONFIG_DM_VERITY=y +CONFIG_DM_VERITY_FEC=y +CONFIG_NETDEVICES=y +CONFIG_DUMMY=y +CONFIG_TUN=y +CONFIG_VIRTIO_NET=y +CONFIG_PPP=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_DEFLATE=y +CONFIG_PPPOE=y +CONFIG_PPP_ASYNC=y +CONFIG_PPP_SYNC_TTY=y +CONFIG_INPUT_EVDEV=y +CONFIG_INPUT_EVBUG=m +CONFIG_INPUT_JOYSTICK=y +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_INPUT_MISC=y +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_GPIO=m +CONFIG_SERIO_AMBAKMI=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_SERIAL_AMBA_PL011=y +CONFIG_SERIAL_AMBA_PL011_CONSOLE=y +CONFIG_SERIAL_MSM_HS=y +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MSM_V2=y +CONFIG_SPI=y +CONFIG_SPI_DEBUG=y +CONFIG_SPI_QUP=y +CONFIG_SPI_SPIDEV=y +CONFIG_PINCTRL_MSM8996=y +CONFIG_GPIOLIB=y +CONFIG_GPIO_SYSFS=y +CONFIG_POWER_RESET_QCOM=y +CONFIG_WATCHDOG=y +CONFIG_REGULATOR=y +CONFIG_REGULATOR_STUB=y +CONFIG_MEDIA_SUPPORT=y +CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_MEDIA_CONTROLLER=y +CONFIG_VIDEO_V4L2_SUBDEV_API=y +CONFIG_V4L_PLATFORM_DRIVERS=y +CONFIG_DRM=y +# CONFIG_DRM_MSM is not set +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_SND_SOC=y +CONFIG_HIDRAW=y +CONFIG_UHID=y 
+CONFIG_HID_APPLE=y +CONFIG_HID_ELECOM=y +CONFIG_HID_MAGICMOUSE=y +CONFIG_HID_MICROSOFT=y +CONFIG_UIO=y +CONFIG_UIO_MSM_SHAREDMEM=y +CONFIG_VIRTIO_INPUT=y +CONFIG_VIRTIO_MMIO=y +CONFIG_STAGING=y +CONFIG_ASHMEM=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +CONFIG_SYNC=y +CONFIG_SW_SYNC=y +CONFIG_SW_SYNC_USER=y +CONFIG_SPS=y +CONFIG_SPS_SUPPORT_NDP_BAM=y +CONFIG_IPA=y +CONFIG_RMNET_IPA=y +CONFIG_COMMON_CLK_MSM=y +CONFIG_MSM_CLK_CONTROLLER_V2=y +CONFIG_REMOTE_SPINLOCK_MSM=y +CONFIG_IOMMU_IO_PGTABLE_FAST=y +CONFIG_IOMMU_IO_PGTABLE_FAST_SELFTEST=y +CONFIG_ARM_SMMU=y +CONFIG_IOMMU_DEBUG=y +CONFIG_IOMMU_DEBUG_TRACKING=y +CONFIG_IOMMU_TESTS=y +CONFIG_MSM_SMEM=y +CONFIG_MSM_SMD=y +CONFIG_MSM_SMD_DEBUG=y +CONFIG_MSM_GLINK=y +CONFIG_MSM_GLINK_LOOPBACK_SERVER=y +CONFIG_MSM_GLINK_SMD_XPRT=y +CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y +CONFIG_MSM_SMEM_LOGGING=y +CONFIG_MSM_SMP2P=y +CONFIG_MSM_SMP2P_TEST=y +CONFIG_MSM_QMI_INTERFACE=y +CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y +CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y +CONFIG_MSM_SUBSYSTEM_RESTART=y +CONFIG_MSM_PIL=y +CONFIG_MSM_PIL_MSS_QDSP6V5=y +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +CONFIG_EXT3_FS=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_FUSE_FS=y +CONFIG_VFAT_FS=y +CONFIG_TMPFS=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +CONFIG_PRINTK_TIME=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_DEBUG_INFO=y +CONFIG_PAGE_OWNER=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_OBJECTS=y +CONFIG_DEBUG_OBJECTS_FREE=y +CONFIG_DEBUG_OBJECTS_TIMERS=y +CONFIG_DEBUG_OBJECTS_WORK=y +CONFIG_DEBUG_OBJECTS_RCU_HEAD=y +CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y +CONFIG_DEBUG_KMEMLEAK=y +CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y +CONFIG_DEBUG_STACK_USAGE=y +CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_LOCKUP_DETECTOR=y +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_PANIC_ON_SCHED_BUG=y +CONFIG_SCHEDSTATS=y +CONFIG_DEBUG_ATOMIC_SLEEP=y +CONFIG_DEBUG_LIST=y +CONFIG_FAULT_INJECTION=y +CONFIG_FAIL_PAGE_ALLOC=y +CONFIG_FAULT_INJECTION_DEBUG_FS=y +CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y +CONFIG_IPC_LOGGING=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_PANIC_ON_DATA_CORRUPTION=y +CONFIG_ARM64_PTDUMP=y +CONFIG_FREE_PAGES_RDONLY=y +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_CRYPTO_XCBC=y +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_ARC4=y +CONFIG_CRYPTO_TWOFISH=y +CONFIG_QMI_ENCDEC=y diff --git a/arch/arm64/configs/msm-auto-perf_defconfig b/arch/arm64/configs/msm-auto-perf_defconfig index 28d10091da43..e55ebfc79ddb 100644 --- a/arch/arm64/configs/msm-auto-perf_defconfig +++ b/arch/arm64/configs/msm-auto-perf_defconfig @@ -220,6 +220,7 @@ CONFIG_RMNET_DATA_DEBUG_PKT=y CONFIG_SOCKEV_NLMCAST=y CONFIG_CAN=y CONFIG_CAN_RH850=y +CONFIG_CAN_K61=y CONFIG_BT=y CONFIG_MSM_BT_POWER=y CONFIG_BTFM_SLIM=y @@ -276,10 +277,10 @@ CONFIG_WCNSS_MEM_PRE_ALLOC=y CONFIG_CNSS_CRYPTO=y CONFIG_ATH_CARDS=y CONFIG_WIL6210=m -CONFIG_CNSS=y -CONFIG_CNSS_ASYNC=y CONFIG_CLD_LL_CORE=y CONFIG_BUS_AUTO_SUSPEND=y +CONFIG_CNSS2=y +CONFIG_CNSS2_DEBUG=y CONFIG_INPUT_EVDEV=y CONFIG_INPUT_KEYRESET=y CONFIG_KEYBOARD_GPIO=y @@ -334,6 +335,7 @@ CONFIG_MSM_BCL_PERIPHERAL_CTL=y CONFIG_MSM_PM=y CONFIG_APSS_CORE_EA=y CONFIG_MSM_APM=y +CONFIG_SENSORS_GPIO_FAN=y CONFIG_SENSORS_EPM_ADC=y CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y CONFIG_LIMITS_MONITOR=y diff --git a/arch/arm64/configs/msm-auto_defconfig b/arch/arm64/configs/msm-auto_defconfig index 1783707399ca..8f8e696f8866 100644 --- a/arch/arm64/configs/msm-auto_defconfig +++ b/arch/arm64/configs/msm-auto_defconfig @@ -1,4 +1,5 @@ # CONFIG_LOCALVERSION_AUTO is not set 
+CONFIG_FHANDLE=y CONFIG_AUDIT=y # CONFIG_AUDITSYSCALL is not set CONFIG_NO_HZ=y @@ -222,6 +223,7 @@ CONFIG_RMNET_DATA_DEBUG_PKT=y CONFIG_SOCKEV_NLMCAST=y CONFIG_CAN=y CONFIG_CAN_RH850=y +CONFIG_CAN_K61=y CONFIG_BT=y CONFIG_MSM_BT_POWER=y CONFIG_BTFM_SLIM=y @@ -231,6 +233,8 @@ CONFIG_CFG80211_INTERNAL_REGDB=y CONFIG_RFKILL=y CONFIG_IPC_ROUTER=y CONFIG_IPC_ROUTER_SECURITY=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y CONFIG_DMA_CMA=y CONFIG_ZRAM=y @@ -277,10 +281,10 @@ CONFIG_WCNSS_MEM_PRE_ALLOC=y CONFIG_CNSS_CRYPTO=y CONFIG_ATH_CARDS=y CONFIG_WIL6210=m -CONFIG_CNSS=y -CONFIG_CNSS_ASYNC=y CONFIG_CLD_LL_CORE=y CONFIG_BUS_AUTO_SUSPEND=y +CONFIG_CNSS2=y +CONFIG_CNSS2_DEBUG=y CONFIG_INPUT_EVDEV=y CONFIG_INPUT_KEYRESET=y CONFIG_KEYBOARD_GPIO=y @@ -310,7 +314,6 @@ CONFIG_HW_RANDOM=y CONFIG_HW_RANDOM_MSM_LEGACY=y CONFIG_MSM_ADSPRPC=y CONFIG_MSM_RDBG=m -CONFIG_I2C=y CONFIG_I2C_CHARDEV=y CONFIG_I2C_MUX=y CONFIG_I2C_QUP=y @@ -337,6 +340,7 @@ CONFIG_MSM_BCL_PERIPHERAL_CTL=y CONFIG_MSM_PM=y CONFIG_APSS_CORE_EA=y CONFIG_MSM_APM=y +CONFIG_SENSORS_GPIO_FAN=y CONFIG_SENSORS_EPM_ADC=y CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y CONFIG_LIMITS_MONITOR=y @@ -346,7 +350,6 @@ CONFIG_THERMAL_TSENS8974=y CONFIG_THERMAL_QPNP_ADC_TM=y CONFIG_MFD_SPMI_PMIC=y CONFIG_WCD9335_CODEC=y -CONFIG_REGULATOR=y CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_REGULATOR_FAN53555=y CONFIG_REGULATOR_MAX20010=y @@ -378,15 +381,11 @@ CONFIG_MSM_AIS_CAMERA_SENSOR=y # CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set CONFIG_VIDEO_ADV7481=y CONFIG_QCOM_KGSL=y +CONFIG_DRM=y CONFIG_MSM_BA_V4L2=y -CONFIG_FB=y -CONFIG_FB_MSM=y -CONFIG_FB_MSM_MDSS=y -CONFIG_FB_MSM_MDSS_WRITEBACK=y -CONFIG_FB_MSM_MDSS_HDMI_PANEL=y -CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y +CONFIG_MSM_DBA=y +CONFIG_MSM_DBA_ADV7533=y CONFIG_BACKLIGHT_LCD_SUPPORT=y -CONFIG_BACKLIGHT_CLASS_DEVICE=y CONFIG_BACKLIGHT_GENERIC=m CONFIG_LOGO=y # CONFIG_LOGO_LINUX_MONO is not set @@ -470,7 +469,7 @@ CONFIG_STAGING=y CONFIG_ASHMEM=y CONFIG_ANDROID_TIMED_GPIO=y CONFIG_ANDROID_LOW_MEMORY_KILLER=y -CONFIG_SW_SYNC_USER=y +CONFIG_SYNC=y CONFIG_ION=y CONFIG_ION_MSM=y CONFIG_QPNP_REVID=y @@ -519,7 +518,6 @@ CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y CONFIG_MSM_GLINK_PKT=y CONFIG_MSM_SPM=y CONFIG_MSM_L2_SPM=y -CONFIG_QCOM_SCM=y CONFIG_QCOM_SCM_XPU=y CONFIG_QCOM_WATCHDOG_V2=y CONFIG_QCOM_MEMORY_DUMP_V2=y @@ -574,7 +572,6 @@ CONFIG_EXT4_FS_ICE_ENCRYPTION=y CONFIG_FUSE_FS=y CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y -CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y CONFIG_ECRYPT_FS=y CONFIG_ECRYPT_FS_MESSAGING=y @@ -591,6 +588,7 @@ CONFIG_DEBUG_OBJECTS_TIMERS=y CONFIG_DEBUG_OBJECTS_WORK=y CONFIG_DEBUG_OBJECTS_RCU_HEAD=y CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y +CONFIG_SLUB_DEBUG_ON=y CONFIG_DEBUG_KMEMLEAK=y CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y CONFIG_DEBUG_STACK_USAGE=y diff --git a/arch/arm64/configs/msmcortex_defconfig b/arch/arm64/configs/msmcortex_defconfig index 77157bb85ee1..3cbcc10ceb1d 100644 --- a/arch/arm64/configs/msmcortex_defconfig +++ b/arch/arm64/configs/msmcortex_defconfig @@ -698,6 +698,7 @@ CONFIG_IRQSOFF_TRACER=y CONFIG_PREEMPT_TRACER=y CONFIG_BLK_DEV_IO_TRACE=y CONFIG_CPU_FREQ_SWITCH_PROFILER=y +CONFIG_LKDTM=y CONFIG_MEMTEST=y CONFIG_PANIC_ON_DATA_CORRUPTION=y CONFIG_ARM64_PTDUMP=y diff --git a/arch/arm64/configs/sdm660-perf_defconfig b/arch/arm64/configs/sdm660-perf_defconfig index c92551c69bf8..0a5f7f1f0f2d 100644 --- a/arch/arm64/configs/sdm660-perf_defconfig +++ b/arch/arm64/configs/sdm660-perf_defconfig @@ -5,6 +5,9 @@ CONFIG_AUDIT=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y 
CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_TASKSTATS=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y CONFIG_RCU_EXPERT=y CONFIG_RCU_FAST_NO_HZ=y CONFIG_RCU_NOCB_CPU=y @@ -93,6 +96,7 @@ CONFIG_XFRM_USER=y CONFIG_XFRM_STATISTICS=y CONFIG_NET_KEY=y CONFIG_INET=y +CONFIG_IP_MULTICAST=y CONFIG_IP_ADVANCED_ROUTER=y CONFIG_IP_MULTIPLE_TABLES=y CONFIG_IP_ROUTE_VERBOSE=y @@ -242,6 +246,7 @@ CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=8192 CONFIG_QSEECOM=y CONFIG_HDCP_QSEECOM=y +CONFIG_UID_SYS_STATS=y CONFIG_QPNP_MISC=y CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y @@ -311,6 +316,7 @@ CONFIG_SERIAL_MSM_SMD=y CONFIG_DIAG_CHAR=y CONFIG_HW_RANDOM=y CONFIG_HW_RANDOM_MSM_LEGACY=y +# CONFIG_DEVPORT is not set CONFIG_MSM_ADSPRPC=y CONFIG_MSM_RDBG=m CONFIG_I2C_CHARDEV=y @@ -559,6 +565,7 @@ CONFIG_QCOM_SCM=y CONFIG_QCOM_WATCHDOG_V2=y CONFIG_QCOM_IRQ_HELPER=y CONFIG_QCOM_MEMORY_DUMP_V2=y +CONFIG_QCOM_MINIDUMP=y CONFIG_ICNSS=y CONFIG_MSM_RUN_QUEUE_STATS=y CONFIG_MSM_BOOT_STATS=y @@ -610,6 +617,9 @@ CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y CONFIG_EXT4_FS_ENCRYPTION=y CONFIG_EXT4_FS_ICE_ENCRYPTION=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_QFMT_V2=y CONFIG_FUSE_FS=y CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y @@ -642,6 +652,7 @@ CONFIG_CORESIGHT_TPDM=y CONFIG_CORESIGHT_QPDI=y CONFIG_CORESIGHT_SOURCE_DUMMY=y CONFIG_PFK=y +CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y CONFIG_SECURITY_SELINUX=y diff --git a/arch/arm64/configs/sdm660_defconfig b/arch/arm64/configs/sdm660_defconfig index 6b45087e2603..7819916fb841 100644 --- a/arch/arm64/configs/sdm660_defconfig +++ b/arch/arm64/configs/sdm660_defconfig @@ -97,6 +97,7 @@ CONFIG_XFRM_USER=y CONFIG_XFRM_STATISTICS=y CONFIG_NET_KEY=y CONFIG_INET=y +CONFIG_IP_MULTICAST=y CONFIG_IP_ADVANCED_ROUTER=y CONFIG_IP_MULTIPLE_TABLES=y CONFIG_IP_ROUTE_VERBOSE=y @@ -320,6 +321,7 @@ CONFIG_SERIAL_MSM_SMD=y CONFIG_DIAG_CHAR=y CONFIG_HW_RANDOM=y CONFIG_HW_RANDOM_MSM_LEGACY=y +# CONFIG_DEVPORT is not set CONFIG_MSM_ADSPRPC=y CONFIG_MSM_RDBG=m CONFIG_I2C_CHARDEV=y @@ -579,6 +581,7 @@ CONFIG_QCOM_SCM=y CONFIG_QCOM_WATCHDOG_V2=y CONFIG_QCOM_IRQ_HELPER=y CONFIG_QCOM_MEMORY_DUMP_V2=y +CONFIG_QCOM_MINIDUMP=y CONFIG_ICNSS=y CONFIG_MSM_GLADIATOR_ERP_V2=y CONFIG_PANIC_ON_GLADIATOR_ERROR_V2=y @@ -636,6 +639,9 @@ CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y CONFIG_EXT4_FS_ENCRYPTION=y CONFIG_EXT4_FS_ICE_ENCRYPTION=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_QFMT_V2=y CONFIG_FUSE_FS=y CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y @@ -693,6 +699,7 @@ CONFIG_IRQSOFF_TRACER=y CONFIG_PREEMPT_TRACER=y CONFIG_BLK_DEV_IO_TRACE=y CONFIG_CPU_FREQ_SWITCH_PROFILER=y +CONFIG_LKDTM=y CONFIG_MEMTEST=y CONFIG_PANIC_ON_DATA_CORRUPTION=y CONFIG_ARM64_PTDUMP=y @@ -715,6 +722,7 @@ CONFIG_CORESIGHT_TPDM=y CONFIG_CORESIGHT_QPDI=y CONFIG_CORESIGHT_SOURCE_DUMMY=y CONFIG_PFK=y +CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y CONFIG_SECURITY_SELINUX=y diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 02e4b7e4bdbf..0e67a507dfbc 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -235,14 +235,25 @@ lr .req x30 // link register .endm /* + * @dst: Result of per_cpu(sym, smp_processor_id()) * @sym: The name of the per-cpu variable - * @reg: Result of per_cpu(sym, smp_processor_id()) * @tmp: scratch register */ - .macro this_cpu_ptr, sym, reg, tmp - adr_l \reg, \sym + .macro adr_this_cpu, dst, sym, tmp + adr_l \dst, \sym mrs \tmp, tpidr_el1 - add \reg, \reg, 
\tmp + add \dst, \dst, \tmp + .endm + + /* + * @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id())) + * @sym: The name of the per-cpu variable + * @tmp: scratch register + */ + .macro ldr_this_cpu dst, sym, tmp + adr_l \dst, \sym + mrs \tmp, tpidr_el1 + ldr \dst, [\dst, \tmp] .endm /* diff --git a/arch/arm64/include/asm/current.h b/arch/arm64/include/asm/current.h new file mode 100644 index 000000000000..483a6c9d3e10 --- /dev/null +++ b/arch/arm64/include/asm/current.h @@ -0,0 +1,35 @@ +#ifndef __ASM_CURRENT_H +#define __ASM_CURRENT_H + +#include <linux/compiler.h> + +#include <asm/sysreg.h> + +#ifndef __ASSEMBLY__ + +#ifdef CONFIG_THREAD_INFO_IN_TASK +struct task_struct; + +/* + * We don't use read_sysreg() as we want the compiler to cache the value where + * possible. + */ +static __always_inline struct task_struct *get_current(void) +{ + unsigned long sp_el0; + + asm ("mrs %0, sp_el0" : "=r" (sp_el0)); + + return (struct task_struct *)sp_el0; +} +#define current get_current() +#else +#include <linux/thread_info.h> +#define get_current() (current_thread_info()->task) +#define current get_current() +#endif + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_CURRENT_H */ + diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h index b98332269462..9d9287277201 100644 --- a/arch/arm64/include/asm/elf.h +++ b/arch/arm64/include/asm/elf.h @@ -115,10 +115,10 @@ /* * This is the base location for PIE (ET_DYN with INTERP) loads. On - * 64-bit, this is raised to 4GB to leave the entire 32-bit address + * 64-bit, this is above 4GB to leave the entire 32-bit address * space open for things that want to use the area for 32-bit pointers. */ -#define ELF_ET_DYN_BASE 0x100000000UL +#define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3) #ifndef __ASSEMBLY__ diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h index 8a336852eeba..2ce1a0262a59 100644 --- a/arch/arm64/include/asm/percpu.h +++ b/arch/arm64/include/asm/percpu.h @@ -16,6 +16,8 @@ #ifndef __ASM_PERCPU_H #define __ASM_PERCPU_H +#include <asm/stack_pointer.h> + static inline void set_my_cpu_offset(unsigned long off) { asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory"); diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h index da4397e14e0d..0c38c189fb3b 100644 --- a/arch/arm64/include/asm/perf_event.h +++ b/arch/arm64/include/asm/perf_event.h @@ -17,6 +17,8 @@ #ifndef __ASM_PERF_EVENT_H #define __ASM_PERF_EVENT_H +#include <asm/stack_pointer.h> + #ifdef CONFIG_PERF_EVENTS struct pt_regs; extern unsigned long perf_instruction_pointer(struct pt_regs *regs); diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h index 2013a4dc5124..4325b3622a92 100644 --- a/arch/arm64/include/asm/smp.h +++ b/arch/arm64/include/asm/smp.h @@ -16,11 +16,22 @@ #ifndef __ASM_SMP_H #define __ASM_SMP_H +#include <asm/percpu.h> + #include <linux/threads.h> #include <linux/cpumask.h> #include <linux/thread_info.h> -#define raw_smp_processor_id() (current_thread_info()->cpu) +DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number); + +/* + * We don't use this_cpu_read(cpu_number) as that has implicit writes to + * preempt_count, and associated (compiler) barriers, that we'd like to avoid + * the expense of. If we're preemptible, the value can be stale at use anyway. + * And we can't use this_cpu_ptr() either, as that winds up recursing back + * here under CONFIG_DEBUG_PREEMPT=y. 
+ */ +#define raw_smp_processor_id() (*raw_cpu_ptr(&cpu_number)) struct seq_file; @@ -57,6 +68,9 @@ asmlinkage void secondary_start_kernel(void); */ struct secondary_data { void *stack; +#ifdef CONFIG_THREAD_INFO_IN_TASK + struct task_struct *task; +#endif }; extern struct secondary_data secondary_data; extern void secondary_entry(void); diff --git a/arch/arm64/include/asm/stack_pointer.h b/arch/arm64/include/asm/stack_pointer.h new file mode 100644 index 000000000000..ffcdf742cddf --- /dev/null +++ b/arch/arm64/include/asm/stack_pointer.h @@ -0,0 +1,9 @@ +#ifndef __ASM_STACK_POINTER_H +#define __ASM_STACK_POINTER_H + +/* + * how to get the current stack pointer from C + */ +register unsigned long current_stack_pointer asm ("sp"); + +#endif /* __ASM_STACK_POINTER_H */ diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h index 024d623f662e..92d6a628e478 100644 --- a/arch/arm64/include/asm/suspend.h +++ b/arch/arm64/include/asm/suspend.h @@ -1,7 +1,7 @@ #ifndef __ASM_SUSPEND_H #define __ASM_SUSPEND_H -#define NR_CTX_REGS 10 +#define NR_CTX_REGS 12 #define NR_CALLEE_SAVED_REGS 12 /* diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index 4bb038ec6453..db6d058ab0f3 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -36,25 +36,36 @@ struct task_struct; +#include <asm/stack_pointer.h> #include <asm/types.h> typedef unsigned long mm_segment_t; /* * low level task data that entry.S needs immediate access to. - * __switch_to() assumes cpu_context follows immediately after cpu_domain. */ struct thread_info { unsigned long flags; /* low level flags */ mm_segment_t addr_limit; /* address limit */ +#ifndef CONFIG_THREAD_INFO_IN_TASK struct task_struct *task; /* main task structure */ +#endif #ifdef CONFIG_ARM64_SW_TTBR0_PAN u64 ttbr0; /* saved TTBR0_EL1 */ #endif int preempt_count; /* 0 => preemptable, <0 => bug */ +#ifndef CONFIG_THREAD_INFO_IN_TASK int cpu; /* cpu */ +#endif }; +#ifdef CONFIG_THREAD_INFO_IN_TASK +#define INIT_THREAD_INFO(tsk) \ +{ \ + .preempt_count = INIT_PREEMPT_COUNT, \ + .addr_limit = KERNEL_DS, \ +} +#else #define INIT_THREAD_INFO(tsk) \ { \ .task = &tsk, \ @@ -63,14 +74,6 @@ struct thread_info { .addr_limit = KERNEL_DS, \ } -#define init_thread_info (init_thread_union.thread_info) -#define init_stack (init_thread_union.stack) - -/* - * how to get the current stack pointer from C - */ -register unsigned long current_stack_pointer asm ("sp"); - /* * how to get the thread information struct from C */ @@ -88,6 +91,11 @@ static inline struct thread_info *current_thread_info(void) return (struct thread_info *)sp_el0; } +#define init_thread_info (init_thread_union.thread_info) +#endif + +#define init_stack (init_thread_union.stack) + #define thread_saved_pc(tsk) \ ((unsigned long)(tsk->thread.cpu_context.pc)) #define thread_saved_sp(tsk) \ diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 350c0e99fc6b..36c4307c4af3 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -35,11 +35,16 @@ int main(void) { DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm)); BLANK(); +#ifdef CONFIG_THREAD_INFO_IN_TASK + DEFINE(TSK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags)); + DEFINE(TSK_TI_PREEMPT, offsetof(struct task_struct, thread_info.preempt_count)); + DEFINE(TSK_TI_ADDR_LIMIT, offsetof(struct task_struct, thread_info.addr_limit)); + DEFINE(TSK_STACK, offsetof(struct task_struct, stack)); +#else 
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); - DEFINE(TI_TASK, offsetof(struct thread_info, task)); - DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); +#endif #ifdef CONFIG_ARM64_SW_TTBR0_PAN DEFINE(TSK_TI_TTBR0, offsetof(struct thread_info, ttbr0)); #endif @@ -124,6 +129,11 @@ int main(void) DEFINE(TZ_MINWEST, offsetof(struct timezone, tz_minuteswest)); DEFINE(TZ_DSTTIME, offsetof(struct timezone, tz_dsttime)); BLANK(); +#ifdef CONFIG_THREAD_INFO_IN_TASK + DEFINE(CPU_BOOT_STACK, offsetof(struct secondary_data, stack)); + DEFINE(CPU_BOOT_TASK, offsetof(struct secondary_data, task)); + BLANK(); +#endif #ifdef CONFIG_KVM_ARM_HOST DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt)); DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs)); diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 7822e36d87fb..381f9febcdb6 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -93,9 +93,14 @@ .if \el == 0 mrs x21, sp_el0 +#ifdef CONFIG_THREAD_INFO_IN_TASK + ldr_this_cpu tsk, __entry_task, x20 // Ensure MDSCR_EL1.SS is clear, + ldr x19, [tsk, #TSK_TI_FLAGS] // since we can unmask debug +#else mov tsk, sp and tsk, tsk, #~(THREAD_SIZE - 1) // Ensure MDSCR_EL1.SS is clear, ldr x19, [tsk, #TI_FLAGS] // since we can unmask debug +#endif disable_step_tsk x19, x20 // exceptions when scheduling. mov x29, xzr // fp pointed to user-space @@ -103,10 +108,18 @@ add x21, sp, #S_FRAME_SIZE get_thread_info tsk /* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */ +#ifdef CONFIG_THREAD_INFO_IN_TASK + ldr x20, [tsk, #TSK_TI_ADDR_LIMIT] +#else ldr x20, [tsk, #TI_ADDR_LIMIT] +#endif str x20, [sp, #S_ORIG_ADDR_LIMIT] mov x20, #TASK_SIZE_64 +#ifdef CONFIG_THREAD_INFO_IN_TASK + str x20, [tsk, #TSK_TI_ADDR_LIMIT] +#else str x20, [tsk, #TI_ADDR_LIMIT] +#endif ALTERNATIVE(nop, SET_PSTATE_UAO(0), ARM64_HAS_UAO, CONFIG_ARM64_UAO) .endif /* \el == 0 */ mrs x22, elr_el1 @@ -168,7 +181,11 @@ alternative_else_nop_endif .if \el != 0 /* Restore the task's original addr_limit. */ ldr x20, [sp, #S_ORIG_ADDR_LIMIT] +#ifdef CONFIG_THREAD_INFO_IN_TASK + str x20, [tsk, #TSK_TI_ADDR_LIMIT] +#else str x20, [tsk, #TI_ADDR_LIMIT] +#endif /* No need to restore UAO, it will be restored from SPSR_EL1 */ .endif @@ -258,15 +275,22 @@ alternative_endif mov x19, sp // preserve the original sp /* - * Compare sp with the current thread_info, if the top - * ~(THREAD_SIZE - 1) bits match, we are on a task stack, and - * should switch to the irq stack. + * Compare sp with the base of the task stack. + * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack, + * and should switch to the irq stack. 
*/ +#ifdef CONFIG_THREAD_INFO_IN_TASK + ldr x25, [tsk, TSK_STACK] + eor x25, x25, x19 + and x25, x25, #~(THREAD_SIZE - 1) + cbnz x25, 9998f +#else and x25, x19, #~(THREAD_SIZE - 1) cmp x25, tsk b.ne 9998f +#endif - this_cpu_ptr irq_stack, x25, x26 + adr_this_cpu x25, irq_stack, x26 mov x26, #IRQ_STACK_START_SP add x26, x25, x26 @@ -498,9 +522,17 @@ el1_irq: irq_handler #ifdef CONFIG_PREEMPT +#ifdef CONFIG_THREAD_INFO_IN_TASK + ldr w24, [tsk, #TSK_TI_PREEMPT] // get preempt count +#else ldr w24, [tsk, #TI_PREEMPT] // get preempt count +#endif cbnz w24, 1f // preempt count != 0 +#ifdef CONFIG_THREAD_INFO_IN_TASK + ldr x0, [tsk, #TSK_TI_FLAGS] // get flags +#else ldr x0, [tsk, #TI_FLAGS] // get flags +#endif tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling? bl el1_preempt 1: @@ -515,7 +547,11 @@ ENDPROC(el1_irq) el1_preempt: mov x24, lr 1: bl preempt_schedule_irq // irq en/disable is done inside +#ifdef CONFIG_THREAD_INFO_IN_TASK + ldr x0, [tsk, #TSK_TI_FLAGS] // get new tasks TI_FLAGS +#else ldr x0, [tsk, #TI_FLAGS] // get new tasks TI_FLAGS +#endif tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling? ret x24 #endif @@ -773,8 +809,12 @@ ENTRY(cpu_switch_to) mov v15.16b, v15.16b #endif mov sp, x9 +#ifdef CONFIG_THREAD_INFO_IN_TASK + msr sp_el0, x1 +#else and x9, x9, #~(THREAD_SIZE - 1) msr sp_el0, x9 +#endif ret ENDPROC(cpu_switch_to) @@ -785,7 +825,11 @@ ENDPROC(cpu_switch_to) ret_fast_syscall: disable_irq // disable interrupts str x0, [sp, #S_X0] // returned x0 +#ifdef CONFIG_THREAD_INFO_IN_TASK + ldr x1, [tsk, #TSK_TI_FLAGS] // re-check for syscall tracing +#else ldr x1, [tsk, #TI_FLAGS] // re-check for syscall tracing +#endif and x2, x1, #_TIF_SYSCALL_WORK cbnz x2, ret_fast_syscall_trace and x2, x1, #_TIF_WORK_MASK @@ -817,7 +861,11 @@ work_resched: */ ret_to_user: disable_irq // disable interrupts +#ifdef CONFIG_THREAD_INFO_IN_TASK + ldr x1, [tsk, #TSK_TI_FLAGS] +#else ldr x1, [tsk, #TI_FLAGS] +#endif and x2, x1, #_TIF_WORK_MASK cbnz x2, work_pending enable_step_tsk x1, x2 @@ -849,7 +897,11 @@ el0_svc_naked: // compat entry point enable_dbg_and_irq ct_user_exit 1 +#ifdef CONFIG_THREAD_INFO_IN_TASK + ldr x16, [tsk, #TSK_TI_FLAGS] // check for syscall hooks +#else ldr x16, [tsk, #TI_FLAGS] // check for syscall hooks +#endif tst x16, #_TIF_SYSCALL_WORK b.ne __sys_trace cmp scno, sc_nr // check upper syscall limit diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index a1c2ac38771d..0a0cd0476665 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -418,6 +418,7 @@ ENDPROC(__create_page_tables) .set initial_sp, init_thread_union + THREAD_START_SP __primary_switched: mov x28, lr // preserve LR + adr_l x8, vectors // load VBAR_EL1 with virtual msr vbar_el1, x8 // vector table address isb @@ -430,10 +431,18 @@ __primary_switched: bl __pi_memset dsb ishst // Make zero page visible to PTW +#ifdef CONFIG_THREAD_INFO_IN_TASK + adrp x4, init_thread_union + add sp, x4, #THREAD_SIZE + adr_l x5, init_task + msr sp_el0, x5 // Save thread_info +#else adr_l sp, initial_sp, x4 mov x4, sp and x4, x4, #~(THREAD_SIZE - 1) msr sp_el0, x4 // Save thread_info +#endif + str_l x21, __fdt_pointer, x5 // Save FDT pointer ldr_l x4, kimage_vaddr // Save the offset between @@ -642,11 +651,18 @@ __secondary_switched: adr_l x5, vectors msr vbar_el1, x5 isb - +#ifdef CONFIG_THREAD_INFO_IN_TASK + adr_l x0, secondary_data + ldr x1, [x0, #CPU_BOOT_STACK] // get secondary_data.stack + mov sp, x1 + ldr x2, [x0, #CPU_BOOT_TASK] + msr sp_el0, x2 +#else ldr_l x0, secondary_data // get 
secondary_data.stack mov sp, x0 and x0, x0, #~(THREAD_SIZE - 1) msr sp_el0, x0 // save thread_info +#endif mov x29, #0 b secondary_start_kernel ENDPROC(__secondary_switched) diff --git a/arch/arm64/kernel/perf_trace_counters.c b/arch/arm64/kernel/perf_trace_counters.c index dc92b29ac103..7b852e36eaa2 100644 --- a/arch/arm64/kernel/perf_trace_counters.c +++ b/arch/arm64/kernel/perf_trace_counters.c @@ -65,7 +65,7 @@ void tracectr_notifier(void *ignore, bool preempt, { u32 cnten_val; int current_pid; - u32 cpu = task_thread_info(next)->cpu; + u32 cpu = task_cpu(next); if (tp_pid_state != 1) return; diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index fc0a7aa2ca82..9918489f5af3 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -45,6 +45,9 @@ #include <linux/personality.h> #include <linux/notifier.h> #include <trace/events/power.h> +#ifdef CONFIG_THREAD_INFO_IN_TASK +#include <linux/percpu.h> +#endif #include <asm/alternative.h> #include <asm/compat.h> @@ -394,6 +397,22 @@ void uao_thread_switch(struct task_struct *next) } } +#ifdef CONFIG_THREAD_INFO_IN_TASK +/* + * We store our current task in sp_el0, which is clobbered by userspace. Keep a + * shadow copy so that we can restore this upon entry from userspace. + * + * This is *only* for exception entry from EL0, and is not valid until we + * __switch_to() a user task. + */ +DEFINE_PER_CPU(struct task_struct *, __entry_task); + +static void entry_task_switch(struct task_struct *next) +{ + __this_cpu_write(__entry_task, next); +} +#endif + /* * Thread switching. */ @@ -406,6 +425,9 @@ struct task_struct *__switch_to(struct task_struct *prev, tls_thread_switch(next); hw_breakpoint_thread_switch(next); contextidr_thread_switch(next); +#ifdef CONFIG_THREAD_INFO_IN_TASK + entry_task_switch(next); +#endif uao_thread_switch(next); /* @@ -423,27 +445,35 @@ struct task_struct *__switch_to(struct task_struct *prev, unsigned long get_wchan(struct task_struct *p) { struct stackframe frame; - unsigned long stack_page; + unsigned long stack_page, ret = 0; int count = 0; if (!p || p == current || p->state == TASK_RUNNING) return 0; + stack_page = (unsigned long)try_get_task_stack(p); + if (!stack_page) + return 0; + frame.fp = thread_saved_fp(p); frame.sp = thread_saved_sp(p); frame.pc = thread_saved_pc(p); #ifdef CONFIG_FUNCTION_GRAPH_TRACER frame.graph = p->curr_ret_stack; #endif - stack_page = (unsigned long)task_stack_page(p); do { if (frame.sp < stack_page || frame.sp >= stack_page + THREAD_SIZE || unwind_frame(p, &frame)) - return 0; - if (!in_sched_functions(frame.pc)) - return frame.pc; + goto out; + if (!in_sched_functions(frame.pc)) { + ret = frame.pc; + goto out; + } } while (count ++ < 16); - return 0; + +out: + put_task_stack(p); + return ret; } unsigned long arch_align_stack(unsigned long sp) diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c index 1718706fde83..12a87f2600f2 100644 --- a/arch/arm64/kernel/return_address.c +++ b/arch/arm64/kernel/return_address.c @@ -12,6 +12,7 @@ #include <linux/export.h> #include <linux/ftrace.h> +#include <asm/stack_pointer.h> #include <asm/stacktrace.h> struct return_address_data { diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 30af178c640c..b8b40d95ebef 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -362,12 +362,16 @@ void __init setup_arch(char **cmdline_p) #ifdef CONFIG_ARM64_SW_TTBR0_PAN /* - * Make sure init_thread_info.ttbr0 always generates translation + * Make 
sure thread_info.ttbr0 always generates translation * faults in case uaccess_enable() is inadvertently called by the init * thread. */ +#ifdef CONFIG_THREAD_INFO_IN_TASK + init_task.thread_info.ttbr0 = virt_to_phys(empty_zero_page); +#else init_thread_info.ttbr0 = virt_to_phys(empty_zero_page); #endif +#endif #ifdef CONFIG_VT #if defined(CONFIG_VGA_CONSOLE) diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S index 9a3aec97ac09..9e3cb29e4c50 100644 --- a/arch/arm64/kernel/sleep.S +++ b/arch/arm64/kernel/sleep.S @@ -124,9 +124,6 @@ ENTRY(_cpu_resume) /* load sp from context */ ldr x2, [x0, #CPU_CTX_SP] mov sp, x2 - /* save thread_info */ - and x2, x2, #~(THREAD_SIZE - 1) - msr sp_el0, x2 /* * cpu_do_resume expects x0 to contain context address pointer */ diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index a3a6b2ea9b4d..961fd5d2e7ce 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -54,10 +54,14 @@ #include <asm/ptrace.h> #include <asm/virt.h> #include <asm/edac.h> +#include <soc/qcom/minidump.h> #define CREATE_TRACE_POINTS #include <trace/events/ipi.h> +DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number); +EXPORT_PER_CPU_SYMBOL(cpu_number); + /* * as from 2.5, kernels no longer have an init_tasks structure * so we need some other way of telling a new secondary core @@ -97,6 +101,9 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) * We need to tell the secondary core where to find its stack and the * page tables. */ +#ifdef CONFIG_THREAD_INFO_IN_TASK + secondary_data.task = idle; +#endif secondary_data.stack = task_stack_page(idle) + THREAD_START_SP; __flush_dcache_area(&secondary_data, sizeof(secondary_data)); @@ -120,6 +127,9 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) pr_err("CPU%u: failed to boot: %d\n", cpu, ret); } +#ifdef CONFIG_THREAD_INFO_IN_TASK + secondary_data.task = NULL; +#endif secondary_data.stack = NULL; return ret; @@ -137,7 +147,12 @@ static void smp_store_cpu_info(unsigned int cpuid) asmlinkage void secondary_start_kernel(void) { struct mm_struct *mm = &init_mm; - unsigned int cpu = smp_processor_id(); + unsigned int cpu; + + cpu = task_cpu(current); + set_my_cpu_offset(per_cpu_offset(cpu)); + + pr_debug("CPU%u: Booted secondary processor\n", cpu); /* * All kernel threads share the same mm context; grab a @@ -146,10 +161,6 @@ asmlinkage void secondary_start_kernel(void) atomic_inc(&mm->mm_count); current->active_mm = mm; - set_my_cpu_offset(per_cpu_offset(smp_processor_id())); - - pr_debug("CPU%u: Booted secondary processor\n", cpu); - /* * TTBR0 is only used for the identity mapping at this stage. Make it * point to zero page to avoid speculatively fetching new entries. 
@@ -632,6 +643,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus) if (max_cpus == 0) break; + per_cpu(cpu_number, cpu) = cpu; + if (cpu == smp_processor_id()) continue; @@ -740,6 +753,7 @@ static void ipi_cpu_stop(unsigned int cpu, struct pt_regs *regs) pr_crit("CPU%u: stopping\n", cpu); show_regs(regs); dump_stack(); + dump_stack_minidump(regs->sp); arm64_check_cache_ecc(NULL); raw_spin_unlock(&stop_lock); } diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index 1fd1a9a6596f..2ac2abe8a494 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -23,6 +23,7 @@ #include <linux/stacktrace.h> #include <asm/irq.h> +#include <asm/stack_pointer.h> #include <asm/stacktrace.h> /* @@ -130,7 +131,6 @@ void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame, break; } } -EXPORT_SYMBOL(walk_stackframe); #ifdef CONFIG_STACKTRACE struct stack_trace_data { @@ -157,24 +157,29 @@ static int save_trace(struct stackframe *frame, void *d) return trace->nr_entries >= trace->max_entries; } -void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) +static noinline void __save_stack_trace(struct task_struct *tsk, + struct stack_trace *trace, unsigned int nosched) { struct stack_trace_data data; struct stackframe frame; + if (!try_get_task_stack(tsk)) + return; + data.trace = trace; data.skip = trace->skip; + data.no_sched_functions = nosched; if (tsk != current) { - data.no_sched_functions = 1; frame.fp = thread_saved_fp(tsk); frame.sp = thread_saved_sp(tsk); frame.pc = thread_saved_pc(tsk); } else { - data.no_sched_functions = 0; + /* We don't want this function nor the caller */ + data.skip += 2; frame.fp = (unsigned long)__builtin_frame_address(0); frame.sp = current_stack_pointer; - frame.pc = (unsigned long)save_stack_trace_tsk; + frame.pc = (unsigned long)__save_stack_trace; } #ifdef CONFIG_FUNCTION_GRAPH_TRACER frame.graph = tsk->curr_ret_stack; @@ -183,12 +188,20 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) walk_stackframe(tsk, &frame, save_trace, &data); if (trace->nr_entries < trace->max_entries) trace->entries[trace->nr_entries++] = ULONG_MAX; + + put_task_stack(tsk); } EXPORT_SYMBOL(save_stack_trace_tsk); +void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) +{ + __save_stack_trace(tsk, trace, 1); +} + void save_stack_trace(struct stack_trace *trace) { - save_stack_trace_tsk(current, trace); + __save_stack_trace(current, trace, 0); } + EXPORT_SYMBOL_GPL(save_stack_trace); #endif diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c index 0acdb63d19b6..468b939f3471 100644 --- a/arch/arm64/kernel/suspend.c +++ b/arch/arm64/kernel/suspend.c @@ -45,12 +45,6 @@ void notrace __cpu_suspend_exit(void) cpu_uninstall_idmap(); /* - * Restore per-cpu offset before any kernel - * subsystem relying on it has a chance to run. - */ - set_my_cpu_offset(per_cpu_offset(smp_processor_id())); - - /* * Restore HW breakpoint registers to sane values * before debug exceptions are possibly reenabled * through local_dbg_restore. 
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index f4ab8bc661da..ea40dc101433 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -38,6 +38,7 @@ #include <asm/esr.h> #include <asm/insn.h> #include <asm/traps.h> +#include <asm/stack_pointer.h> #include <asm/stacktrace.h> #include <asm/exception.h> #include <asm/system_misc.h> @@ -153,6 +154,14 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) unsigned long irq_stack_ptr; int skip; + pr_debug("%s(regs = %pK tsk = %pK)\n", __func__, regs, tsk); + + if (!tsk) + tsk = current; + + if (!try_get_task_stack(tsk)) + return; + /* * Switching between stacks is valid when tracing current and in * non-preemptible context. @@ -223,6 +232,8 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) stack + sizeof(struct pt_regs), false); } } + + put_task_stack(tsk); } void show_stack(struct task_struct *tsk, unsigned long *sp) @@ -238,10 +249,9 @@ void show_stack(struct task_struct *tsk, unsigned long *sp) #endif #define S_SMP " SMP" -static int __die(const char *str, int err, struct thread_info *thread, - struct pt_regs *regs) +static int __die(const char *str, int err, struct pt_regs *regs) { - struct task_struct *tsk = thread->task; + struct task_struct *tsk = current; static int die_counter; int ret; @@ -256,7 +266,8 @@ static int __die(const char *str, int err, struct thread_info *thread, print_modules(); __show_regs(regs); pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n", - TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1); + TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), + end_of_stack(tsk)); if (!user_mode(regs) || in_interrupt()) { dump_backtrace(regs, tsk); @@ -321,7 +332,6 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int notify) */ void die(const char *str, struct pt_regs *regs, int err) { - struct thread_info *thread = current_thread_info(); enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE; unsigned long flags = oops_begin(); int ret; @@ -331,7 +341,7 @@ void die(const char *str, struct pt_regs *regs, int err) if (bug_type != BUG_TRAP_TYPE_NONE) str = "Oops - BUG"; - ret = __die(str, err, thread, regs); + ret = __die(str, err, regs); oops_end(flags, regs, ret); } diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index a41178f8eeea..159c79612e63 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -3,6 +3,7 @@ * * Copyright (C) 2012 ARM Ltd. * Author: Catalin Marinas <catalin.marinas@arm.com> + * Copyright (c) 2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -49,17 +50,6 @@ static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot, return prot; } -static int __get_iommu_pgprot(struct dma_attrs *attrs, int prot, - bool coherent) -{ - if (!dma_get_attr(DMA_ATTR_EXEC_MAPPING, attrs)) - prot |= IOMMU_NOEXEC; - if (coherent) - prot |= IOMMU_CACHE; - - return prot; -} - static bool is_dma_coherent(struct device *dev, struct dma_attrs *attrs) { bool is_coherent; @@ -930,7 +920,6 @@ static struct dma_map_ops iommu_dma_ops = { .sync_single_for_device = __iommu_sync_single_for_device, .sync_sg_for_cpu = __iommu_sync_sg_for_cpu, .sync_sg_for_device = __iommu_sync_sg_for_device, - .dma_supported = iommu_dma_supported, .mapping_error = iommu_dma_mapping_error, }; @@ -1145,6 +1134,17 @@ EXPORT_SYMBOL(arch_setup_dma_ops); #ifdef CONFIG_ARM64_DMA_USE_IOMMU +static int __get_iommu_pgprot(struct dma_attrs *attrs, int prot, + bool coherent) +{ + if (!dma_get_attr(DMA_ATTR_EXEC_MAPPING, attrs)) + prot |= IOMMU_NOEXEC; + if (coherent) + prot |= IOMMU_CACHE; + + return prot; +} + /* * Make an area consistent for devices. * Note: Drivers should NOT use this function directly, as it will break diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 81a0de4e457d..d780180106c1 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -116,11 +116,14 @@ ENTRY(cpu_do_suspend) mrs x8, mdscr_el1 mrs x9, oslsr_el1 mrs x10, sctlr_el1 + mrs x11, tpidr_el1 + mrs x12, sp_el0 stp x2, x3, [x0] stp x4, xzr, [x0, #16] stp x5, x6, [x0, #32] stp x7, x8, [x0, #48] stp x9, x10, [x0, #64] + stp x11, x12, [x0, #80] ret ENDPROC(cpu_do_suspend) @@ -135,6 +138,7 @@ ENTRY(cpu_do_resume) ldp x6, x8, [x0, #32] ldp x9, x10, [x0, #48] ldp x11, x12, [x0, #64] + ldp x13, x14, [x0, #80] msr tpidr_el0, x2 msr tpidrro_el0, x3 msr contextidr_el1, x4 @@ -148,6 +152,8 @@ ENTRY(cpu_do_resume) msr vbar_el1, x9 msr mdscr_el1, x10 msr sctlr_el1, x12 + msr tpidr_el1, x13 + msr sp_el0, x14 /* * Restore oslsr_el1 by writing oslar_el1 */ diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 2c86a4ef6742..7091a367eeda 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -45,7 +45,7 @@ config IA64 select GENERIC_SMP_IDLE_THREAD select ARCH_INIT_TASK select ARCH_TASK_STRUCT_ALLOCATOR - select ARCH_THREAD_INFO_ALLOCATOR + select ARCH_THREAD_STACK_ALLOCATOR select ARCH_CLOCKSOURCE_DATA select GENERIC_TIME_VSYSCALL_OLD select SYSCTL_ARCH_UNALIGN_NO_WARN diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h index aa995b67c3f5..d1212b84fb83 100644 --- a/arch/ia64/include/asm/thread_info.h +++ b/arch/ia64/include/asm/thread_info.h @@ -48,15 +48,15 @@ struct thread_info { #ifndef ASM_OFFSETS_C /* how to get the thread information struct from C */ #define current_thread_info() ((struct thread_info *) ((char *) current + IA64_TASK_SIZE)) -#define alloc_thread_info_node(tsk, node) \ - ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE)) +#define alloc_thread_stack_node(tsk, node) \ + ((unsigned long *) ((char *) (tsk) + IA64_TASK_SIZE)) #define task_thread_info(tsk) ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE)) #else #define current_thread_info() ((struct thread_info *) 0) -#define alloc_thread_info_node(tsk, node) ((struct thread_info *) 0) +#define alloc_thread_stack_node(tsk, node) ((unsigned long *) 0) #define task_thread_info(tsk) ((struct thread_info *) 0) #endif -#define 
free_thread_info(ti) /* nothing */ +#define free_thread_stack(ti) /* nothing */ #define task_stack_page(tsk) ((void *)(tsk)) #define __HAVE_THREAD_FUNCTIONS diff --git a/arch/ia64/kernel/init_task.c b/arch/ia64/kernel/init_task.c index f9efe9739d3f..0eaa89f3defd 100644 --- a/arch/ia64/kernel/init_task.c +++ b/arch/ia64/kernel/init_task.c @@ -26,6 +26,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); * handled. This is done by having a special ".data..init_task" section... */ #define init_thread_info init_task_mem.s.thread_info +#define init_stack init_task_mem.stack union { struct { diff --git a/arch/mn10300/include/asm/thread_info.h b/arch/mn10300/include/asm/thread_info.h index 4861a78c7160..f5f90bbf019d 100644 --- a/arch/mn10300/include/asm/thread_info.h +++ b/arch/mn10300/include/asm/thread_info.h @@ -115,7 +115,7 @@ static inline unsigned long current_stack_pointer(void) } #ifndef CONFIG_KGDB -void arch_release_thread_info(struct thread_info *ti); +void arch_release_thread_stack(unsigned long *stack); #endif #define get_thread_info(ti) get_task_struct((ti)->task) #define put_thread_info(ti) put_task_struct((ti)->task) diff --git a/arch/mn10300/kernel/kgdb.c b/arch/mn10300/kernel/kgdb.c index 99770823451a..2d7986c386fe 100644 --- a/arch/mn10300/kernel/kgdb.c +++ b/arch/mn10300/kernel/kgdb.c @@ -397,8 +397,9 @@ static bool kgdb_arch_undo_singlestep(struct pt_regs *regs) * single-step state is cleared. At this point the breakpoints should have * been removed by __switch_to(). */ -void arch_release_thread_info(struct thread_info *ti) +void arch_release_thread_stack(unsigned long *stack) { + struct thread_info *ti = (void *)stack; if (kgdb_sstep_thread == ti) { kgdb_sstep_thread = NULL; diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 0e2919dd8df3..1395eeb6005f 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -1250,7 +1250,8 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp) insn_count = bpf_jit_insn(jit, fp, i); if (insn_count < 0) return -1; - jit->addrs[i + 1] = jit->prg; /* Next instruction address */ + /* Next instruction address */ + jit->addrs[i + insn_count] = jit->prg; } bpf_jit_epilogue(jit); diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h index 349dd23e2876..0cdeb2b483a0 100644 --- a/arch/sparc/include/asm/mmu_context_64.h +++ b/arch/sparc/include/asm/mmu_context_64.h @@ -25,9 +25,11 @@ void destroy_context(struct mm_struct *mm); void __tsb_context_switch(unsigned long pgd_pa, struct tsb_config *tsb_base, struct tsb_config *tsb_huge, - unsigned long tsb_descr_pa); + unsigned long tsb_descr_pa, + unsigned long secondary_ctx); -static inline void tsb_context_switch(struct mm_struct *mm) +static inline void tsb_context_switch_ctx(struct mm_struct *mm, + unsigned long ctx) { __tsb_context_switch(__pa(mm->pgd), &mm->context.tsb_block[0], @@ -38,9 +40,12 @@ static inline void tsb_context_switch(struct mm_struct *mm) #else NULL #endif - , __pa(&mm->context.tsb_descr[0])); + , __pa(&mm->context.tsb_descr[0]), + ctx); } +#define tsb_context_switch(X) tsb_context_switch_ctx(X, 0) + void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long mm_rss); @@ -110,8 +115,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str * cpu0 to update it's TSB because at that point the cpu_vm_mask * only had cpu1 set in it. 
*/ - load_secondary_context(mm); - tsb_context_switch(mm); + tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context)); /* Any time a processor runs a context on an address space * for the first time, we must flush that context out of the diff --git a/arch/sparc/include/asm/trap_block.h b/arch/sparc/include/asm/trap_block.h index ec9c04de3664..ff05992dae7a 100644 --- a/arch/sparc/include/asm/trap_block.h +++ b/arch/sparc/include/asm/trap_block.h @@ -54,6 +54,7 @@ extern struct trap_per_cpu trap_block[NR_CPUS]; void init_cur_cpu_trap(struct thread_info *); void setup_tba(void); extern int ncpus_probed; +extern u64 cpu_mondo_counter[NR_CPUS]; unsigned long real_hard_smp_processor_id(void); diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index 95a9fa0d2195..4511caa3b7e9 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -617,22 +617,48 @@ retry: } } -/* Multi-cpu list version. */ +#define CPU_MONDO_COUNTER(cpuid) (cpu_mondo_counter[cpuid]) +#define MONDO_USEC_WAIT_MIN 2 +#define MONDO_USEC_WAIT_MAX 100 +#define MONDO_RETRY_LIMIT 500000 + +/* Multi-cpu list version. + * + * Deliver xcalls to 'cnt' number of cpus in 'cpu_list'. + * Sometimes not all cpus receive the mondo, requiring us to re-send + * the mondo until all cpus have received, or cpus are truly stuck + * unable to receive mondo, and we timeout. + * Occasionally a target cpu strand is borrowed briefly by hypervisor to + * perform guest service, such as PCIe error handling. Consider the + * service time, 1 second overall wait is reasonable for 1 cpu. + * Here two in-between mondo check wait time are defined: 2 usec for + * single cpu quick turn around and up to 100usec for large cpu count. + * Deliver mondo to large number of cpus could take longer, we adjusts + * the retry count as long as target cpus are making forward progress. + */ static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt) { - int retries, this_cpu, prev_sent, i, saw_cpu_error; + int this_cpu, tot_cpus, prev_sent, i, rem; + int usec_wait, retries, tot_retries; + u16 first_cpu = 0xffff; + unsigned long xc_rcvd = 0; unsigned long status; + int ecpuerror_id = 0; + int enocpu_id = 0; u16 *cpu_list; + u16 cpu; this_cpu = smp_processor_id(); - cpu_list = __va(tb->cpu_list_pa); - - saw_cpu_error = 0; - retries = 0; + usec_wait = cnt * MONDO_USEC_WAIT_MIN; + if (usec_wait > MONDO_USEC_WAIT_MAX) + usec_wait = MONDO_USEC_WAIT_MAX; + retries = tot_retries = 0; + tot_cpus = cnt; prev_sent = 0; + do { - int forward_progress, n_sent; + int n_sent, mondo_delivered, target_cpu_busy; status = sun4v_cpu_mondo_send(cnt, tb->cpu_list_pa, @@ -640,94 +666,113 @@ static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt) /* HV_EOK means all cpus received the xcall, we're done. */ if (likely(status == HV_EOK)) - break; + goto xcall_done; + + /* If not these non-fatal errors, panic */ + if (unlikely((status != HV_EWOULDBLOCK) && + (status != HV_ECPUERROR) && + (status != HV_ENOCPU))) + goto fatal_errors; /* First, see if we made any forward progress. * + * Go through the cpu_list, count the target cpus that have + * received our mondo (n_sent), and those that did not (rem). + * Re-pack cpu_list with the cpus remain to be retried in the + * front - this simplifies tracking the truly stalled cpus. + * * The hypervisor indicates successful sends by setting * cpu list entries to the value 0xffff. + * + * EWOULDBLOCK means some target cpus did not receive the + * mondo and retry usually helps. 
+ * + * ECPUERROR means at least one target cpu is in error state, + * it's usually safe to skip the faulty cpu and retry. + * + * ENOCPU means one of the target cpu doesn't belong to the + * domain, perhaps offlined which is unexpected, but not + * fatal and it's okay to skip the offlined cpu. */ + rem = 0; n_sent = 0; for (i = 0; i < cnt; i++) { - if (likely(cpu_list[i] == 0xffff)) + cpu = cpu_list[i]; + if (likely(cpu == 0xffff)) { n_sent++; + } else if ((status == HV_ECPUERROR) && + (sun4v_cpu_state(cpu) == HV_CPU_STATE_ERROR)) { + ecpuerror_id = cpu + 1; + } else if (status == HV_ENOCPU && !cpu_online(cpu)) { + enocpu_id = cpu + 1; + } else { + cpu_list[rem++] = cpu; + } } - forward_progress = 0; - if (n_sent > prev_sent) - forward_progress = 1; + /* No cpu remained, we're done. */ + if (rem == 0) + break; - prev_sent = n_sent; + /* Otherwise, update the cpu count for retry. */ + cnt = rem; - /* If we get a HV_ECPUERROR, then one or more of the cpus - * in the list are in error state. Use the cpu_state() - * hypervisor call to find out which cpus are in error state. + /* Record the overall number of mondos received by the + * first of the remaining cpus. */ - if (unlikely(status == HV_ECPUERROR)) { - for (i = 0; i < cnt; i++) { - long err; - u16 cpu; + if (first_cpu != cpu_list[0]) { + first_cpu = cpu_list[0]; + xc_rcvd = CPU_MONDO_COUNTER(first_cpu); + } - cpu = cpu_list[i]; - if (cpu == 0xffff) - continue; + /* Was any mondo delivered successfully? */ + mondo_delivered = (n_sent > prev_sent); + prev_sent = n_sent; - err = sun4v_cpu_state(cpu); - if (err == HV_CPU_STATE_ERROR) { - saw_cpu_error = (cpu + 1); - cpu_list[i] = 0xffff; - } - } - } else if (unlikely(status != HV_EWOULDBLOCK)) - goto fatal_mondo_error; + /* or, was any target cpu busy processing other mondos? */ + target_cpu_busy = (xc_rcvd < CPU_MONDO_COUNTER(first_cpu)); + xc_rcvd = CPU_MONDO_COUNTER(first_cpu); - /* Don't bother rewriting the CPU list, just leave the - * 0xffff and non-0xffff entries in there and the - * hypervisor will do the right thing. - * - * Only advance timeout state if we didn't make any - * forward progress. + /* Retry count is for no progress. If we're making progress, + * reset the retry count. */ - if (unlikely(!forward_progress)) { - if (unlikely(++retries > 10000)) - goto fatal_mondo_timeout; - - /* Delay a little bit to let other cpus catch up - * on their cpu mondo queue work. - */ - udelay(2 * cnt); + if (likely(mondo_delivered || target_cpu_busy)) { + tot_retries += retries; + retries = 0; + } else if (unlikely(retries > MONDO_RETRY_LIMIT)) { + goto fatal_mondo_timeout; } - } while (1); - if (unlikely(saw_cpu_error)) - goto fatal_mondo_cpu_error; + /* Delay a little bit to let other cpus catch up on + * their cpu mondo queue work. 
+ */ + if (!mondo_delivered) + udelay(usec_wait); - return; + retries++; + } while (1); -fatal_mondo_cpu_error: - printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus " - "(including %d) were in error state\n", - this_cpu, saw_cpu_error - 1); +xcall_done: + if (unlikely(ecpuerror_id > 0)) { + pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) was in error state\n", + this_cpu, ecpuerror_id - 1); + } else if (unlikely(enocpu_id > 0)) { + pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) does not belong to the domain\n", + this_cpu, enocpu_id - 1); + } return; +fatal_errors: + /* fatal errors include bad alignment, etc */ + pr_crit("CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) mondo_block_pa(%lx)\n", + this_cpu, tot_cpus, tb->cpu_list_pa, tb->cpu_mondo_block_pa); + panic("Unexpected SUN4V mondo error %lu\n", status); + fatal_mondo_timeout: - printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward " - " progress after %d retries.\n", - this_cpu, retries); - goto dump_cpu_list_and_out; - -fatal_mondo_error: - printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n", - this_cpu, status); - printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) " - "mondo_block_pa(%lx)\n", - this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa); - -dump_cpu_list_and_out: - printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu); - for (i = 0; i < cnt; i++) - printk("%u ", cpu_list[i]); - printk("]\n"); + /* some cpus being non-responsive to the cpu mondo */ + pr_crit("CPU[%d]: SUN4V mondo timeout, cpu(%d) made no forward progress after %d retries. Total target cpus(%d).\n", + this_cpu, first_cpu, (tot_retries + retries), tot_cpus); + panic("SUN4V mondo timeout panic\n"); } static void (*xcall_deliver_impl)(struct trap_per_cpu *, int); diff --git a/arch/sparc/kernel/sun4v_ivec.S b/arch/sparc/kernel/sun4v_ivec.S index 559bc5e9c199..34631995859a 100644 --- a/arch/sparc/kernel/sun4v_ivec.S +++ b/arch/sparc/kernel/sun4v_ivec.S @@ -26,6 +26,21 @@ sun4v_cpu_mondo: ldxa [%g0] ASI_SCRATCHPAD, %g4 sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4 + /* Get smp_processor_id() into %g3 */ + sethi %hi(trap_block), %g5 + or %g5, %lo(trap_block), %g5 + sub %g4, %g5, %g3 + srlx %g3, TRAP_BLOCK_SZ_SHIFT, %g3 + + /* Increment cpu_mondo_counter[smp_processor_id()] */ + sethi %hi(cpu_mondo_counter), %g5 + or %g5, %lo(cpu_mondo_counter), %g5 + sllx %g3, 3, %g3 + add %g5, %g3, %g5 + ldx [%g5], %g3 + add %g3, 1, %g3 + stx %g3, [%g5] + /* Get CPU mondo queue base phys address into %g7. 
*/ ldx [%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c index cc97a43268ee..d883c5951e8b 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c @@ -2659,6 +2659,7 @@ void do_getpsr(struct pt_regs *regs) } } +u64 cpu_mondo_counter[NR_CPUS] = {0}; struct trap_per_cpu trap_block[NR_CPUS]; EXPORT_SYMBOL(trap_block); diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S index 395ec1800530..7d961f6e3907 100644 --- a/arch/sparc/kernel/tsb.S +++ b/arch/sparc/kernel/tsb.S @@ -375,6 +375,7 @@ tsb_flush: * %o1: TSB base config pointer * %o2: TSB huge config pointer, or NULL if none * %o3: Hypervisor TSB descriptor physical address + * %o4: Secondary context to load, if non-zero * * We have to run this whole thing with interrupts * disabled so that the current cpu doesn't change @@ -387,6 +388,17 @@ __tsb_context_switch: rdpr %pstate, %g1 wrpr %g1, PSTATE_IE, %pstate + brz,pn %o4, 1f + mov SECONDARY_CONTEXT, %o5 + +661: stxa %o4, [%o5] ASI_DMMU + .section .sun4v_1insn_patch, "ax" + .word 661b + stxa %o4, [%o5] ASI_MMU + .previous + flush %g6 + +1: TRAP_LOAD_TRAP_BLOCK(%g2, %g3) stx %o0, [%g2 + TRAP_PER_CPU_PGD_PADDR] diff --git a/arch/sparc/power/hibernate.c b/arch/sparc/power/hibernate.c index 17bd2e167e07..df707a8ad311 100644 --- a/arch/sparc/power/hibernate.c +++ b/arch/sparc/power/hibernate.c @@ -35,6 +35,5 @@ void restore_processor_state(void) { struct mm_struct *mm = current->active_mm; - load_secondary_context(mm); - tsb_context_switch(mm); + tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context)); } diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h index dc1fb28d9636..489b15016303 100644 --- a/arch/tile/include/asm/thread_info.h +++ b/arch/tile/include/asm/thread_info.h @@ -78,7 +78,7 @@ struct thread_info { #ifndef __ASSEMBLY__ -void arch_release_thread_info(struct thread_info *info); +void arch_release_thread_stack(unsigned long *stack); /* How to get the thread information struct from C. 
*/ register unsigned long stack_pointer __asm__("sp"); diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c index 7d5769310bef..a97ab1a69a90 100644 --- a/arch/tile/kernel/process.c +++ b/arch/tile/kernel/process.c @@ -73,8 +73,9 @@ void arch_cpu_idle(void) /* * Release a thread_info structure */ -void arch_release_thread_info(struct thread_info *info) +void arch_release_thread_stack(unsigned long *stack) { + struct thread_info *info = (void *)stack; struct single_step_state *step_state = info->step_state; if (step_state) { diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c index 318b8465d302..06ceddb3a22e 100644 --- a/arch/x86/boot/string.c +++ b/arch/x86/boot/string.c @@ -14,6 +14,7 @@ #include <linux/types.h> #include "ctype.h" +#include "string.h" int memcmp(const void *s1, const void *s2, size_t len) { diff --git a/arch/x86/boot/string.h b/arch/x86/boot/string.h index 725e820602b1..113588ddb43f 100644 --- a/arch/x86/boot/string.h +++ b/arch/x86/boot/string.h @@ -18,4 +18,13 @@ int memcmp(const void *s1, const void *s2, size_t len); #define memset(d,c,l) __builtin_memset(d,c,l) #define memcmp __builtin_memcmp +extern int strcmp(const char *str1, const char *str2); +extern int strncmp(const char *cs, const char *ct, size_t count); +extern size_t strlen(const char *s); +extern char *strstr(const char *s1, const char *s2); +extern size_t strnlen(const char *s, size_t maxlen); +extern unsigned int atou(const char *s); +extern unsigned long long simple_strtoull(const char *cp, char **endp, + unsigned int base); + #endif /* BOOT_STRING_H */ diff --git a/arch/x86/crypto/sha1_avx2_x86_64_asm.S b/arch/x86/crypto/sha1_avx2_x86_64_asm.S index 1cd792db15ef..1eab79c9ac48 100644 --- a/arch/x86/crypto/sha1_avx2_x86_64_asm.S +++ b/arch/x86/crypto/sha1_avx2_x86_64_asm.S @@ -117,11 +117,10 @@ .set T1, REG_T1 .endm -#define K_BASE %r8 #define HASH_PTR %r9 +#define BLOCKS_CTR %r8 #define BUFFER_PTR %r10 #define BUFFER_PTR2 %r13 -#define BUFFER_END %r11 #define PRECALC_BUF %r14 #define WK_BUF %r15 @@ -205,14 +204,14 @@ * blended AVX2 and ALU instruction scheduling * 1 vector iteration per 8 rounds */ - vmovdqu ((i * 2) + PRECALC_OFFSET)(BUFFER_PTR), W_TMP + vmovdqu (i * 2)(BUFFER_PTR), W_TMP .elseif ((i & 7) == 1) - vinsertf128 $1, (((i-1) * 2)+PRECALC_OFFSET)(BUFFER_PTR2),\ + vinsertf128 $1, ((i-1) * 2)(BUFFER_PTR2),\ WY_TMP, WY_TMP .elseif ((i & 7) == 2) vpshufb YMM_SHUFB_BSWAP, WY_TMP, WY .elseif ((i & 7) == 4) - vpaddd K_XMM(K_BASE), WY, WY_TMP + vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP .elseif ((i & 7) == 7) vmovdqu WY_TMP, PRECALC_WK(i&~7) @@ -255,7 +254,7 @@ vpxor WY, WY_TMP, WY_TMP .elseif ((i & 7) == 7) vpxor WY_TMP2, WY_TMP, WY - vpaddd K_XMM(K_BASE), WY, WY_TMP + vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP vmovdqu WY_TMP, PRECALC_WK(i&~7) PRECALC_ROTATE_WY @@ -291,7 +290,7 @@ vpsrld $30, WY, WY vpor WY, WY_TMP, WY .elseif ((i & 7) == 7) - vpaddd K_XMM(K_BASE), WY, WY_TMP + vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP vmovdqu WY_TMP, PRECALC_WK(i&~7) PRECALC_ROTATE_WY @@ -446,6 +445,16 @@ .endm +/* Add constant only if (%2 > %3) condition met (uses RTA as temp) + * %1 + %2 >= %3 ? 
%4 : 0 + */ +.macro ADD_IF_GE a, b, c, d + mov \a, RTA + add $\d, RTA + cmp $\c, \b + cmovge RTA, \a +.endm + /* * macro implements 80 rounds of SHA-1, for multiple blocks with s/w pipelining */ @@ -463,13 +472,16 @@ lea (2*4*80+32)(%rsp), WK_BUF # Precalc WK for first 2 blocks - PRECALC_OFFSET = 0 + ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 2, 64 .set i, 0 .rept 160 PRECALC i .set i, i + 1 .endr - PRECALC_OFFSET = 128 + + /* Go to next block if needed */ + ADD_IF_GE BUFFER_PTR, BLOCKS_CTR, 3, 128 + ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128 xchg WK_BUF, PRECALC_BUF .align 32 @@ -479,8 +491,8 @@ _loop: * we use K_BASE value as a signal of a last block, * it is set below by: cmovae BUFFER_PTR, K_BASE */ - cmp K_BASE, BUFFER_PTR - jne _begin + test BLOCKS_CTR, BLOCKS_CTR + jnz _begin .align 32 jmp _end .align 32 @@ -512,10 +524,10 @@ _loop0: .set j, j+2 .endr - add $(2*64), BUFFER_PTR /* move to next odd-64-byte block */ - cmp BUFFER_END, BUFFER_PTR /* is current block the last one? */ - cmovae K_BASE, BUFFER_PTR /* signal the last iteration smartly */ - + /* Update Counter */ + sub $1, BLOCKS_CTR + /* Move to the next block only if needed*/ + ADD_IF_GE BUFFER_PTR, BLOCKS_CTR, 4, 128 /* * rounds * 60,62,64,66,68 @@ -532,8 +544,8 @@ _loop0: UPDATE_HASH 12(HASH_PTR), D UPDATE_HASH 16(HASH_PTR), E - cmp K_BASE, BUFFER_PTR /* is current block the last one? */ - je _loop + test BLOCKS_CTR, BLOCKS_CTR + jz _loop mov TB, B @@ -575,10 +587,10 @@ _loop2: .set j, j+2 .endr - add $(2*64), BUFFER_PTR2 /* move to next even-64-byte block */ - - cmp BUFFER_END, BUFFER_PTR2 /* is current block the last one */ - cmovae K_BASE, BUFFER_PTR /* signal the last iteration smartly */ + /* update counter */ + sub $1, BLOCKS_CTR + /* Move to the next block only if needed*/ + ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128 jmp _loop3 _loop3: @@ -641,19 +653,12 @@ _loop3: avx2_zeroupper - lea K_XMM_AR(%rip), K_BASE - + /* Setup initial values */ mov CTX, HASH_PTR mov BUF, BUFFER_PTR - lea 64(BUF), BUFFER_PTR2 - - shl $6, CNT /* mul by 64 */ - add BUF, CNT - add $64, CNT - mov CNT, BUFFER_END - cmp BUFFER_END, BUFFER_PTR2 - cmovae K_BASE, BUFFER_PTR2 + mov BUF, BUFFER_PTR2 + mov CNT, BLOCKS_CTR xmm_mov BSWAP_SHUFB_CTL(%rip), YMM_SHUFB_BSWAP diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c index 7de207a11014..dd14616b7739 100644 --- a/arch/x86/crypto/sha1_ssse3_glue.c +++ b/arch/x86/crypto/sha1_ssse3_glue.c @@ -201,7 +201,7 @@ asmlinkage void sha1_transform_avx2(u32 *digest, const char *data, static bool avx2_usable(void) { - if (false && avx_usable() && boot_cpu_has(X86_FEATURE_AVX2) + if (avx_usable() && boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_BMI1) && boot_cpu_has(X86_FEATURE_BMI2)) return true; diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index a55697d19824..cc0f2f5da19b 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -1190,6 +1190,8 @@ ENTRY(nmi) * other IST entries. */ + ASM_CLAC + /* Use %rdx as our temp variable throughout */ pushq %rdx diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h index 07cf288b692e..bcd3d6199464 100644 --- a/arch/x86/include/asm/elf.h +++ b/arch/x86/include/asm/elf.h @@ -247,11 +247,11 @@ extern int force_personality32; /* * This is the base location for PIE (ET_DYN with INTERP) loads. 
On - * 64-bit, this is raised to 4GB to leave the entire 32-bit address + * 64-bit, this is above 4GB to leave the entire 32-bit address * space open for things that want to use the area for 32-bit pointers. */ #define ELF_ET_DYN_BASE (mmap_is_ia32() ? 0x000400000UL : \ - 0x100000000UL) + (TASK_SIZE / 3 * 2)) /* This yields a mask that user programs can use to figure out what instruction set this CPU supports. This could be done in user space, diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c index 8900400230c6..2cdae69d7e0b 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c +++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c @@ -153,7 +153,7 @@ static void __intel_pmu_lbr_enable(bool pmi) */ if (cpuc->lbr_sel) lbr_select = cpuc->lbr_sel->config; - if (!pmi) + if (!pmi && cpuc->lbr_sel) wrmsrl(MSR_LBR_SELECT, lbr_select); rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); @@ -432,8 +432,10 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc) int out = 0; int num = x86_pmu.lbr_nr; - if (cpuc->lbr_sel->config & LBR_CALL_STACK) - num = tos; + if (cpuc->lbr_sel) { + if (cpuc->lbr_sel->config & LBR_CALL_STACK) + num = tos; + } for (i = 0; i < num; i++) { unsigned long lbr_idx = (tos - i) & mask; diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index cec49ecf5f31..32187f8a49b4 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -151,6 +151,8 @@ void kvm_async_pf_task_wait(u32 token) if (hlist_unhashed(&n.link)) break; + rcu_irq_exit(); + if (!n.halted) { local_irq_enable(); schedule(); @@ -159,11 +161,11 @@ void kvm_async_pf_task_wait(u32 token) /* * We cannot reschedule. So halt. */ - rcu_irq_exit(); native_safe_halt(); local_irq_disable(); - rcu_irq_enter(); } + + rcu_irq_enter(); } if (!n.halted) finish_wait(&n.wq, &wait); diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index eac4f3b02df9..bb81cd05f0bc 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -1067,6 +1067,7 @@ static int ghes_remove(struct platform_device *ghes_dev) if (list_empty(&ghes_sci)) unregister_acpi_hed_notifier(&ghes_notifier_sci); mutex_unlock(&ghes_list_mutex); + synchronize_rcu(); break; case ACPI_HEST_NOTIFY_NMI: ghes_nmi_remove(ghes); diff --git a/drivers/acpi/ioapic.c b/drivers/acpi/ioapic.c index ccdc8db16bb8..fa2cf2dc4e33 100644 --- a/drivers/acpi/ioapic.c +++ b/drivers/acpi/ioapic.c @@ -45,6 +45,12 @@ static acpi_status setup_res(struct acpi_resource *acpi_res, void *data) struct resource *res = data; struct resource_win win; + /* + * We might assign this to 'res' later, make sure all pointers are + * cleared before the resource is added to the global list + */ + memset(&win, 0, sizeof(win)); + res->flags = 0; if (acpi_dev_filter_resource_type(acpi_res, IORESOURCE_MEM) == 0) return AE_OK; diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 71ebe36577c6..34f45abe0181 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -3253,6 +3253,7 @@ static void binder_transaction(struct binder_proc *proc, err_dead_proc_or_thread: return_error = BR_DEAD_REPLY; return_error_line = __LINE__; + binder_dequeue_work(proc, tcomplete); err_translate_failed: err_bad_object_type: err_bad_offset: @@ -3522,11 +3523,13 @@ static int binder_thread_write(struct binder_proc *proc, BUG_ON(buf_node->proc != proc); w = binder_dequeue_work_head_ilocked( &buf_node->async_todo); - if (!w) + if (!w) { buf_node->has_async_transaction = 0; - else + } else { binder_enqueue_work_ilocked( - w, 
&thread->todo); + w, &proc->todo); + binder_wakeup_proc_ilocked(proc); + } binder_node_inner_unlock(buf_node); } trace_binder_transaction_buffer_release(buffer); @@ -3670,22 +3673,12 @@ static int binder_thread_write(struct binder_proc *proc, ref->death = death; if (ref->node->proc == NULL) { ref->death->work.type = BINDER_WORK_DEAD_BINDER; - if (thread->looper & - (BINDER_LOOPER_STATE_REGISTERED | - BINDER_LOOPER_STATE_ENTERED)) - binder_enqueue_work( - proc, - &ref->death->work, - &thread->todo); - else { - binder_inner_proc_lock(proc); - binder_enqueue_work_ilocked( - &ref->death->work, - &proc->todo); - binder_wakeup_proc_ilocked( - proc); - binder_inner_proc_unlock(proc); - } + + binder_inner_proc_lock(proc); + binder_enqueue_work_ilocked( + &ref->death->work, &proc->todo); + binder_wakeup_proc_ilocked(proc); + binder_inner_proc_unlock(proc); } } else { if (ref->death == NULL) { @@ -3802,12 +3795,6 @@ static void binder_stat_br(struct binder_proc *proc, } } -static int binder_has_thread_work(struct binder_thread *thread) -{ - return !binder_worklist_empty(thread->proc, &thread->todo) || - thread->looper_need_return; -} - static int binder_put_node_cmd(struct binder_proc *proc, struct binder_thread *thread, void __user **ptrp, @@ -4438,12 +4425,9 @@ static unsigned int binder_poll(struct file *filp, binder_inner_proc_unlock(thread->proc); - if (binder_has_work(thread, wait_for_proc_work)) - return POLLIN; - poll_wait(filp, &thread->wait, wait); - if (binder_has_thread_work(thread)) + if (binder_has_work(thread, wait_for_proc_work)) return POLLIN; return 0; diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index e417e1a1d02c..5b2aee83d776 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -2832,10 +2832,12 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc) static struct ata_device *ata_find_dev(struct ata_port *ap, int devno) { if (!sata_pmp_attached(ap)) { - if (likely(devno < ata_link_max_devices(&ap->link))) + if (likely(devno >= 0 && + devno < ata_link_max_devices(&ap->link))) return &ap->link.device[devno]; } else { - if (likely(devno < ap->nr_pmp_links)) + if (likely(devno >= 0 && + devno < ap->nr_pmp_links)) return &ap->pmp_link[devno].device[0]; } diff --git a/drivers/base/core.c b/drivers/base/core.c index 5a56a8e9f006..f3d395bfe8f6 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -1116,13 +1116,7 @@ int device_add(struct device *dev) error = dpm_sysfs_add(dev); if (error) goto DPMError; - if ((dev->pm_domain) || (dev->type && dev->type->pm) - || (dev->class && (dev->class->pm || dev->class->resume)) - || (dev->bus && (dev->bus->pm || dev->bus->resume)) || - (dev->driver && dev->driver->pm)) { - device_pm_add(dev); - } - + device_pm_add(dev); if (MAJOR(dev->devt)) { error = device_create_file(dev, &dev_attr_dev); diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index a88590bb0b10..6c5bc3fadfcf 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -162,12 +162,6 @@ void device_pm_move_before(struct device *deva, struct device *devb) pr_debug("PM: Moving %s:%s before %s:%s\n", deva->bus ? deva->bus->name : "No Bus", dev_name(deva), devb->bus ? 
devb->bus->name : "No Bus", dev_name(devb)); - if (!((devb->pm_domain) || (devb->type && devb->type->pm) - || (devb->class && (devb->class->pm || devb->class->resume)) - || (devb->bus && (devb->bus->pm || devb->bus->resume)) || - (devb->driver && devb->driver->pm))) { - device_pm_add(devb); - } /* Delete deva from dpm_list and reinsert before devb. */ list_move_tail(&deva->power.entry, &devb->power.entry); } @@ -182,12 +176,6 @@ void device_pm_move_after(struct device *deva, struct device *devb) pr_debug("PM: Moving %s:%s after %s:%s\n", deva->bus ? deva->bus->name : "No Bus", dev_name(deva), devb->bus ? devb->bus->name : "No Bus", dev_name(devb)); - if (!((devb->pm_domain) || (devb->type && devb->type->pm) - || (devb->class && (devb->class->pm || devb->class->resume)) - || (devb->bus && (devb->bus->pm || devb->bus->resume)) || - (devb->driver && devb->driver->pm))) { - device_pm_add(devb); - } /* Delete deva from dpm_list and reinsert after devb. */ list_move(&deva->power.entry, &devb->power.entry); } diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 6ca35495a5be..1e5cd39d0cc2 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -641,11 +641,12 @@ static int virtblk_probe(struct virtio_device *vdev) if (err) goto out_put_disk; - q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set); + q = blk_mq_init_queue(&vblk->tag_set); if (IS_ERR(q)) { err = -ENOMEM; goto out_free_tags; } + vblk->disk->queue = q; q->queuedata = vblk; diff --git a/drivers/bluetooth/btfm_slim_wcn3990.c b/drivers/bluetooth/btfm_slim_wcn3990.c index 363b4692d228..7abd5598c47b 100644 --- a/drivers/bluetooth/btfm_slim_wcn3990.c +++ b/drivers/bluetooth/btfm_slim_wcn3990.c @@ -122,6 +122,18 @@ int btfm_slim_chrk_enable_port(struct btfmslim *btfmslim, uint8_t port_num, ret, reg); goto error; } + } else if (port_num == CHRK_SB_PGD_PORT_TX_SCO) { + /* SCO Tx */ + reg_val = 0x1 << CHRK_SB_PGD_PORT_TX_SCO; + reg = CHRK_SB_PGD_TX_PORTn_MULTI_CHNL_0(port_num); + BTFMSLIM_DBG("writing reg_val (%d) to reg(%x)", + reg_val, reg); + ret = btfm_slim_write(btfmslim, reg, 1, ®_val, IFD); + if (ret) { + BTFMSLIM_ERR("failed to write (%d) reg 0x%x", + ret, reg); + goto error; + } } /* Enable Tx port hw auto recovery for underrun or overrun error */ @@ -146,9 +158,15 @@ enable_disable_rxport: if (is_fm_port(port_num)) reg_val = en | CHRK_SB_PGD_PORT_WM_L8; + else if (port_num == CHRK_SB_PGD_PORT_TX_SCO) + reg_val = enable ? en | CHRK_SB_PGD_PORT_WM_L1 : en; else reg_val = enable ? 
en | CHRK_SB_PGD_PORT_WM_LB : en; + if (enable && port_num == CHRK_SB_PGD_PORT_TX_SCO) + BTFMSLIM_INFO("programming SCO Tx with reg_val %d to reg 0x%x", + reg_val, reg); + ret = btfm_slim_write(btfmslim, reg, 1, ®_val, IFD); if (ret) BTFMSLIM_ERR("failed to write (%d) reg 0x%x", ret, reg); diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c index e206d9db4d7d..e1e86f6e74dc 100644 --- a/drivers/char/diag/diag_masks.c +++ b/drivers/char/diag/diag_masks.c @@ -555,6 +555,11 @@ static int diag_cmd_get_ssid_range(unsigned char *src_buf, int src_len, mask_info); return -EINVAL; } + if (!mask_info->ptr) { + pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n", + __func__, mask_info->ptr); + return -EINVAL; + } if (!diag_apps_responds()) return 0; @@ -656,7 +661,11 @@ static int diag_cmd_get_msg_mask(unsigned char *src_buf, int src_len, mask_info); return -EINVAL; } - + if (!mask_info->ptr) { + pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n", + __func__, mask_info->ptr); + return -EINVAL; + } if (!diag_apps_responds()) return 0; @@ -669,6 +678,12 @@ static int diag_cmd_get_msg_mask(unsigned char *src_buf, int src_len, rsp.status = MSG_STATUS_FAIL; rsp.padding = 0; mask = (struct diag_msg_mask_t *)mask_info->ptr; + if (!mask->ptr) { + pr_err("diag: Invalid input in %s, mask->ptr: %pK\n", + __func__, mask->ptr); + mutex_unlock(&driver->msg_mask_lock); + return -EINVAL; + } for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) { if ((req->ssid_first < mask->ssid_first) || (req->ssid_first > mask->ssid_last_tools)) { @@ -714,11 +729,23 @@ static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len, mask_info); return -EINVAL; } + if (!mask_info->ptr) { + pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n", + __func__, mask_info->ptr); + return -EINVAL; + } req = (struct diag_msg_build_mask_t *)src_buf; mutex_lock(&mask_info->lock); mutex_lock(&driver->msg_mask_lock); mask = (struct diag_msg_mask_t *)mask_info->ptr; + if (!mask->ptr) { + pr_err("diag: Invalid input in %s, mask->ptr: %pK\n", + __func__, mask->ptr); + mutex_unlock(&driver->msg_mask_lock); + mutex_unlock(&mask_info->lock); + return -EINVAL; + } for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) { if (i < (driver->msg_mask_tbl_count - 1)) { mask_next = mask; @@ -831,6 +858,11 @@ static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len, mask_info); return -EINVAL; } + if (!mask_info->ptr) { + pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n", + __func__, mask_info->ptr); + return -EINVAL; + } req = (struct diag_msg_config_rsp_t *)src_buf; @@ -838,6 +870,13 @@ static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len, mutex_lock(&driver->msg_mask_lock); mask = (struct diag_msg_mask_t *)mask_info->ptr; + if (!mask->ptr) { + pr_err("diag: Invalid input in %s, mask->ptr: %pK\n", + __func__, mask->ptr); + mutex_unlock(&driver->msg_mask_lock); + mutex_unlock(&mask_info->lock); + return -EINVAL; + } mask_info->status = (req->rt_mask) ? 
DIAG_CTRL_MASK_ALL_ENABLED : DIAG_CTRL_MASK_ALL_DISABLED; for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) { @@ -931,7 +970,11 @@ static int diag_cmd_update_event_mask(unsigned char *src_buf, int src_len, mask_info); return -EINVAL; } - + if (!mask_info->ptr) { + pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n", + __func__, mask_info->ptr); + return -EINVAL; + } req = (struct diag_event_mask_config_t *)src_buf; mask_len = EVENT_COUNT_TO_BYTES(req->num_bits); if (mask_len <= 0 || mask_len > event_mask.mask_len) { @@ -989,6 +1032,11 @@ static int diag_cmd_toggle_events(unsigned char *src_buf, int src_len, mask_info); return -EINVAL; } + if (!mask_info->ptr) { + pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n", + __func__, mask_info->ptr); + return -EINVAL; + } toggle = *(src_buf + 1); mutex_lock(&mask_info->lock); @@ -1046,6 +1094,11 @@ static int diag_cmd_get_log_mask(unsigned char *src_buf, int src_len, mask_info); return -EINVAL; } + if (!mask_info->ptr) { + pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n", + __func__, mask_info->ptr); + return -EINVAL; + } if (!diag_apps_responds()) return 0; @@ -1065,6 +1118,11 @@ static int diag_cmd_get_log_mask(unsigned char *src_buf, int src_len, write_len += rsp_header_len; log_item = (struct diag_log_mask_t *)mask_info->ptr; + if (!log_item->ptr) { + pr_err("diag: Invalid input in %s, mask: %pK\n", + __func__, log_item); + return -EINVAL; + } for (i = 0; i < MAX_EQUIP_ID; i++, log_item++) { if (log_item->equip_id != req->equip_id) continue; @@ -1172,11 +1230,20 @@ static int diag_cmd_set_log_mask(unsigned char *src_buf, int src_len, mask_info); return -EINVAL; } + if (!mask_info->ptr) { + pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n", + __func__, mask_info->ptr); + return -EINVAL; + } req = (struct diag_log_config_req_t *)src_buf; read_len += req_header_len; mask = (struct diag_log_mask_t *)mask_info->ptr; - + if (!mask->ptr) { + pr_err("diag: Invalid input in %s, mask->ptr: %pK\n", + __func__, mask->ptr); + return -EINVAL; + } if (req->equip_id >= MAX_EQUIP_ID) { pr_err("diag: In %s, Invalid logging mask request, equip_id: %d\n", __func__, req->equip_id); @@ -1294,9 +1361,17 @@ static int diag_cmd_disable_log_mask(unsigned char *src_buf, int src_len, mask_info); return -EINVAL; } - + if (!mask_info->ptr) { + pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n", + __func__, mask_info->ptr); + return -EINVAL; + } mask = (struct diag_log_mask_t *)mask_info->ptr; - + if (!mask->ptr) { + pr_err("diag: Invalid input in %s, mask->ptr: %pK\n", + __func__, mask->ptr); + return -EINVAL; + } for (i = 0; i < MAX_EQUIP_ID; i++, mask++) { mutex_lock(&mask->lock); memset(mask->ptr, 0, mask->range); @@ -1562,7 +1637,7 @@ static int __diag_mask_init(struct diag_mask_info *mask_info, int mask_len, static void __diag_mask_exit(struct diag_mask_info *mask_info) { - if (!mask_info) + if (!mask_info || !mask_info->ptr) return; mutex_lock(&mask_info->lock); @@ -1619,11 +1694,17 @@ void diag_log_mask_free(struct diag_mask_info *mask_info) int i; struct diag_log_mask_t *mask = NULL; - if (!mask_info) + if (!mask_info || !mask_info->ptr) return; mutex_lock(&mask_info->lock); mask = (struct diag_log_mask_t *)mask_info->ptr; + if (!mask->ptr) { + pr_err("diag: Invalid input in %s, mask->ptr: %pK\n", + __func__, mask->ptr); + mutex_unlock(&mask_info->lock); + return; + } for (i = 0; i < MAX_EQUIP_ID; i++, mask++) { kfree(mask->ptr); mask->ptr = NULL; @@ -1698,11 +1779,18 @@ void diag_msg_mask_free(struct diag_mask_info 
*mask_info) int i; struct diag_msg_mask_t *mask = NULL; - if (!mask_info) + if (!mask_info || !mask_info->ptr) return; mutex_lock(&mask_info->lock); mutex_lock(&driver->msg_mask_lock); mask = (struct diag_msg_mask_t *)mask_info->ptr; + if (!mask->ptr) { + pr_err("diag: Invalid input in %s, mask->ptr: %pK\n", + __func__, mask->ptr); + mutex_unlock(&driver->msg_mask_lock); + mutex_unlock(&mask_info->lock); + return; + } for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) { kfree(mask->ptr); mask->ptr = NULL; @@ -1869,6 +1957,11 @@ int diag_copy_to_user_msg_mask(char __user *buf, size_t count, if (!mask_info) return -EIO; + if (!mask_info->ptr || !mask_info->update_buf) { + pr_err("diag: In %s, invalid input mask_info->ptr: %pK, mask_info->update_buf: %pK\n", + __func__, mask_info->ptr, mask_info->update_buf); + return -EINVAL; + } mutex_lock(&driver->diag_maskclear_mutex); if (driver->mask_clear) { DIAG_LOG(DIAG_DEBUG_PERIPHERALS, @@ -1881,6 +1974,13 @@ int diag_copy_to_user_msg_mask(char __user *buf, size_t count, mutex_lock(&driver->msg_mask_lock); mask = (struct diag_msg_mask_t *)(mask_info->ptr); + if (!mask->ptr) { + pr_err("diag: Invalid input in %s, mask->ptr: %pK\n", + __func__, mask->ptr); + mutex_unlock(&driver->msg_mask_lock); + mutex_unlock(&mask_info->lock); + return -EINVAL; + } for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) { ptr = mask_info->update_buf; len = 0; @@ -1941,8 +2041,20 @@ int diag_copy_to_user_log_mask(char __user *buf, size_t count, if (!mask_info) return -EIO; + if (!mask_info->ptr || !mask_info->update_buf) { + pr_err("diag: In %s, invalid input mask_info->ptr: %pK, mask_info->update_buf: %pK\n", + __func__, mask_info->ptr, mask_info->update_buf); + return -EINVAL; + } + mutex_lock(&mask_info->lock); mask = (struct diag_log_mask_t *)(mask_info->ptr); + if (!mask->ptr) { + pr_err("diag: Invalid input in %s, mask->ptr: %pK\n", + __func__, mask->ptr); + mutex_unlock(&mask_info->lock); + return -EINVAL; + } for (i = 0; i < MAX_EQUIP_ID; i++, mask++) { ptr = mask_info->update_buf; len = 0; diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c index 74777212e4cf..10038e629e6c 100644 --- a/drivers/char/diag/diagfwd_cntl.c +++ b/drivers/char/diag/diagfwd_cntl.c @@ -1119,6 +1119,18 @@ void diag_map_pd_to_diagid(uint8_t pd, uint8_t *diag_id, int *peripheral) *diag_id = DIAG_ID_LPASS; *peripheral = PERIPHERAL_LPASS; break; + case PERIPHERAL_WCNSS: + *diag_id = 0; + *peripheral = PERIPHERAL_WCNSS; + break; + case PERIPHERAL_SENSORS: + *diag_id = 0; + *peripheral = PERIPHERAL_SENSORS; + break; + case PERIPHERAL_WDSP: + *diag_id = 0; + *peripheral = PERIPHERAL_WDSP; + break; case PERIPHERAL_CDSP: *diag_id = DIAG_ID_CDSP; *peripheral = PERIPHERAL_CDSP; diff --git a/drivers/char/diag/diagfwd_peripheral.c b/drivers/char/diag/diagfwd_peripheral.c index 78b8452b19b3..0f94bab3bf84 100644 --- a/drivers/char/diag/diagfwd_peripheral.c +++ b/drivers/char/diag/diagfwd_peripheral.c @@ -500,15 +500,29 @@ static void diagfwd_data_read_untag_done(struct diagfwd_info *fwd_info, temp_buf_main += (buf_len + 4); processed += buf_len; } + + if (flag_buf_1) { + fwd_info->cpd_len_1 = len_cpd; + if (fwd_info->type == TYPE_DATA) + fwd_info->upd_len_1_a = len_upd_1; + if (peripheral == PERIPHERAL_LPASS && + fwd_info->type == TYPE_DATA) + fwd_info->upd_len_2_a = len_upd_2; + } else if (flag_buf_2) { + fwd_info->cpd_len_2 = len_cpd; + if (fwd_info->type == TYPE_DATA) + fwd_info->upd_len_1_b = len_upd_1; + if (peripheral == PERIPHERAL_LPASS && + fwd_info->type 
== TYPE_DATA) + fwd_info->upd_len_2_b = len_upd_2; + } + if (peripheral == PERIPHERAL_LPASS && fwd_info->type == TYPE_DATA && len_upd_2) { - if (flag_buf_1) { - fwd_info->upd_len_2_a = len_upd_2; + if (flag_buf_1) temp_ptr_upd = fwd_info->buf_upd_2_a; - } else { - fwd_info->upd_len_2_b = len_upd_2; + else temp_ptr_upd = fwd_info->buf_upd_2_b; - } temp_ptr_upd->ctxt &= 0x00FFFFFF; temp_ptr_upd->ctxt |= (SET_PD_CTXT(ctxt_upd_2)); @@ -522,15 +536,10 @@ static void diagfwd_data_read_untag_done(struct diagfwd_info *fwd_info, fwd_info->upd_len_2_b = 0; } if (fwd_info->type == TYPE_DATA && len_upd_1) { - if (flag_buf_1) { - fwd_info->upd_len_1_a = - len_upd_1; + if (flag_buf_1) temp_ptr_upd = fwd_info->buf_upd_1_a; - } else { - fwd_info->upd_len_1_b = - len_upd_1; + else temp_ptr_upd = fwd_info->buf_upd_1_b; - } temp_ptr_upd->ctxt &= 0x00FFFFFF; temp_ptr_upd->ctxt |= (SET_PD_CTXT(ctxt_upd_1)); @@ -544,10 +553,6 @@ static void diagfwd_data_read_untag_done(struct diagfwd_info *fwd_info, fwd_info->upd_len_1_b = 0; } if (len_cpd) { - if (flag_buf_1) - fwd_info->cpd_len_1 = len_cpd; - else - fwd_info->cpd_len_2 = len_cpd; temp_ptr_cpd->ctxt &= 0x00FFFFFF; temp_ptr_cpd->ctxt |= (SET_PD_CTXT(ctxt_cpd)); @@ -1049,16 +1054,7 @@ void diagfwd_close_transport(uint8_t transport, uint8_t peripheral) dest_info->buf_ptr[i] = fwd_info->buf_ptr[i]; if (!check_channel_state(dest_info->ctxt)) diagfwd_late_open(dest_info); - - /* - * Open control channel to update masks after buffers are - * initialized for peripherals that have transport other than - * GLINK. GLINK supported peripheral mask update will - * happen after glink buffers are initialized. - */ - - if (dest_info->transport != TRANSPORT_GLINK) - diagfwd_cntl_open(dest_info); + diagfwd_cntl_open(dest_info); init_fn(peripheral); mutex_unlock(&driver->diagfwd_channel_mutex[peripheral]); diagfwd_queue_read(&peripheral_info[TYPE_DATA][peripheral]); @@ -1251,15 +1247,11 @@ int diagfwd_channel_open(struct diagfwd_info *fwd_info) diagfwd_buffers_init(fwd_info); /* - * Initialize buffers for glink supported - * peripherals only. Open control channel to update - * masks after buffers are initialized. + * Initialize buffers for glink supported + * peripherals only. 
*/ - if (fwd_info->transport == TRANSPORT_GLINK) { + if (fwd_info->transport == TRANSPORT_GLINK) diagfwd_write_buffers_init(fwd_info); - if (fwd_info->type == TYPE_CNTL) - diagfwd_cntl_open(fwd_info); - } if (fwd_info && fwd_info->c_ops && fwd_info->c_ops->open) fwd_info->c_ops->open(fwd_info); @@ -1345,12 +1337,33 @@ void diagfwd_write_done(uint8_t peripheral, uint8_t type, int ctxt) if (ctxt == 1 && fwd_info->buf_1) { /* Buffer 1 for core PD is freed */ - atomic_set(&fwd_info->buf_1->in_busy, 0); fwd_info->cpd_len_1 = 0; + + if (peripheral == PERIPHERAL_LPASS) { + if (!fwd_info->upd_len_1_a && + !fwd_info->upd_len_2_a) + atomic_set(&fwd_info->buf_1->in_busy, 0); + } else if (peripheral == PERIPHERAL_MODEM) { + if (!fwd_info->upd_len_1_a) + atomic_set(&fwd_info->buf_1->in_busy, 0); + } else { + atomic_set(&fwd_info->buf_1->in_busy, 0); + } } else if (ctxt == 2 && fwd_info->buf_2) { /* Buffer 2 for core PD is freed */ - atomic_set(&fwd_info->buf_2->in_busy, 0); fwd_info->cpd_len_2 = 0; + + if (peripheral == PERIPHERAL_LPASS) { + if (!fwd_info->upd_len_1_b && + !fwd_info->upd_len_2_b) + atomic_set(&fwd_info->buf_2->in_busy, 0); + } else if (peripheral == PERIPHERAL_MODEM) { + if (!fwd_info->upd_len_1_b) + atomic_set(&fwd_info->buf_2->in_busy, 0); + } else { + atomic_set(&fwd_info->buf_2->in_busy, 0); + } + } else if (ctxt == 3 && fwd_info->buf_upd_1_a) { /* Buffer 1 for user pd 1 is freed */ atomic_set(&fwd_info->buf_upd_1_a->in_busy, 0); diff --git a/drivers/clk/msm/clock-dummy.c b/drivers/clk/msm/clock-dummy.c index e5339b110cd6..caa6a6ab7565 100644 --- a/drivers/clk/msm/clock-dummy.c +++ b/drivers/clk/msm/clock-dummy.c @@ -64,12 +64,18 @@ struct clk dummy_clk = { static void *dummy_clk_dt_parser(struct device *dev, struct device_node *np) { struct clk *c; + u32 rate; + c = devm_kzalloc(dev, sizeof(*c), GFP_KERNEL); if (!c) { dev_err(dev, "failed to map memory for %s\n", np->name); return ERR_PTR(-ENOMEM); } c->ops = &clk_ops_dummy; + + if (!of_property_read_u32(np, "clock-frequency", &rate)) + c->rate = rate; + return msmclk_generic_clk_init(dev, np, c); } MSMCLK_PARSER(dummy_clk_dt_parser, "qcom,dummy-clk", 0); @@ -82,6 +88,7 @@ static struct clk *of_dummy_get(struct of_phandle_args *clkspec, static struct of_device_id msm_clock_dummy_match_table[] = { { .compatible = "qcom,dummycc" }, + { .compatible = "fixed-clock" }, {} }; diff --git a/drivers/clk/msm/mdss/mdss-pll.h b/drivers/clk/msm/mdss/mdss-pll.h index 0120d71f0daf..7aa8b0d6c051 100644 --- a/drivers/clk/msm/mdss/mdss-pll.h +++ b/drivers/clk/msm/mdss/mdss-pll.h @@ -70,6 +70,7 @@ struct dfps_info { struct dfps_panel_info panel_dfps; struct dfps_codes_info codes_dfps[DFPS_MAX_NUM_OF_FRAME_RATES]; void *dfps_fb_base; + uint32_t chip_serial; }; struct mdss_pll_resources { diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c index ddaeca1b29e4..510a9803bd82 100644 --- a/drivers/clk/qcom/clk-cpu-osm.c +++ b/drivers/clk/qcom/clk-cpu-osm.c @@ -226,8 +226,8 @@ enum clk_osm_trace_packet_id { #define PLL_DD_D0_USER_CTL_LO 0x17916208 #define PLL_DD_D1_USER_CTL_LO 0x17816208 -#define PWRCL_EFUSE_SHIFT 0 -#define PWRCL_EFUSE_MASK 0 +#define PWRCL_EFUSE_SHIFT 29 +#define PWRCL_EFUSE_MASK 0x7 #define PERFCL_EFUSE_SHIFT 29 #define PERFCL_EFUSE_MASK 0x7 @@ -623,18 +623,21 @@ static inline bool is_better_rate(unsigned long req, unsigned long best, return (req <= new && new < best) || (best < req && best < new); } -static long clk_osm_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *parent_rate) +static int 
clk_osm_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) { int i; unsigned long rrate = 0; + unsigned long rate = req->rate; /* * If the rate passed in is 0, return the first frequency in the * FMAX table. */ - if (!rate) - return hw->init->rate_max[0]; + if (!rate) { + req->rate = hw->init->rate_max[0]; + return 0; + } for (i = 0; i < hw->init->num_rate_max; i++) { if (is_better_rate(rate, rrate, hw->init->rate_max[i])) { @@ -644,10 +647,12 @@ static long clk_osm_round_rate(struct clk_hw *hw, unsigned long rate, } } + req->rate = rrate; + pr_debug("%s: rate %lu, rrate %ld, Rate max %ld\n", __func__, rate, rrate, hw->init->rate_max[i]); - return rrate; + return 0; } static int clk_osm_search_table(struct osm_entry *table, int entries, long rate) @@ -678,18 +683,19 @@ static int clk_osm_set_rate(struct clk_hw *hw, unsigned long rate, { struct clk_osm *cpuclk = to_clk_osm(hw); int index = 0; - unsigned long r_rate; + struct clk_rate_request req; - r_rate = clk_osm_round_rate(hw, rate, NULL); + req.rate = rate; + clk_osm_determine_rate(hw, &req); - if (rate != r_rate) { + if (rate != req.rate) { pr_err("invalid rate requested rate=%ld\n", rate); return -EINVAL; } /* Convert rate to table index */ index = clk_osm_search_table(cpuclk->osm_table, - cpuclk->num_entries, r_rate); + cpuclk->num_entries, req.rate); if (index < 0) { pr_err("cannot set cluster %u to %lu\n", cpuclk->cluster_num, rate); @@ -773,7 +779,7 @@ static unsigned long clk_osm_recalc_rate(struct clk_hw *hw, static struct clk_ops clk_ops_cpu_osm = { .enable = clk_osm_enable, .set_rate = clk_osm_set_rate, - .round_rate = clk_osm_round_rate, + .determine_rate = clk_osm_determine_rate, .list_rate = clk_osm_list_rate, .recalc_rate = clk_osm_recalc_rate, .debug_init = clk_debug_measure_add, diff --git a/drivers/crypto/msm/ice.c b/drivers/crypto/msm/ice.c index 490f8d9ddb9f..68b6a26f00b8 100644 --- a/drivers/crypto/msm/ice.c +++ b/drivers/crypto/msm/ice.c @@ -869,7 +869,7 @@ static int qcom_ice_restore_key_config(struct ice_device *ice_dev) static int qcom_ice_init_clocks(struct ice_device *ice) { int ret = -EINVAL; - struct ice_clk_info *clki; + struct ice_clk_info *clki = NULL; struct device *dev = ice->pdev; struct list_head *head = &ice->clk_list_head; @@ -913,7 +913,7 @@ out: static int qcom_ice_enable_clocks(struct ice_device *ice, bool enable) { int ret = 0; - struct ice_clk_info *clki; + struct ice_clk_info *clki = NULL; struct device *dev = ice->pdev; struct list_head *head = &ice->clk_list_head; @@ -1590,12 +1590,14 @@ struct platform_device *qcom_ice_get_pdevice(struct device_node *node) if (ice_dev->pdev->of_node == node) { pr_info("%s: found ice device %pK\n", __func__, ice_dev); + ice_pdev = to_platform_device(ice_dev->pdev); break; } } - ice_pdev = to_platform_device(ice_dev->pdev); - pr_info("%s: matching platform device %pK\n", __func__, ice_pdev); + if (ice_pdev) + pr_info("%s: matching platform device %pK\n", __func__, + ice_pdev); out: return ice_pdev; } @@ -1615,11 +1617,11 @@ static struct ice_device *get_ice_device_from_storage_type if (!strcmp(ice_dev->ice_instance_type, storage_type)) { pr_debug("%s: found ice device %pK\n", __func__, ice_dev); - break; + return ice_dev; } } out: - return ice_dev; + return NULL; } static int enable_ice_setup(struct ice_device *ice_dev) diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 6253775b8d9c..50d74e5ce41b 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -1247,6 +1247,9 @@ int 
drm_atomic_check_only(struct drm_atomic_state *state) if (config->funcs->atomic_check) ret = config->funcs->atomic_check(state->dev, state); + if (ret) + return ret; + if (!state->allow_modeset) { for_each_crtc_in_state(state, crtc, crtc_state, i) { if (drm_atomic_crtc_needs_modeset(crtc_state)) { @@ -1257,7 +1260,7 @@ int drm_atomic_check_only(struct drm_atomic_state *state) } } - return ret; + return 0; } EXPORT_SYMBOL(drm_atomic_check_only); diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index b205224f1a44..9147113139be 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -715,13 +715,13 @@ drm_gem_object_release_handle(int id, void *ptr, void *data) struct drm_gem_object *obj = ptr; struct drm_device *dev = obj->dev; + if (dev->driver->gem_close_object) + dev->driver->gem_close_object(obj, file_priv); + if (drm_core_check_feature(dev, DRIVER_PRIME)) drm_gem_remove_prime_handles(obj, file_priv); drm_vma_node_revoke(&obj->vma_node, file_priv->filp); - if (dev->driver->gem_close_object) - dev->driver->gem_close_object(obj, file_priv); - drm_gem_object_handle_unreference_unlocked(obj); return 0; diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index dbc198b00792..cb3b25ddd0da 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -98,3 +98,13 @@ config DRM_SDE_HDMI default y help Choose this option if HDMI connector support is needed in SDE driver. + +config DRM_SDE_EVTLOG_DEBUG + bool "Enable event logging in MSM DRM" + depends on DRM_MSM + help + The SDE DRM debugging provides support to enable display debugging + features to: dump SDE registers during driver errors, panic + driver during fatal errors and enable some display-driver logging + into an internal buffer (this avoids logging overhead). 
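The new DRM_SDE_EVTLOG_DEBUG option above is a plain bool symbol. As a generic sketch only (the function name here is invented, not taken from the SDE driver), a driver would typically gate its event-log writes on the symbol with IS_ENABLED() so the logging path compiles away when the option is off:

/* hypothetical example of consuming CONFIG_DRM_SDE_EVTLOG_DEBUG */
#include <linux/kconfig.h>
#include <linux/printk.h>
#include <linux/types.h>

static void sde_evtlog_sample(const char *what, u32 val)
{
	if (!IS_ENABLED(CONFIG_DRM_SDE_EVTLOG_DEBUG))
		return;		/* branch is discarded at compile time when disabled */

	pr_debug("sde evtlog: %s = 0x%x\n", what, val);
}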
+ diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 4c082fff2fc5..678b2178cb69 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -49,6 +49,7 @@ msm_drm-y := \ sde/sde_color_processing.o \ sde/sde_vbif.o \ sde/sde_splash.o \ + sde_dbg.o \ sde_dbg_evtlog.o \ sde_io_util.o \ dba_bridge.o \ diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c index c085e173232b..049478fd9bcb 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c @@ -406,11 +406,9 @@ static const unsigned int a3xx_registers[] = { #ifdef CONFIG_DEBUG_FS static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m) { - gpu->funcs->pm_resume(gpu); seq_printf(m, "status: %08x\n", gpu_read(gpu, REG_A3XX_RBBM_STATUS)); adreno_show(gpu, m); - gpu->funcs->pm_suspend(gpu); } #endif diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c index 624c2a87d593..45c83fbe20e1 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c @@ -443,13 +443,9 @@ static const unsigned int a4xx_registers[] = { #ifdef CONFIG_DEBUG_FS static void a4xx_show(struct msm_gpu *gpu, struct seq_file *m) { - gpu->funcs->pm_resume(gpu); - seq_printf(m, "status: %08x\n", gpu_read(gpu, REG_A4XX_RBBM_STATUS)); - adreno_show(gpu, m); - gpu->funcs->pm_suspend(gpu); } #endif diff --git a/drivers/gpu/drm/msm/adreno/a5xx_counters.c b/drivers/gpu/drm/msm/adreno/a5xx_counters.c index bc442039c308..1d5e61daca47 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_counters.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_counters.c @@ -86,15 +86,15 @@ static int a5xx_counter_get(struct msm_gpu *gpu, spin_unlock(&group->lock); - if (group->funcs.enable) - group->funcs.enable(gpu, group, empty); + if (pm_runtime_active(&gpu->pdev->dev) && group->funcs.enable) + group->funcs.enable(gpu, group, empty, false); return empty; } /* The majority of the non-fixed counter selects can be programmed by the CPU */ static void a5xx_counter_enable_cpu(struct msm_gpu *gpu, - struct adreno_counter_group *group, int counterid) + struct adreno_counter_group *group, int counterid, bool restore) { struct adreno_counter *counter = &group->counters[counterid]; @@ -102,15 +102,36 @@ static void a5xx_counter_enable_cpu(struct msm_gpu *gpu, } static void a5xx_counter_enable_pm4(struct msm_gpu *gpu, - struct adreno_counter_group *group, int counterid) + struct adreno_counter_group *group, int counterid, bool restore) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); struct msm_ringbuffer *ring = gpu->rb[0]; struct adreno_counter *counter = &group->counters[counterid]; + /* + * If we are restoring the counters after a power cycle we can safely + * use AHB to enable the counters because we know SP/TP power collapse + * isn't active + */ + if (restore) { + a5xx_counter_enable_cpu(gpu, group, counterid, true); + return; + } + mutex_lock(&gpu->dev->struct_mutex); + /* + * If HW init hasn't run yet we can use the CPU to program the counter + * (and indeed we must because we can't submit commands to the + * GPU if it isn't initalized) + */ + if (gpu->needs_hw_init) { + a5xx_counter_enable_cpu(gpu, group, counterid, true); + mutex_unlock(&gpu->dev->struct_mutex); + return; + } + /* Turn off preemption for the duration of this command */ OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1); OUT_RING(ring, 0x02); @@ -168,7 +189,7 @@ static void a5xx_counter_enable_pm4(struct 
msm_gpu *gpu, * registers */ static void a5xx_counter_enable_gpmu(struct msm_gpu *gpu, - struct adreno_counter_group *group, int counterid) + struct adreno_counter_group *group, int counterid, bool restore) { struct adreno_counter *counter = &group->counters[counterid]; u32 reg; @@ -192,7 +213,7 @@ static void a5xx_counter_enable_gpmu(struct msm_gpu *gpu, /* VBIF counters are selectable but have their own programming process */ static void a5xx_counter_enable_vbif(struct msm_gpu *gpu, - struct adreno_counter_group *group, int counterid) + struct adreno_counter_group *group, int counterid, bool restore) { struct adreno_counter *counter = &group->counters[counterid]; @@ -208,7 +229,7 @@ static void a5xx_counter_enable_vbif(struct msm_gpu *gpu, * use */ static void a5xx_counter_enable_vbif_power(struct msm_gpu *gpu, - struct adreno_counter_group *group, int counterid) + struct adreno_counter_group *group, int counterid, bool restore) { gpu_write(gpu, REG_A5XX_VBIF_PERF_PWR_CNT_CLR(counterid), 1); gpu_write(gpu, REG_A5XX_VBIF_PERF_PWR_CNT_CLR(counterid), 0); @@ -217,7 +238,7 @@ static void a5xx_counter_enable_vbif_power(struct msm_gpu *gpu, /* GPMU always on counter needs to be enabled before use */ static void a5xx_counter_enable_alwayson_power(struct msm_gpu *gpu, - struct adreno_counter_group *group, int counterid) + struct adreno_counter_group *group, int counterid, bool restore) { gpu_write(gpu, REG_A5XX_GPMU_ALWAYS_ON_COUNTER_RESET, 1); } @@ -228,6 +249,10 @@ static u64 a5xx_counter_read(struct msm_gpu *gpu, if (counterid >= group->nr_counters) return 0; + /* If the power is off, return the shadow value */ + if (!pm_runtime_active(&gpu->pdev->dev)) + return group->counters[counterid].value; + return gpu_read64(gpu, group->counters[counterid].lo, group->counters[counterid].hi); } @@ -252,6 +277,77 @@ static void a5xx_counter_put(struct msm_gpu *gpu, spin_unlock(&group->lock); } +static void a5xx_counter_group_enable(struct msm_gpu *gpu, + struct adreno_counter_group *group, bool restore) +{ + int i; + + if (!group || !group->funcs.enable) + return; + + spin_lock(&group->lock); + + for (i = 0; i < group->nr_counters; i++) { + if (!group->counters[i].refcount) + continue; + + group->funcs.enable(gpu, group, i, restore); + } + spin_unlock(&group->lock); +} + +static void a5xx_counter_restore(struct msm_gpu *gpu, + struct adreno_counter_group *group) +{ + int i; + + spin_lock(&group->lock); + for (i = 0; i < group->nr_counters; i++) { + struct adreno_counter *counter = &group->counters[i]; + uint32_t bit, offset = counter->load_bit; + + /* Don't load if the counter isn't active or can't be loaded */ + if (!counter->refcount) + continue; + + /* + * Each counter has a specific bit in one of four load command + * registers. 
Figure out which register / relative bit to use + * for the counter + */ + bit = do_div(offset, 32); + + /* Write the counter value */ + gpu_write64(gpu, REG_A5XX_RBBM_PERFCTR_LOAD_VALUE_LO, + REG_A5XX_RBBM_PERFCTR_LOAD_VALUE_HI, + counter->value); + + /* + * Write the load bit to load the counter - the command register + * will get reset to 0 after the operation completes + */ + gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_LOAD_CMD0 + offset, + (1 << bit)); + } + spin_unlock(&group->lock); +} + +static void a5xx_counter_save(struct msm_gpu *gpu, + struct adreno_counter_group *group) +{ + int i; + + spin_lock(&group->lock); + for (i = 0; i < group->nr_counters; i++) { + struct adreno_counter *counter = &group->counters[i]; + + if (counter->refcount > 0) + counter->value = gpu_read64(gpu, counter->lo, + counter->hi); + } + spin_unlock(&group->lock); +} + static struct adreno_counter a5xx_counters_alwayson[1] = { { REG_A5XX_RBBM_ALWAYSON_COUNTER_LO, REG_A5XX_RBBM_ALWAYSON_COUNTER_HI }, @@ -270,242 +366,242 @@ static struct adreno_counter a5xx_counters_ccu[] = { static struct adreno_counter a5xx_counters_cmp[] = { { REG_A5XX_RBBM_PERFCTR_CMP_0_LO, REG_A5XX_RBBM_PERFCTR_CMP_0_HI, - REG_A5XX_RB_PERFCTR_CMP_SEL_0 }, + REG_A5XX_RB_PERFCTR_CMP_SEL_0, 94 }, { REG_A5XX_RBBM_PERFCTR_CMP_1_LO, REG_A5XX_RBBM_PERFCTR_CMP_1_HI, - REG_A5XX_RB_PERFCTR_CMP_SEL_1 }, + REG_A5XX_RB_PERFCTR_CMP_SEL_1, 95 }, { REG_A5XX_RBBM_PERFCTR_CMP_2_LO, REG_A5XX_RBBM_PERFCTR_CMP_2_HI, - REG_A5XX_RB_PERFCTR_CMP_SEL_2 }, + REG_A5XX_RB_PERFCTR_CMP_SEL_2, 96 }, { REG_A5XX_RBBM_PERFCTR_CMP_3_LO, REG_A5XX_RBBM_PERFCTR_CMP_3_HI, - REG_A5XX_RB_PERFCTR_CMP_SEL_3 }, + REG_A5XX_RB_PERFCTR_CMP_SEL_3, 97 }, }; static struct adreno_counter a5xx_counters_cp[] = { { REG_A5XX_RBBM_PERFCTR_CP_0_LO, REG_A5XX_RBBM_PERFCTR_CP_0_HI, - REG_A5XX_CP_PERFCTR_CP_SEL_0 }, + REG_A5XX_CP_PERFCTR_CP_SEL_0, 0 }, { REG_A5XX_RBBM_PERFCTR_CP_1_LO, REG_A5XX_RBBM_PERFCTR_CP_1_HI, - REG_A5XX_CP_PERFCTR_CP_SEL_1 }, + REG_A5XX_CP_PERFCTR_CP_SEL_1, 1}, { REG_A5XX_RBBM_PERFCTR_CP_2_LO, REG_A5XX_RBBM_PERFCTR_CP_2_HI, - REG_A5XX_CP_PERFCTR_CP_SEL_2 }, + REG_A5XX_CP_PERFCTR_CP_SEL_2, 2 }, { REG_A5XX_RBBM_PERFCTR_CP_3_LO, REG_A5XX_RBBM_PERFCTR_CP_3_HI, - REG_A5XX_CP_PERFCTR_CP_SEL_3 }, + REG_A5XX_CP_PERFCTR_CP_SEL_3, 3 }, { REG_A5XX_RBBM_PERFCTR_CP_4_LO, REG_A5XX_RBBM_PERFCTR_CP_4_HI, - REG_A5XX_CP_PERFCTR_CP_SEL_4 }, + REG_A5XX_CP_PERFCTR_CP_SEL_4, 4 }, { REG_A5XX_RBBM_PERFCTR_CP_5_LO, REG_A5XX_RBBM_PERFCTR_CP_5_HI, - REG_A5XX_CP_PERFCTR_CP_SEL_5 }, + REG_A5XX_CP_PERFCTR_CP_SEL_5, 5 }, { REG_A5XX_RBBM_PERFCTR_CP_6_LO, REG_A5XX_RBBM_PERFCTR_CP_6_HI, - REG_A5XX_CP_PERFCTR_CP_SEL_6 }, + REG_A5XX_CP_PERFCTR_CP_SEL_6, 6 }, { REG_A5XX_RBBM_PERFCTR_CP_7_LO, REG_A5XX_RBBM_PERFCTR_CP_7_HI, - REG_A5XX_CP_PERFCTR_CP_SEL_7 }, + REG_A5XX_CP_PERFCTR_CP_SEL_7, 7 }, }; static struct adreno_counter a5xx_counters_hlsq[] = { { REG_A5XX_RBBM_PERFCTR_HLSQ_0_LO, REG_A5XX_RBBM_PERFCTR_HLSQ_0_HI, - REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_0 }, + REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_0, 28 }, { REG_A5XX_RBBM_PERFCTR_HLSQ_1_LO, REG_A5XX_RBBM_PERFCTR_HLSQ_1_HI, - REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_1 }, + REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_1, 29 }, { REG_A5XX_RBBM_PERFCTR_HLSQ_2_LO, REG_A5XX_RBBM_PERFCTR_HLSQ_2_HI, - REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_2 }, + REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_2, 30 }, { REG_A5XX_RBBM_PERFCTR_HLSQ_3_LO, REG_A5XX_RBBM_PERFCTR_HLSQ_3_HI, - REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_3 }, + REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_3, 31 }, { REG_A5XX_RBBM_PERFCTR_HLSQ_4_LO, REG_A5XX_RBBM_PERFCTR_HLSQ_4_HI, - 
REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_4 }, + REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_4, 32 }, { REG_A5XX_RBBM_PERFCTR_HLSQ_5_LO, REG_A5XX_RBBM_PERFCTR_HLSQ_5_HI, - REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_5 }, + REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_5, 33 }, { REG_A5XX_RBBM_PERFCTR_HLSQ_6_LO, REG_A5XX_RBBM_PERFCTR_HLSQ_6_HI, - REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_6 }, + REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_6, 34 }, { REG_A5XX_RBBM_PERFCTR_HLSQ_7_LO, REG_A5XX_RBBM_PERFCTR_HLSQ_7_HI, - REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_7 }, + REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_7, 35 }, }; static struct adreno_counter a5xx_counters_lrz[] = { { REG_A5XX_RBBM_PERFCTR_LRZ_0_LO, REG_A5XX_RBBM_PERFCTR_LRZ_0_HI, - REG_A5XX_GRAS_PERFCTR_LRZ_SEL_0 }, + REG_A5XX_GRAS_PERFCTR_LRZ_SEL_0, 90 }, { REG_A5XX_RBBM_PERFCTR_LRZ_1_LO, REG_A5XX_RBBM_PERFCTR_LRZ_1_HI, - REG_A5XX_GRAS_PERFCTR_LRZ_SEL_1 }, + REG_A5XX_GRAS_PERFCTR_LRZ_SEL_1, 91 }, { REG_A5XX_RBBM_PERFCTR_LRZ_2_LO, REG_A5XX_RBBM_PERFCTR_LRZ_2_HI, - REG_A5XX_GRAS_PERFCTR_LRZ_SEL_2 }, + REG_A5XX_GRAS_PERFCTR_LRZ_SEL_2, 92 }, { REG_A5XX_RBBM_PERFCTR_LRZ_3_LO, REG_A5XX_RBBM_PERFCTR_LRZ_3_HI, - REG_A5XX_GRAS_PERFCTR_LRZ_SEL_3 }, + REG_A5XX_GRAS_PERFCTR_LRZ_SEL_3, 93 }, }; static struct adreno_counter a5xx_counters_pc[] = { { REG_A5XX_RBBM_PERFCTR_PC_0_LO, REG_A5XX_RBBM_PERFCTR_PC_0_HI, - REG_A5XX_PC_PERFCTR_PC_SEL_0 }, + REG_A5XX_PC_PERFCTR_PC_SEL_0, 12 }, { REG_A5XX_RBBM_PERFCTR_PC_1_LO, REG_A5XX_RBBM_PERFCTR_PC_1_HI, - REG_A5XX_PC_PERFCTR_PC_SEL_1 }, + REG_A5XX_PC_PERFCTR_PC_SEL_1, 13 }, { REG_A5XX_RBBM_PERFCTR_PC_2_LO, REG_A5XX_RBBM_PERFCTR_PC_2_HI, - REG_A5XX_PC_PERFCTR_PC_SEL_2 }, + REG_A5XX_PC_PERFCTR_PC_SEL_2, 14 }, { REG_A5XX_RBBM_PERFCTR_PC_3_LO, REG_A5XX_RBBM_PERFCTR_PC_3_HI, - REG_A5XX_PC_PERFCTR_PC_SEL_3 }, + REG_A5XX_PC_PERFCTR_PC_SEL_3, 15 }, { REG_A5XX_RBBM_PERFCTR_PC_4_LO, REG_A5XX_RBBM_PERFCTR_PC_4_HI, - REG_A5XX_PC_PERFCTR_PC_SEL_4 }, + REG_A5XX_PC_PERFCTR_PC_SEL_4, 16 }, { REG_A5XX_RBBM_PERFCTR_PC_5_LO, REG_A5XX_RBBM_PERFCTR_PC_5_HI, - REG_A5XX_PC_PERFCTR_PC_SEL_5 }, + REG_A5XX_PC_PERFCTR_PC_SEL_5, 17 }, { REG_A5XX_RBBM_PERFCTR_PC_6_LO, REG_A5XX_RBBM_PERFCTR_PC_6_HI, - REG_A5XX_PC_PERFCTR_PC_SEL_6 }, + REG_A5XX_PC_PERFCTR_PC_SEL_6, 18 }, { REG_A5XX_RBBM_PERFCTR_PC_7_LO, REG_A5XX_RBBM_PERFCTR_PC_7_HI, - REG_A5XX_PC_PERFCTR_PC_SEL_7 }, + REG_A5XX_PC_PERFCTR_PC_SEL_7, 19 }, }; static struct adreno_counter a5xx_counters_ras[] = { { REG_A5XX_RBBM_PERFCTR_RAS_0_LO, REG_A5XX_RBBM_PERFCTR_RAS_0_HI, - REG_A5XX_GRAS_PERFCTR_RAS_SEL_0 }, + REG_A5XX_GRAS_PERFCTR_RAS_SEL_0, 48 }, { REG_A5XX_RBBM_PERFCTR_RAS_1_LO, REG_A5XX_RBBM_PERFCTR_RAS_1_HI, - REG_A5XX_GRAS_PERFCTR_RAS_SEL_1 }, + REG_A5XX_GRAS_PERFCTR_RAS_SEL_1, 49 }, { REG_A5XX_RBBM_PERFCTR_RAS_2_LO, REG_A5XX_RBBM_PERFCTR_RAS_2_HI, - REG_A5XX_GRAS_PERFCTR_RAS_SEL_2 }, + REG_A5XX_GRAS_PERFCTR_RAS_SEL_2, 50 }, { REG_A5XX_RBBM_PERFCTR_RAS_3_LO, REG_A5XX_RBBM_PERFCTR_RAS_3_HI, - REG_A5XX_GRAS_PERFCTR_RAS_SEL_3 }, + REG_A5XX_GRAS_PERFCTR_RAS_SEL_3, 51 }, }; static struct adreno_counter a5xx_counters_rb[] = { { REG_A5XX_RBBM_PERFCTR_RB_0_LO, REG_A5XX_RBBM_PERFCTR_RB_0_HI, - REG_A5XX_RB_PERFCTR_RB_SEL_0 }, + REG_A5XX_RB_PERFCTR_RB_SEL_0, 80 }, { REG_A5XX_RBBM_PERFCTR_RB_1_LO, REG_A5XX_RBBM_PERFCTR_RB_1_HI, - REG_A5XX_RB_PERFCTR_RB_SEL_1 }, + REG_A5XX_RB_PERFCTR_RB_SEL_1, 81 }, { REG_A5XX_RBBM_PERFCTR_RB_2_LO, REG_A5XX_RBBM_PERFCTR_RB_2_HI, - REG_A5XX_RB_PERFCTR_RB_SEL_2 }, + REG_A5XX_RB_PERFCTR_RB_SEL_2, 82 }, { REG_A5XX_RBBM_PERFCTR_RB_3_LO, REG_A5XX_RBBM_PERFCTR_RB_3_HI, - REG_A5XX_RB_PERFCTR_RB_SEL_3 }, + REG_A5XX_RB_PERFCTR_RB_SEL_3, 83 }, { 
REG_A5XX_RBBM_PERFCTR_RB_4_LO, REG_A5XX_RBBM_PERFCTR_RB_4_HI, - REG_A5XX_RB_PERFCTR_RB_SEL_4 }, + REG_A5XX_RB_PERFCTR_RB_SEL_4, 84 }, { REG_A5XX_RBBM_PERFCTR_RB_5_LO, REG_A5XX_RBBM_PERFCTR_RB_5_HI, - REG_A5XX_RB_PERFCTR_RB_SEL_5 }, + REG_A5XX_RB_PERFCTR_RB_SEL_5, 85 }, { REG_A5XX_RBBM_PERFCTR_RB_6_LO, REG_A5XX_RBBM_PERFCTR_RB_6_HI, - REG_A5XX_RB_PERFCTR_RB_SEL_6 }, + REG_A5XX_RB_PERFCTR_RB_SEL_6, 86 }, { REG_A5XX_RBBM_PERFCTR_RB_7_LO, REG_A5XX_RBBM_PERFCTR_RB_7_HI, - REG_A5XX_RB_PERFCTR_RB_SEL_7 }, + REG_A5XX_RB_PERFCTR_RB_SEL_7, 87 }, }; static struct adreno_counter a5xx_counters_rbbm[] = { { REG_A5XX_RBBM_PERFCTR_RBBM_0_LO, REG_A5XX_RBBM_PERFCTR_RBBM_0_HI, - REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0 }, + REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0, 8 }, { REG_A5XX_RBBM_PERFCTR_RBBM_1_LO, REG_A5XX_RBBM_PERFCTR_RBBM_1_HI, - REG_A5XX_RBBM_PERFCTR_RBBM_SEL_1 }, + REG_A5XX_RBBM_PERFCTR_RBBM_SEL_1, 9 }, { REG_A5XX_RBBM_PERFCTR_RBBM_2_LO, REG_A5XX_RBBM_PERFCTR_RBBM_2_HI, - REG_A5XX_RBBM_PERFCTR_RBBM_SEL_2 }, + REG_A5XX_RBBM_PERFCTR_RBBM_SEL_2, 10 }, { REG_A5XX_RBBM_PERFCTR_RBBM_3_LO, REG_A5XX_RBBM_PERFCTR_RBBM_3_HI, - REG_A5XX_RBBM_PERFCTR_RBBM_SEL_3 }, + REG_A5XX_RBBM_PERFCTR_RBBM_SEL_3, 11 }, }; static struct adreno_counter a5xx_counters_sp[] = { { REG_A5XX_RBBM_PERFCTR_SP_0_LO, REG_A5XX_RBBM_PERFCTR_SP_0_HI, - REG_A5XX_SP_PERFCTR_SP_SEL_0 }, + REG_A5XX_SP_PERFCTR_SP_SEL_0, 68 }, { REG_A5XX_RBBM_PERFCTR_SP_1_LO, REG_A5XX_RBBM_PERFCTR_SP_1_HI, - REG_A5XX_SP_PERFCTR_SP_SEL_1 }, + REG_A5XX_SP_PERFCTR_SP_SEL_1, 69 }, { REG_A5XX_RBBM_PERFCTR_SP_2_LO, REG_A5XX_RBBM_PERFCTR_SP_2_HI, - REG_A5XX_SP_PERFCTR_SP_SEL_2 }, + REG_A5XX_SP_PERFCTR_SP_SEL_2, 70 }, { REG_A5XX_RBBM_PERFCTR_SP_3_LO, REG_A5XX_RBBM_PERFCTR_SP_3_HI, - REG_A5XX_SP_PERFCTR_SP_SEL_3 }, + REG_A5XX_SP_PERFCTR_SP_SEL_3, 71 }, { REG_A5XX_RBBM_PERFCTR_SP_4_LO, REG_A5XX_RBBM_PERFCTR_SP_4_HI, - REG_A5XX_SP_PERFCTR_SP_SEL_4 }, + REG_A5XX_SP_PERFCTR_SP_SEL_4, 72 }, { REG_A5XX_RBBM_PERFCTR_SP_5_LO, REG_A5XX_RBBM_PERFCTR_SP_5_HI, - REG_A5XX_SP_PERFCTR_SP_SEL_5 }, + REG_A5XX_SP_PERFCTR_SP_SEL_5, 73 }, { REG_A5XX_RBBM_PERFCTR_SP_6_LO, REG_A5XX_RBBM_PERFCTR_SP_6_HI, - REG_A5XX_SP_PERFCTR_SP_SEL_6 }, + REG_A5XX_SP_PERFCTR_SP_SEL_6, 74 }, { REG_A5XX_RBBM_PERFCTR_SP_7_LO, REG_A5XX_RBBM_PERFCTR_SP_7_HI, - REG_A5XX_SP_PERFCTR_SP_SEL_7 }, + REG_A5XX_SP_PERFCTR_SP_SEL_7, 75 }, { REG_A5XX_RBBM_PERFCTR_SP_8_LO, REG_A5XX_RBBM_PERFCTR_SP_8_HI, - REG_A5XX_SP_PERFCTR_SP_SEL_8 }, + REG_A5XX_SP_PERFCTR_SP_SEL_8, 76 }, { REG_A5XX_RBBM_PERFCTR_SP_9_LO, REG_A5XX_RBBM_PERFCTR_SP_9_HI, - REG_A5XX_SP_PERFCTR_SP_SEL_9 }, + REG_A5XX_SP_PERFCTR_SP_SEL_9, 77 }, { REG_A5XX_RBBM_PERFCTR_SP_10_LO, REG_A5XX_RBBM_PERFCTR_SP_10_HI, - REG_A5XX_SP_PERFCTR_SP_SEL_10 }, + REG_A5XX_SP_PERFCTR_SP_SEL_10, 78 }, { REG_A5XX_RBBM_PERFCTR_SP_11_LO, REG_A5XX_RBBM_PERFCTR_SP_11_HI, - REG_A5XX_SP_PERFCTR_SP_SEL_11 }, + REG_A5XX_SP_PERFCTR_SP_SEL_11, 79 }, }; static struct adreno_counter a5xx_counters_tp[] = { { REG_A5XX_RBBM_PERFCTR_TP_0_LO, REG_A5XX_RBBM_PERFCTR_TP_0_HI, - REG_A5XX_TPL1_PERFCTR_TP_SEL_0 }, + REG_A5XX_TPL1_PERFCTR_TP_SEL_0, 60 }, { REG_A5XX_RBBM_PERFCTR_TP_1_LO, REG_A5XX_RBBM_PERFCTR_TP_1_HI, - REG_A5XX_TPL1_PERFCTR_TP_SEL_1 }, + REG_A5XX_TPL1_PERFCTR_TP_SEL_1, 61 }, { REG_A5XX_RBBM_PERFCTR_TP_2_LO, REG_A5XX_RBBM_PERFCTR_TP_2_HI, - REG_A5XX_TPL1_PERFCTR_TP_SEL_2 }, + REG_A5XX_TPL1_PERFCTR_TP_SEL_2, 62 }, { REG_A5XX_RBBM_PERFCTR_TP_3_LO, REG_A5XX_RBBM_PERFCTR_TP_3_HI, - REG_A5XX_TPL1_PERFCTR_TP_SEL_3 }, + REG_A5XX_TPL1_PERFCTR_TP_SEL_3, 63 }, { REG_A5XX_RBBM_PERFCTR_TP_4_LO, 
REG_A5XX_RBBM_PERFCTR_TP_4_HI, - REG_A5XX_TPL1_PERFCTR_TP_SEL_4 }, + REG_A5XX_TPL1_PERFCTR_TP_SEL_4, 64 }, { REG_A5XX_RBBM_PERFCTR_TP_5_LO, REG_A5XX_RBBM_PERFCTR_TP_5_HI, - REG_A5XX_TPL1_PERFCTR_TP_SEL_5 }, + REG_A5XX_TPL1_PERFCTR_TP_SEL_5, 65 }, { REG_A5XX_RBBM_PERFCTR_TP_6_LO, REG_A5XX_RBBM_PERFCTR_TP_6_HI, - REG_A5XX_TPL1_PERFCTR_TP_SEL_6 }, + REG_A5XX_TPL1_PERFCTR_TP_SEL_6, 66 }, { REG_A5XX_RBBM_PERFCTR_TP_7_LO, REG_A5XX_RBBM_PERFCTR_TP_7_HI, - REG_A5XX_TPL1_PERFCTR_TP_SEL_7 }, + REG_A5XX_TPL1_PERFCTR_TP_SEL_7, 67 }, }; static struct adreno_counter a5xx_counters_tse[] = { { REG_A5XX_RBBM_PERFCTR_TSE_0_LO, REG_A5XX_RBBM_PERFCTR_TSE_0_HI, - REG_A5XX_GRAS_PERFCTR_TSE_SEL_0 }, + REG_A5XX_GRAS_PERFCTR_TSE_SEL_0, 44 }, { REG_A5XX_RBBM_PERFCTR_TSE_1_LO, REG_A5XX_RBBM_PERFCTR_TSE_1_HI, - REG_A5XX_GRAS_PERFCTR_TSE_SEL_1 }, + REG_A5XX_GRAS_PERFCTR_TSE_SEL_1, 45 }, { REG_A5XX_RBBM_PERFCTR_TSE_2_LO, REG_A5XX_RBBM_PERFCTR_TSE_2_HI, - REG_A5XX_GRAS_PERFCTR_TSE_SEL_2 }, + REG_A5XX_GRAS_PERFCTR_TSE_SEL_2, 46 }, { REG_A5XX_RBBM_PERFCTR_TSE_3_LO, REG_A5XX_RBBM_PERFCTR_TSE_3_HI, - REG_A5XX_GRAS_PERFCTR_TSE_SEL_3 }, + REG_A5XX_GRAS_PERFCTR_TSE_SEL_3, 47 }, }; static struct adreno_counter a5xx_counters_uche[] = { { REG_A5XX_RBBM_PERFCTR_UCHE_0_LO, REG_A5XX_RBBM_PERFCTR_UCHE_0_HI, - REG_A5XX_UCHE_PERFCTR_UCHE_SEL_0 }, + REG_A5XX_UCHE_PERFCTR_UCHE_SEL_0, 52 }, { REG_A5XX_RBBM_PERFCTR_UCHE_1_LO, REG_A5XX_RBBM_PERFCTR_UCHE_1_HI, - REG_A5XX_UCHE_PERFCTR_UCHE_SEL_1 }, + REG_A5XX_UCHE_PERFCTR_UCHE_SEL_1, 53 }, { REG_A5XX_RBBM_PERFCTR_UCHE_2_LO, REG_A5XX_RBBM_PERFCTR_UCHE_2_HI, - REG_A5XX_UCHE_PERFCTR_UCHE_SEL_2 }, + REG_A5XX_UCHE_PERFCTR_UCHE_SEL_2, 54 }, { REG_A5XX_RBBM_PERFCTR_UCHE_3_LO, REG_A5XX_RBBM_PERFCTR_UCHE_3_HI, - REG_A5XX_UCHE_PERFCTR_UCHE_SEL_3 }, + REG_A5XX_UCHE_PERFCTR_UCHE_SEL_3, 55 }, { REG_A5XX_RBBM_PERFCTR_UCHE_4_LO, REG_A5XX_RBBM_PERFCTR_UCHE_4_HI, - REG_A5XX_UCHE_PERFCTR_UCHE_SEL_4 }, + REG_A5XX_UCHE_PERFCTR_UCHE_SEL_4, 56 }, { REG_A5XX_RBBM_PERFCTR_UCHE_5_LO, REG_A5XX_RBBM_PERFCTR_UCHE_5_HI, - REG_A5XX_UCHE_PERFCTR_UCHE_SEL_5 }, + REG_A5XX_UCHE_PERFCTR_UCHE_SEL_5, 57 }, { REG_A5XX_RBBM_PERFCTR_UCHE_6_LO, REG_A5XX_RBBM_PERFCTR_UCHE_6_HI, - REG_A5XX_UCHE_PERFCTR_UCHE_SEL_6 }, + REG_A5XX_UCHE_PERFCTR_UCHE_SEL_6, 58 }, { REG_A5XX_RBBM_PERFCTR_UCHE_7_LO, REG_A5XX_RBBM_PERFCTR_UCHE_7_HI, - REG_A5XX_UCHE_PERFCTR_UCHE_SEL_7 }, + REG_A5XX_UCHE_PERFCTR_UCHE_SEL_7, 59 }, }; static struct adreno_counter a5xx_counters_vfd[] = { { REG_A5XX_RBBM_PERFCTR_VFD_0_LO, REG_A5XX_RBBM_PERFCTR_VFD_0_HI, - REG_A5XX_VFD_PERFCTR_VFD_SEL_0 }, + REG_A5XX_VFD_PERFCTR_VFD_SEL_0, 20 }, { REG_A5XX_RBBM_PERFCTR_VFD_1_LO, REG_A5XX_RBBM_PERFCTR_VFD_1_HI, - REG_A5XX_VFD_PERFCTR_VFD_SEL_1 }, + REG_A5XX_VFD_PERFCTR_VFD_SEL_1, 21 }, { REG_A5XX_RBBM_PERFCTR_VFD_2_LO, REG_A5XX_RBBM_PERFCTR_VFD_2_HI, - REG_A5XX_VFD_PERFCTR_VFD_SEL_2 }, + REG_A5XX_VFD_PERFCTR_VFD_SEL_2, 22 }, { REG_A5XX_RBBM_PERFCTR_VFD_3_LO, REG_A5XX_RBBM_PERFCTR_VFD_3_HI, - REG_A5XX_VFD_PERFCTR_VFD_SEL_3 }, + REG_A5XX_VFD_PERFCTR_VFD_SEL_3, 23 }, { REG_A5XX_RBBM_PERFCTR_VFD_4_LO, REG_A5XX_RBBM_PERFCTR_VFD_4_HI, - REG_A5XX_VFD_PERFCTR_VFD_SEL_4 }, + REG_A5XX_VFD_PERFCTR_VFD_SEL_4, 24 }, { REG_A5XX_RBBM_PERFCTR_VFD_5_LO, REG_A5XX_RBBM_PERFCTR_VFD_5_HI, - REG_A5XX_VFD_PERFCTR_VFD_SEL_5 }, + REG_A5XX_VFD_PERFCTR_VFD_SEL_5, 25 }, { REG_A5XX_RBBM_PERFCTR_VFD_6_LO, REG_A5XX_RBBM_PERFCTR_VFD_6_HI, - REG_A5XX_VFD_PERFCTR_VFD_SEL_6 }, + REG_A5XX_VFD_PERFCTR_VFD_SEL_6, 26 }, { REG_A5XX_RBBM_PERFCTR_VFD_7_LO, REG_A5XX_RBBM_PERFCTR_VFD_7_HI, - REG_A5XX_VFD_PERFCTR_VFD_SEL_7 }, + 
REG_A5XX_VFD_PERFCTR_VFD_SEL_7, 27 }, }; static struct adreno_counter a5xx_counters_vpc[] = { { REG_A5XX_RBBM_PERFCTR_VPC_0_LO, REG_A5XX_RBBM_PERFCTR_VPC_0_HI, - REG_A5XX_VPC_PERFCTR_VPC_SEL_0 }, + REG_A5XX_VPC_PERFCTR_VPC_SEL_0, 36 }, { REG_A5XX_RBBM_PERFCTR_VPC_1_LO, REG_A5XX_RBBM_PERFCTR_VPC_1_HI, - REG_A5XX_VPC_PERFCTR_VPC_SEL_1 }, + REG_A5XX_VPC_PERFCTR_VPC_SEL_1, 37 }, { REG_A5XX_RBBM_PERFCTR_VPC_2_LO, REG_A5XX_RBBM_PERFCTR_VPC_2_HI, - REG_A5XX_VPC_PERFCTR_VPC_SEL_2 }, + REG_A5XX_VPC_PERFCTR_VPC_SEL_2, 38 }, { REG_A5XX_RBBM_PERFCTR_VPC_3_LO, REG_A5XX_RBBM_PERFCTR_VPC_3_HI, - REG_A5XX_VPC_PERFCTR_VPC_SEL_3 }, + REG_A5XX_VPC_PERFCTR_VPC_SEL_3, 39 }, }; static struct adreno_counter a5xx_counters_vsc[] = { { REG_A5XX_RBBM_PERFCTR_VSC_0_LO, REG_A5XX_RBBM_PERFCTR_VSC_0_HI, - REG_A5XX_VSC_PERFCTR_VSC_SEL_0 }, + REG_A5XX_VSC_PERFCTR_VSC_SEL_0, 88 }, { REG_A5XX_RBBM_PERFCTR_VSC_1_LO, REG_A5XX_RBBM_PERFCTR_VSC_1_HI, - REG_A5XX_VSC_PERFCTR_VSC_SEL_1 }, + REG_A5XX_VSC_PERFCTR_VSC_SEL_1, 89 }, }; static struct adreno_counter a5xx_counters_power_ccu[] = { { REG_A5XX_CCU_POWER_COUNTER_0_LO, REG_A5XX_CCU_POWER_COUNTER_0_HI, - REG_A5XX_RB_POWERCTR_CCU_SEL_0 }, + REG_A5XX_RB_POWERCTR_CCU_SEL_0, 40 }, { REG_A5XX_CCU_POWER_COUNTER_1_LO, REG_A5XX_CCU_POWER_COUNTER_1_HI, - REG_A5XX_RB_POWERCTR_CCU_SEL_1 }, + REG_A5XX_RB_POWERCTR_CCU_SEL_1, 41 }, }; static struct adreno_counter a5xx_counters_power_cp[] = { @@ -590,39 +686,47 @@ static struct adreno_counter a5xx_counters_alwayson_power[] = { REG_A5XX_GPMU_ALWAYS_ON_COUNTER_HI }, }; -#define DEFINE_COUNTER_GROUP(_name, _array, _get, _enable, _put) \ -static struct adreno_counter_group _name = { \ - .counters = _array, \ - .nr_counters = ARRAY_SIZE(_array), \ +#define DEFINE_COUNTER_GROUP(_n, _a, _get, _enable, _put, _save, _restore) \ +static struct adreno_counter_group _n = { \ + .counters = _a, \ + .nr_counters = ARRAY_SIZE(_a), \ .lock = __SPIN_LOCK_UNLOCKED(_name.lock), \ .funcs = { \ .get = _get, \ .enable = _enable, \ .read = a5xx_counter_read, \ .put = _put, \ + .save = _save, \ + .restore = _restore \ }, \ } -#define DEFAULT_COUNTER_GROUP(_name, _array) DEFINE_COUNTER_GROUP(_name, \ - _array, a5xx_counter_get, a5xx_counter_enable_cpu, a5xx_counter_put) +#define COUNTER_GROUP(_name, _array) DEFINE_COUNTER_GROUP(_name, \ + _array, a5xx_counter_get, a5xx_counter_enable_cpu, a5xx_counter_put, \ + a5xx_counter_save, a5xx_counter_restore) #define SPTP_COUNTER_GROUP(_name, _array) DEFINE_COUNTER_GROUP(_name, \ - _array, a5xx_counter_get, a5xx_counter_enable_pm4, a5xx_counter_put) + _array, a5xx_counter_get, a5xx_counter_enable_pm4, a5xx_counter_put, \ + a5xx_counter_save, a5xx_counter_restore) + +#define POWER_COUNTER_GROUP(_name, _array) DEFINE_COUNTER_GROUP(_name, \ + _array, a5xx_counter_get, a5xx_counter_enable_cpu, a5xx_counter_put, \ + NULL, NULL) /* "standard" counters */ -DEFAULT_COUNTER_GROUP(a5xx_counter_group_cp, a5xx_counters_cp); -DEFAULT_COUNTER_GROUP(a5xx_counter_group_rbbm, a5xx_counters_rbbm); -DEFAULT_COUNTER_GROUP(a5xx_counter_group_pc, a5xx_counters_pc); -DEFAULT_COUNTER_GROUP(a5xx_counter_group_vfd, a5xx_counters_vfd); -DEFAULT_COUNTER_GROUP(a5xx_counter_group_vpc, a5xx_counters_vpc); -DEFAULT_COUNTER_GROUP(a5xx_counter_group_ccu, a5xx_counters_ccu); -DEFAULT_COUNTER_GROUP(a5xx_counter_group_cmp, a5xx_counters_cmp); -DEFAULT_COUNTER_GROUP(a5xx_counter_group_tse, a5xx_counters_tse); -DEFAULT_COUNTER_GROUP(a5xx_counter_group_ras, a5xx_counters_ras); -DEFAULT_COUNTER_GROUP(a5xx_counter_group_uche, a5xx_counters_uche); 
-DEFAULT_COUNTER_GROUP(a5xx_counter_group_rb, a5xx_counters_rb); -DEFAULT_COUNTER_GROUP(a5xx_counter_group_vsc, a5xx_counters_vsc); -DEFAULT_COUNTER_GROUP(a5xx_counter_group_lrz, a5xx_counters_lrz); +COUNTER_GROUP(a5xx_counter_group_cp, a5xx_counters_cp); +COUNTER_GROUP(a5xx_counter_group_rbbm, a5xx_counters_rbbm); +COUNTER_GROUP(a5xx_counter_group_pc, a5xx_counters_pc); +COUNTER_GROUP(a5xx_counter_group_vfd, a5xx_counters_vfd); +COUNTER_GROUP(a5xx_counter_group_vpc, a5xx_counters_vpc); +COUNTER_GROUP(a5xx_counter_group_ccu, a5xx_counters_ccu); +COUNTER_GROUP(a5xx_counter_group_cmp, a5xx_counters_cmp); +COUNTER_GROUP(a5xx_counter_group_tse, a5xx_counters_tse); +COUNTER_GROUP(a5xx_counter_group_ras, a5xx_counters_ras); +COUNTER_GROUP(a5xx_counter_group_uche, a5xx_counters_uche); +COUNTER_GROUP(a5xx_counter_group_rb, a5xx_counters_rb); +COUNTER_GROUP(a5xx_counter_group_vsc, a5xx_counters_vsc); +COUNTER_GROUP(a5xx_counter_group_lrz, a5xx_counters_lrz); /* SP/TP counters */ SPTP_COUNTER_GROUP(a5xx_counter_group_hlsq, a5xx_counters_hlsq); @@ -630,24 +734,27 @@ SPTP_COUNTER_GROUP(a5xx_counter_group_tp, a5xx_counters_tp); SPTP_COUNTER_GROUP(a5xx_counter_group_sp, a5xx_counters_sp); /* Power counters */ -DEFAULT_COUNTER_GROUP(a5xx_counter_group_power_ccu, a5xx_counters_power_ccu); -DEFAULT_COUNTER_GROUP(a5xx_counter_group_power_cp, a5xx_counters_power_cp); -DEFAULT_COUNTER_GROUP(a5xx_counter_group_power_rb, a5xx_counters_power_rb); -DEFAULT_COUNTER_GROUP(a5xx_counter_group_power_sp, a5xx_counters_power_sp); -DEFAULT_COUNTER_GROUP(a5xx_counter_group_power_tp, a5xx_counters_power_tp); -DEFAULT_COUNTER_GROUP(a5xx_counter_group_power_uche, a5xx_counters_power_uche); +POWER_COUNTER_GROUP(a5xx_counter_group_power_ccu, a5xx_counters_power_ccu); +POWER_COUNTER_GROUP(a5xx_counter_group_power_cp, a5xx_counters_power_cp); +POWER_COUNTER_GROUP(a5xx_counter_group_power_rb, a5xx_counters_power_rb); +POWER_COUNTER_GROUP(a5xx_counter_group_power_sp, a5xx_counters_power_sp); +POWER_COUNTER_GROUP(a5xx_counter_group_power_tp, a5xx_counters_power_tp); +POWER_COUNTER_GROUP(a5xx_counter_group_power_uche, a5xx_counters_power_uche); DEFINE_COUNTER_GROUP(a5xx_counter_group_alwayson, a5xx_counters_alwayson, - a5xx_counter_get_fixed, NULL, NULL); + a5xx_counter_get_fixed, NULL, NULL, NULL, NULL); DEFINE_COUNTER_GROUP(a5xx_counter_group_vbif, a5xx_counters_vbif, - a5xx_counter_get, a5xx_counter_enable_vbif, a5xx_counter_put); + a5xx_counter_get, a5xx_counter_enable_vbif, a5xx_counter_put, + NULL, NULL); DEFINE_COUNTER_GROUP(a5xx_counter_group_gpmu, a5xx_counters_gpmu, - a5xx_counter_get, a5xx_counter_enable_gpmu, a5xx_counter_put); + a5xx_counter_get, a5xx_counter_enable_gpmu, a5xx_counter_put, + NULL, NULL); DEFINE_COUNTER_GROUP(a5xx_counter_group_vbif_power, a5xx_counters_vbif_power, - a5xx_counter_get_fixed, a5xx_counter_enable_vbif_power, NULL); + a5xx_counter_get_fixed, a5xx_counter_enable_vbif_power, NULL, NULL, + NULL); DEFINE_COUNTER_GROUP(a5xx_counter_group_alwayson_power, a5xx_counters_alwayson_power, a5xx_counter_get_fixed, - a5xx_counter_enable_alwayson_power, NULL); + a5xx_counter_enable_alwayson_power, NULL, NULL, NULL); static const struct adreno_counter_group *a5xx_counter_groups[] = { [MSM_COUNTER_GROUP_ALWAYSON] = &a5xx_counter_group_alwayson, @@ -680,6 +787,35 @@ static const struct adreno_counter_group *a5xx_counter_groups[] = { &a5xx_counter_group_alwayson_power, }; +void a5xx_counters_restore(struct msm_gpu *gpu) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(a5xx_counter_groups); i++) { + struct 
adreno_counter_group *group = + (struct adreno_counter_group *) a5xx_counter_groups[i]; + + if (group && group->funcs.restore) + group->funcs.restore(gpu, group); + + a5xx_counter_group_enable(gpu, group, true); + } +} + + +void a5xx_counters_save(struct msm_gpu *gpu) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(a5xx_counter_groups); i++) { + struct adreno_counter_group *group = + (struct adreno_counter_group *) a5xx_counter_groups[i]; + + if (group && group->funcs.save) + group->funcs.save(gpu, group); + } +} + int a5xx_counters_init(struct adreno_gpu *adreno_gpu) { adreno_gpu->counter_groups = a5xx_counter_groups; diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index 765c1c087c76..e493c2fee762 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -15,7 +15,6 @@ #include "msm_iommu.h" #include "msm_trace.h" #include "a5xx_gpu.h" -#include <linux/clk/msm-clk.h> #define SECURE_VA_START 0xc0000000 #define SECURE_VA_SIZE SZ_256M @@ -1170,25 +1169,14 @@ static int a5xx_pm_resume(struct msm_gpu *gpu) { int ret; - /* - * Between suspend/resumes the GPU clocks need to be turned off - * but not a complete power down, typically between frames. Set the - * memory retention flags on the GPU core clock to retain memory - * across clock toggles. - */ - if (gpu->core_clk) { - clk_set_flags(gpu->core_clk, CLKFLAG_RETAIN_PERIPH); - clk_set_flags(gpu->core_clk, CLKFLAG_RETAIN_MEM); - } - /* Turn on the core power */ ret = msm_gpu_pm_resume(gpu); if (ret) return ret; - /* If we are already up, don't mess with what works */ - if (gpu->active_cnt > 1) - return 0; + + /* Restore all the counters before turning on the GPMU */ + a5xx_counters_restore(gpu); /* Turn the RBCCU domain first to limit the chances of voltage droop */ gpu_write(gpu, REG_A5XX_GPMU_RBCCU_POWER_CNTL, 0x778000); @@ -1220,33 +1208,26 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); - /* Turn off the memory retention flag when not necessary */ - if (gpu->core_clk) { - clk_set_flags(gpu->core_clk, CLKFLAG_NORETAIN_PERIPH); - clk_set_flags(gpu->core_clk, CLKFLAG_NORETAIN_MEM); - } - - /* Only do this next bit if we are about to go down */ - if (gpu->active_cnt == 1) { - /* Clear the VBIF pipe before shutting down */ - - gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0xF); - spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) & 0xF) + /* Clear the VBIF pipe before shutting down */ + gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0xF); + spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) & 0xF) == 0xF); - gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0); + gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0); - /* - * Reset the VBIF before power collapse to avoid issue with FIFO - * entries - */ - if (adreno_is_a530(adreno_gpu)) { - /* These only need to be done for A530 */ - gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, + /* Save the counters before going down */ + a5xx_counters_save(gpu); + + /* + * Reset the VBIF before power collapse to avoid issue with FIFO + * entries + */ + if (adreno_is_a530(adreno_gpu)) { + /* These only need to be done for A530 */ + gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x003C0000); - gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, + gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x00000000); - } } return msm_gpu_pm_suspend(gpu); @@ -1266,29 +1247,10 @@ static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value) #ifdef CONFIG_DEBUG_FS static void 
a5xx_show(struct msm_gpu *gpu, struct seq_file *m) { - struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); - struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); - bool enabled = test_bit(A5XX_HWCG_ENABLED, &a5xx_gpu->flags); - - gpu->funcs->pm_resume(gpu); - seq_printf(m, "status: %08x\n", gpu_read(gpu, REG_A5XX_RBBM_STATUS)); - - /* - * Temporarily disable hardware clock gating before going into - * adreno_show to avoid issues while reading the registers - */ - - if (enabled) - a5xx_set_hwcg(gpu, false); - adreno_show(gpu, m); - if (enabled) - a5xx_set_hwcg(gpu, true); - - gpu->funcs->pm_suspend(gpu); } #endif diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h index c30b65785ab6..9c62f861136d 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h @@ -194,5 +194,7 @@ static inline bool a5xx_in_preempt(struct a5xx_gpu *a5xx_gpu) } int a5xx_counters_init(struct adreno_gpu *adreno_gpu); +void a5xx_counters_save(struct msm_gpu *gpu); +void a5xx_counters_restore(struct msm_gpu *gpu); #endif /* __A5XX_GPU_H__ */ diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index 4e4709d6172f..4ecc3ad762ef 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -164,13 +164,10 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev) if (gpu) { int ret; - mutex_lock(&dev->struct_mutex); - gpu->funcs->pm_resume(gpu); - mutex_unlock(&dev->struct_mutex); - disable_irq(gpu->irq); - - ret = gpu->funcs->hw_init(gpu); + pm_runtime_get_sync(&pdev->dev); + ret = msm_gpu_hw_init(gpu); + pm_runtime_put_sync_autosuspend(&pdev->dev); if (ret) { dev_err(dev->dev, "gpu hw init failed: %d\n", ret); mutex_lock(&dev->struct_mutex); @@ -178,10 +175,6 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev) mutex_unlock(&dev->struct_mutex); gpu->funcs->destroy(gpu); gpu = NULL; - } else { - enable_irq(gpu->irq); - /* give inactive pm a chance to kick in: */ - msm_gpu_retire(gpu); } } @@ -250,12 +243,35 @@ static const struct of_device_id dt_match[] = { {} }; +#ifdef CONFIG_PM +static int adreno_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct msm_gpu *gpu = platform_get_drvdata(pdev); + + return gpu->funcs->pm_resume(gpu); +} + +static int adreno_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct msm_gpu *gpu = platform_get_drvdata(pdev); + + return gpu->funcs->pm_suspend(gpu); +} +#endif + +static const struct dev_pm_ops adreno_pm_ops = { + SET_RUNTIME_PM_OPS(adreno_suspend, adreno_resume, NULL) +}; + static struct platform_driver adreno_driver = { .probe = adreno_probe, .remove = adreno_remove, .driver = { .name = "adreno", .of_match_table = dt_match, + .pm = &adreno_pm_ops, }, }; diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 04e0056f2a49..d397c44f1203 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -48,8 +48,15 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value) *value = gpu->gpufreq[gpu->active_level]; return 0; case MSM_PARAM_TIMESTAMP: - if (adreno_gpu->funcs->get_timestamp) - return adreno_gpu->funcs->get_timestamp(gpu, value); + if (adreno_gpu->funcs->get_timestamp) { + int ret; + + pm_runtime_get_sync(&gpu->pdev->dev); + ret = adreno_gpu->funcs->get_timestamp(gpu, value); + 
pm_runtime_put_autosuspend(&gpu->pdev->dev); + + return ret; + } return -EINVAL; case MSM_PARAM_NR_RINGS: *value = gpu->nr_rings; @@ -68,14 +75,25 @@ int adreno_hw_init(struct msm_gpu *gpu) DBG("%s", gpu->name); for (i = 0; i < gpu->nr_rings; i++) { - int ret = msm_gem_get_iova(gpu->rb[i]->bo, gpu->aspace, - &gpu->rb[i]->iova); + struct msm_ringbuffer *ring = gpu->rb[i]; + + int ret = msm_gem_get_iova(ring->bo, gpu->aspace, + &ring->iova); if (ret) { - gpu->rb[i]->iova = 0; + ring->iova = 0; dev_err(gpu->dev->dev, "could not map ringbuffer %d: %d\n", i, ret); return ret; } + + /* reset ringbuffer(s): */ + /* No need for a lock here, nobody else is peeking in */ + ring->cur = ring->start; + ring->next = ring->start; + + /* reset completed fence seqno, discard anything pending: */ + ring->memptrs->fence = adreno_submitted_fence(gpu, ring); + ring->memptrs->rptr = 0; } /* @@ -133,35 +151,22 @@ uint32_t adreno_submitted_fence(struct msm_gpu *gpu, void adreno_recover(struct msm_gpu *gpu) { struct drm_device *dev = gpu->dev; - struct msm_ringbuffer *ring; - int ret, i; - - gpu->funcs->pm_suspend(gpu); - - /* reset ringbuffer(s): */ - - FOR_EACH_RING(gpu, ring, i) { - if (!ring) - continue; + int ret; - /* No need for a lock here, nobody else is peeking in */ - ring->cur = ring->start; - ring->next = ring->start; + /* + * XXX pm-runtime?? we *need* the device to be off after this + * so maybe continuing to call ->pm_suspend/resume() is better? + */ - /* reset completed fence seqno, discard anything pending: */ - ring->memptrs->fence = adreno_submitted_fence(gpu, ring); - ring->memptrs->rptr = 0; - } + gpu->funcs->pm_suspend(gpu); gpu->funcs->pm_resume(gpu); - disable_irq(gpu->irq); - ret = gpu->funcs->hw_init(gpu); + ret = msm_gpu_hw_init(gpu); if (ret) { dev_err(dev->dev, "gpu hw init failed: %d\n", ret); /* hmm, oh well? 
*/ } - enable_irq(gpu->irq); } void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) @@ -520,6 +525,10 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, if (ret) return ret; + pm_runtime_set_autosuspend_delay(&pdev->dev, DRM_MSM_INACTIVE_PERIOD); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_enable(&pdev->dev); + ret = request_firmware(&adreno_gpu->pm4, adreno_gpu->info->pm4fw, drm->dev); if (ret) { dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n", @@ -535,12 +544,18 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, return ret; } -void adreno_gpu_cleanup(struct adreno_gpu *gpu) +void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu) { - release_firmware(gpu->pm4); - release_firmware(gpu->pfp); + struct msm_gpu *gpu = &adreno_gpu->base; + struct drm_device *dev = gpu->dev; + struct msm_drm_private *priv = dev->dev_private; + struct platform_device *pdev = priv->gpu_pdev; + + release_firmware(adreno_gpu->pm4); + release_firmware(adreno_gpu->pfp); - msm_gpu_cleanup(&gpu->base); + pm_runtime_disable(&pdev->dev); + msm_gpu_cleanup(gpu); } static void adreno_snapshot_os(struct msm_gpu *gpu, diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index c96189fb805b..462352f7fc9a 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -87,8 +87,10 @@ struct adreno_counter { u32 lo; u32 hi; u32 sel; + int load_bit; u32 countable; u32 refcount; + u64 value; }; struct adreno_counter_group { @@ -99,11 +101,15 @@ struct adreno_counter_group { int (*get)(struct msm_gpu *, struct adreno_counter_group *, u32, u32 *, u32 *); void (*enable)(struct msm_gpu *, - struct adreno_counter_group *, int); + struct adreno_counter_group *, int, bool); u64 (*read)(struct msm_gpu *, struct adreno_counter_group *, int); void (*put)(struct msm_gpu *, struct adreno_counter_group *, int); + void (*save)(struct msm_gpu *, + struct adreno_counter_group *); + void (*restore)(struct msm_gpu *, + struct adreno_counter_group *); } funcs; }; diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c index c98f4511d644..fa111d581529 100644 --- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c +++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c @@ -2153,6 +2153,8 @@ int sde_hdmi_get_property(struct drm_connector *connector, mutex_lock(&hdmi_display->display_lock); if (property_index == CONNECTOR_PROP_PLL_ENABLE) *value = hdmi_display->pll_update_enable ? 1 : 0; + if (property_index == CONNECTOR_PROP_HDCP_VERSION) + *value = hdmi_display->sink_hdcp_ver; mutex_unlock(&hdmi_display->display_lock); return rc; diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h index 672a9f188d27..865998c6a126 100644 --- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h +++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h @@ -108,14 +108,34 @@ enum hdmi_tx_feature_type { * @mode: Current display mode. * @connected: If HDMI display is connected. * @is_tpg_enabled: TPG state. 
+ * @hdmi_tx_version: HDMI TX version + * @hdmi_tx_major_version: HDMI TX major version + * @max_pclk_khz: Max pixel clock supported + * @hdcp1_use_sw_keys: If HDCP1 engine uses SW keys + * @hdcp14_present: If the sink supports HDCP 1.4 + * @hdcp22_present: If the sink supports HDCP 2.2 + * @hdcp_status: Current HDCP status + * @sink_hdcp_ver: HDCP version of the sink + * @enc_lvl: Current encryption level + * @curr_hdr_state: Current HDR state of the HDMI connector + * @auth_state: Current authentication state of HDCP + * @sink_hdcp22_support: If the sink supports HDCP 2.2 + * @src_hdcp22_support: If the source supports HDCP 2.2 + * @hdcp_data: Call back data registered by the client with HDCP lib + * @hdcp_feat_data: Handle to HDCP feature data + * @hdcp_ops: Function ops registered by the client with the HDCP lib + * @ddc_ctrl: Handle to HDMI DDC Controller * @hpd_work: HPD work structure. * @codec_ready: If audio codec is ready. * @client_notify_pending: If there is client notification pending. * @irq_domain: IRQ domain structure. + * @notifier: CEC notifier to convey physical address information. * @pll_update_enable: if it's allowed to update HDMI PLL ppm. * @dc_enable: If deep color is enabled. Only DC_30 so far. * @dc_feature_supported: If deep color feature is supported. - * @notifier: CEC notifier to convey physical address information. + * @bt2020_colorimetry: If BT2020 colorimetry is supported by sink + * @hdcp_cb_work: Callback function for HDCP + * @io: Handle to IO base addresses for HDMI * @root: Debug fs root entry. */ struct sde_hdmi { @@ -146,6 +166,7 @@ struct sde_hdmi { u32 hdcp14_present; u32 hdcp22_present; u8 hdcp_status; + u8 sink_hdcp_ver; u32 enc_lvl; u8 curr_hdr_state; bool auth_state; diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c index e6b6d15b5fb7..3c470caec571 100644 --- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c +++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c @@ -511,6 +511,11 @@ static void sde_hdmi_update_hdcp_info(struct drm_connector *connector) } } + if (display->sink_hdcp22_support) + display->sink_hdcp_ver = SDE_HDMI_HDCP_22; + else + display->sink_hdcp_ver = SDE_HDMI_HDCP_14; + /* update internal data about hdcp */ display->hdcp_data = fd; display->hdcp_ops = ops; @@ -543,6 +548,7 @@ static void _sde_hdmi_bridge_disable(struct drm_bridge *bridge) mutex_lock(&display->display_lock); display->pll_update_enable = false; + display->sink_hdcp_ver = SDE_HDMI_HDCP_NONE; mutex_unlock(&display->display_lock); } diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h index 3c6b0f1b9dd4..421bdf7643ca 100644 --- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h +++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h @@ -105,6 +105,10 @@ #define SDE_HDMI_USE_EXTENDED_COLORIMETRY 0x3 #define SDE_HDMI_BT2020_COLORIMETRY 0x6 +#define SDE_HDMI_HDCP_22 0x22 +#define SDE_HDMI_HDCP_14 0x14 +#define SDE_HDMI_HDCP_NONE 0x0 + /* * Bits 1:0 in HDMI_HW_DDC_CTRL that dictate how the HDCP 2.2 RxStatus will be * read by the hardware diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c index 7d660ba56594..9dbd86eff816 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi.c @@ -424,7 +424,7 @@ static struct hdmi_platform_config hdmi_tx_8994_config = { static struct hdmi_platform_config hdmi_tx_8996_config = { .phy_init = NULL, HDMI_CFG(pwr_reg, none), - HDMI_CFG(hpd_reg, none), +
HDMI_CFG(hpd_reg, 8x74), HDMI_CFG(pwr_clk, 8x74), HDMI_CFG(hpd_clk, 8x74), .hpd_freq = hpd_clk_freq_8x74, diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index c8b11425a817..c61753311771 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -185,9 +185,14 @@ static void vblank_ctrl_worker(struct kthread_work *work) struct msm_kms *kms = priv->kms; struct vblank_event *vbl_ev, *tmp; unsigned long flags; + struct kthread_worker *worker = work->worker; + struct msm_drm_commit *commit = container_of(worker, + struct msm_drm_commit, worker); spin_lock_irqsave(&vbl_ctrl->lock, flags); list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) { + if (vbl_ev->crtc_id != commit->crtc_id) + continue; list_del(&vbl_ev->node); spin_unlock_irqrestore(&vbl_ctrl->lock, flags); @@ -280,6 +285,10 @@ static int msm_unload(struct drm_device *dev) if (gpu) { mutex_lock(&dev->struct_mutex); + /* + * XXX what do we do here? + * pm_runtime_enable(&pdev->dev); + */ gpu->funcs->pm_suspend(gpu); mutex_unlock(&dev->struct_mutex); gpu->funcs->destroy(gpu); @@ -293,7 +302,7 @@ static int msm_unload(struct drm_device *dev) priv->vram.paddr, &attrs); } - sde_evtlog_destroy(); + sde_dbg_destroy(); sde_power_client_destroy(&priv->phandle, priv->pclient); sde_power_resource_deinit(pdev, &priv->phandle); @@ -423,11 +432,17 @@ static int msm_component_bind_all(struct device *dev, } #endif +static int msm_power_enable_wrapper(void *handle, void *client, bool enable) +{ + return sde_power_resource_enable(handle, client, enable); +} + static int msm_load(struct drm_device *dev, unsigned long flags) { struct platform_device *pdev = dev->platformdev; struct msm_drm_private *priv; struct msm_kms *kms; + struct sde_dbg_power_ctrl dbg_power_ctrl = { NULL }; int ret, i; priv = kzalloc(sizeof(*priv), GFP_KERNEL); @@ -477,9 +492,13 @@ static int msm_load(struct drm_device *dev, unsigned long flags) if (ret) goto fail; - ret = sde_evtlog_init(dev->primary->debugfs_root); + dbg_power_ctrl.handle = &priv->phandle; + dbg_power_ctrl.client = priv->pclient; + dbg_power_ctrl.enable_fn = msm_power_enable_wrapper; + ret = sde_dbg_init(dev->primary->debugfs_root, &pdev->dev, + &dbg_power_ctrl); if (ret) { - dev_err(dev->dev, "failed to init evtlog: %d\n", ret); + dev_err(dev->dev, "failed to init sde dbg: %d\n", ret); goto fail; } @@ -659,10 +678,10 @@ static int msm_open(struct drm_device *dev, struct drm_file *file) if (IS_ERR(ctx)) return PTR_ERR(ctx); - if (ctx) + if (ctx) { INIT_LIST_HEAD(&ctx->counters); - - msm_submitqueue_init(ctx); + msm_submitqueue_init(ctx); + } file->driver_priv = ctx; @@ -896,7 +915,9 @@ static int msm_gpu_show(struct drm_device *dev, struct seq_file *m) if (gpu) { seq_printf(m, "%s Status:\n", gpu->name); + pm_runtime_get_sync(&gpu->pdev->dev); gpu->funcs->show(gpu, m); + pm_runtime_put_sync(&gpu->pdev->dev); } return 0; @@ -2130,7 +2151,9 @@ static int msm_pdev_probe(struct platform_device *pdev) #ifdef CONFIG_OF add_components(&pdev->dev, &match, "connectors"); +#ifndef CONFIG_QCOM_KGSL add_components(&pdev->dev, &match, "gpus"); +#endif #else /* For non-DT case, it kinda sucks. 
We don't actually have a way * to know whether or not we are waiting for certain devices (or if diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 49b6029c3342..25dc5f9ef561 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -158,10 +158,12 @@ enum msm_mdp_conn_property { CONNECTOR_PROP_DST_H, CONNECTOR_PROP_PLL_DELTA, CONNECTOR_PROP_PLL_ENABLE, + CONNECTOR_PROP_HDCP_VERSION, /* enum/bitmask properties */ CONNECTOR_PROP_TOPOLOGY_NAME, CONNECTOR_PROP_TOPOLOGY_CONTROL, + CONNECTOR_PROP_LP, /* total # of properties */ CONNECTOR_PROP_COUNT diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 7c109fdab545..44d9784d1bd7 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -154,22 +154,9 @@ static int disable_axi(struct msm_gpu *gpu) int msm_gpu_pm_resume(struct msm_gpu *gpu) { - struct drm_device *dev = gpu->dev; - struct msm_drm_private *priv = dev->dev_private; - struct platform_device *pdev = priv->gpu_pdev; int ret; - DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt); - - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - - if (gpu->active_cnt++ > 0) - return 0; - - if (WARN_ON(gpu->active_cnt <= 0)) - return -EINVAL; - - WARN_ON(pm_runtime_get_sync(&pdev->dev) < 0); + DBG("%s", gpu->name); ret = enable_pwrrail(gpu); if (ret) @@ -186,25 +173,16 @@ int msm_gpu_pm_resume(struct msm_gpu *gpu) if (gpu->aspace && gpu->aspace->mmu) msm_mmu_enable(gpu->aspace->mmu); + gpu->needs_hw_init = true; + return 0; } int msm_gpu_pm_suspend(struct msm_gpu *gpu) { - struct drm_device *dev = gpu->dev; - struct msm_drm_private *priv = dev->dev_private; - struct platform_device *pdev = priv->gpu_pdev; int ret; - DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt); - - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - - if (--gpu->active_cnt > 0) - return 0; - - if (WARN_ON(gpu->active_cnt < 0)) - return -EINVAL; + DBG("%s", gpu->name); if (gpu->aspace && gpu->aspace->mmu) msm_mmu_disable(gpu->aspace->mmu); @@ -221,57 +199,23 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu) if (ret) return ret; - pm_runtime_put(&pdev->dev); return 0; } -/* - * Inactivity detection (for suspend): - */ - -static void inactive_worker(struct work_struct *work) +int msm_gpu_hw_init(struct msm_gpu *gpu) { - struct msm_gpu *gpu = container_of(work, struct msm_gpu, inactive_work); - struct drm_device *dev = gpu->dev; - - if (gpu->inactive) - return; - - DBG("%s: inactive!\n", gpu->name); - mutex_lock(&dev->struct_mutex); - if (!(msm_gpu_active(gpu) || gpu->inactive)) { - disable_axi(gpu); - disable_clk(gpu); - gpu->inactive = true; - } - mutex_unlock(&dev->struct_mutex); -} - -static void inactive_handler(unsigned long data) -{ - struct msm_gpu *gpu = (struct msm_gpu *)data; - struct msm_drm_private *priv = gpu->dev->dev_private; + int ret; - queue_work(priv->wq, &gpu->inactive_work); -} + if (!gpu->needs_hw_init) + return 0; -/* cancel inactive timer and make sure we are awake: */ -static void inactive_cancel(struct msm_gpu *gpu) -{ - DBG("%s", gpu->name); - del_timer(&gpu->inactive_timer); - if (gpu->inactive) { - enable_clk(gpu); - enable_axi(gpu); - gpu->inactive = false; - } -} + disable_irq(gpu->irq); + ret = gpu->funcs->hw_init(gpu); + if (!ret) + gpu->needs_hw_init = false; + enable_irq(gpu->irq); -static void inactive_start(struct msm_gpu *gpu) -{ - DBG("%s", gpu->name); - mod_timer(&gpu->inactive_timer, - round_jiffies_up(jiffies + DRM_MSM_INACTIVE_JIFFIES)); + return ret; } static void 
retire_guilty_submit(struct msm_gpu *gpu, @@ -306,8 +250,6 @@ static void recover_worker(struct work_struct *work) struct msm_ringbuffer *ring; int i; - inactive_cancel(gpu); - /* Retire all events that have already passed */ FOR_EACH_RING(gpu, ring, i) retire_submits(gpu, ring, ring->memptrs->fence); @@ -316,6 +258,8 @@ static void recover_worker(struct work_struct *work) /* Recover the GPU */ gpu->funcs->recover(gpu); + /* Decrement the device usage count for the guilty submit */ + pm_runtime_put_sync_autosuspend(&gpu->pdev->dev); /* Replay the remaining on all rings, highest priority first */ for (i = 0; i < gpu->nr_rings; i++) { @@ -438,6 +382,8 @@ void msm_gpu_perfcntr_start(struct msm_gpu *gpu) { unsigned long flags; + pm_runtime_get_sync(&gpu->pdev->dev); + spin_lock_irqsave(&gpu->perf_lock, flags); /* we could dynamically enable/disable perfcntr registers too.. */ gpu->last_sample.active = msm_gpu_active(gpu); @@ -451,6 +397,7 @@ void msm_gpu_perfcntr_start(struct msm_gpu *gpu) void msm_gpu_perfcntr_stop(struct msm_gpu *gpu) { gpu->perfcntr_active = false; + pm_runtime_put_sync(&gpu->pdev->dev); } /* returns -errno or # of cntrs sampled */ @@ -505,6 +452,8 @@ static void retire_submits(struct msm_gpu *gpu, struct msm_ringbuffer *ring, trace_msm_retired(submit, ticks->started, ticks->retired); + pm_runtime_mark_last_busy(&gpu->pdev->dev); + pm_runtime_put_autosuspend(&gpu->pdev->dev); msm_gem_submit_free(submit); } } @@ -550,9 +499,6 @@ static void retire_worker(struct work_struct *work) _retire_ring(gpu, ring, ring->memptrs->fence); mutex_unlock(&dev->struct_mutex); } - - if (!msm_gpu_active(gpu)) - inactive_start(gpu); } /* call from irq handler to schedule work to retire bo's */ @@ -574,7 +520,9 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) submit->fence = FENCE(submit->ring, ++ring->seqno); - inactive_cancel(gpu); + pm_runtime_get_sync(&gpu->pdev->dev); + + msm_gpu_hw_init(gpu); list_add_tail(&submit->node, &ring->submits); @@ -863,23 +811,12 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, gpu->dev = drm; gpu->funcs = funcs; gpu->name = name; - /* - * Set the inactive flag to false, so that when the retire worker - * kicks in from the init path, it knows that it has to turn off the - * clocks. This should be fine to do since this is the init sequence - * and we have an init_lock in msm_open() to protect against bad things - * from happening. 
- */ - gpu->inactive = false; INIT_LIST_HEAD(&gpu->active_list); INIT_WORK(&gpu->retire_work, retire_worker); - INIT_WORK(&gpu->inactive_work, inactive_worker); INIT_WORK(&gpu->recover_work, recover_worker); - setup_timer(&gpu->inactive_timer, inactive_handler, - (unsigned long)gpu); setup_timer(&gpu->hangcheck_timer, hangcheck_handler, (unsigned long)gpu); @@ -909,8 +846,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, goto fail; } - pm_runtime_enable(&pdev->dev); - ret = get_clocks(pdev, gpu); if (ret) goto fail; @@ -979,6 +914,8 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, pm_qos_add_request(&gpu->pm_qos_req_dma, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); + gpu->pdev = pdev; + platform_set_drvdata(pdev, gpu); bs_init(gpu); @@ -1000,7 +937,6 @@ fail: msm_gpu_destroy_address_space(gpu->aspace); msm_gpu_destroy_address_space(gpu->secure_aspace); - pm_runtime_disable(&pdev->dev); return ret; } @@ -1031,7 +967,6 @@ void msm_gpu_cleanup(struct msm_gpu *gpu) } msm_snapshot_destroy(gpu, gpu->snapshot); - pm_runtime_disable(&pdev->dev); msm_gpu_destroy_address_space(gpu->aspace); msm_gpu_destroy_address_space(gpu->secure_aspace); diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index eeebfb746f7f..deb12aed5b28 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -83,6 +83,7 @@ struct msm_gpu_funcs { struct msm_gpu { const char *name; struct drm_device *dev; + struct platform_device *pdev; const struct msm_gpu_funcs *funcs; /* performance counters (hw & sw): */ @@ -103,9 +104,8 @@ struct msm_gpu { /* list of GEM active objects: */ struct list_head active_list; - /* is gpu powered/active? */ - int active_cnt; - bool inactive; + /* does gpu need hw_init? */ + bool needs_hw_init; /* worker for handling active-list retiring: */ struct work_struct retire_work; @@ -139,9 +139,7 @@ struct msm_gpu { /* Hang and Inactivity Detection: */ #define DRM_MSM_INACTIVE_PERIOD 66 /* in ms (roughly four frames) */ -#define DRM_MSM_INACTIVE_JIFFIES msecs_to_jiffies(DRM_MSM_INACTIVE_PERIOD) - struct timer_list inactive_timer; - struct work_struct inactive_work; + #define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */ #define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD) struct timer_list hangcheck_timer; @@ -255,6 +253,8 @@ static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val) int msm_gpu_pm_suspend(struct msm_gpu *gpu); int msm_gpu_pm_resume(struct msm_gpu *gpu); +int msm_gpu_hw_init(struct msm_gpu *gpu); + void msm_gpu_perfcntr_start(struct msm_gpu *gpu); void msm_gpu_perfcntr_stop(struct msm_gpu *gpu); int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime, diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c index 5fa4c21060f9..6a741a7ce0f6 100644 --- a/drivers/gpu/drm/msm/sde/sde_connector.c +++ b/drivers/gpu/drm/msm/sde/sde_connector.c @@ -10,12 +10,13 @@ * GNU General Public License for more details. */ -#define pr_fmt(fmt) "sde-drm:[%s] " fmt, __func__ +#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__ #include "msm_drv.h" #include "sde_kms.h" #include "sde_connector.h" #include "sde_backlight.h" +#include "sde_splash.h" #define SDE_DEBUG_CONN(c, fmt, ...) SDE_DEBUG("conn%d " fmt,\ (c) ? 
(c)->base.base.id : -1, ##__VA_ARGS__) @@ -38,6 +39,13 @@ static const struct drm_prop_enum_list e_topology_control[] = { {SDE_RM_TOPCTL_PPSPLIT, "ppsplit"} }; +static const struct drm_prop_enum_list e_power_mode[] = { + {SDE_MODE_DPMS_ON, "ON"}, + {SDE_MODE_DPMS_LP1, "LP1"}, + {SDE_MODE_DPMS_LP2, "LP2"}, + {SDE_MODE_DPMS_OFF, "OFF"}, +}; + int sde_connector_get_info(struct drm_connector *connector, struct msm_display_info *info) { @@ -155,6 +163,7 @@ static void sde_connector_destroy(struct drm_connector *connector) msm_property_destroy(&c_conn->property_info); drm_connector_unregister(connector); + mutex_destroy(&c_conn->lock); sde_fence_deinit(&c_conn->retire_fence); drm_connector_cleanup(connector); kfree(c_conn); @@ -353,6 +362,56 @@ static int _sde_connector_set_hdr_info( return 0; } +static int _sde_connector_update_power_locked(struct sde_connector *c_conn) +{ + struct drm_connector *connector; + void *display; + int (*set_power)(struct drm_connector *, int, void *); + int mode, rc = 0; + + if (!c_conn) + return -EINVAL; + connector = &c_conn->base; + + mode = c_conn->lp_mode; + if (c_conn->dpms_mode != DRM_MODE_DPMS_ON) + mode = SDE_MODE_DPMS_OFF; + switch (c_conn->dpms_mode) { + case DRM_MODE_DPMS_ON: + mode = c_conn->lp_mode; + break; + case DRM_MODE_DPMS_STANDBY: + mode = SDE_MODE_DPMS_STANDBY; + break; + case DRM_MODE_DPMS_SUSPEND: + mode = SDE_MODE_DPMS_SUSPEND; + break; + case DRM_MODE_DPMS_OFF: + mode = SDE_MODE_DPMS_OFF; + break; + default: + mode = c_conn->lp_mode; + SDE_ERROR("conn %d dpms set to unrecognized mode %d\n", + connector->base.id, mode); + break; + } + + SDE_DEBUG("conn %d - dpms %d, lp %d, panel %d\n", connector->base.id, + c_conn->dpms_mode, c_conn->lp_mode, mode); + + if (mode != c_conn->last_panel_power_mode && c_conn->ops.set_power) { + display = c_conn->display; + set_power = c_conn->ops.set_power; + + mutex_unlock(&c_conn->lock); + rc = set_power(connector, mode, display); + mutex_lock(&c_conn->lock); + } + c_conn->last_panel_power_mode = mode; + + return rc; +} + static int sde_connector_atomic_set_property(struct drm_connector *connector, struct drm_connector_state *state, struct drm_property *property, @@ -379,8 +438,8 @@ static int sde_connector_atomic_set_property(struct drm_connector *connector, /* connector-specific property handling */ idx = msm_property_index(&c_conn->property_info, property); - - if (idx == CONNECTOR_PROP_OUT_FB) { + switch (idx) { + case CONNECTOR_PROP_OUT_FB: /* clear old fb, if present */ if (c_state->out_fb) _sde_connector_destroy_fb(c_conn, c_state); @@ -404,12 +463,20 @@ static int sde_connector_atomic_set_property(struct drm_connector *connector, if (rc) SDE_ERROR("prep fb failed, %d\n", rc); } - } - - if (idx == CONNECTOR_PROP_TOPOLOGY_CONTROL) { + break; + case CONNECTOR_PROP_TOPOLOGY_CONTROL: rc = sde_rm_check_property_topctl(val); if (rc) SDE_ERROR("invalid topology_control: 0x%llX\n", val); + break; + case CONNECTOR_PROP_LP: + mutex_lock(&c_conn->lock); + c_conn->lp_mode = val; + _sde_connector_update_power_locked(c_conn); + mutex_unlock(&c_conn->lock); + break; + default: + break; } if (idx == CONNECTOR_PROP_HDR_CONTROL) { @@ -467,13 +534,7 @@ static int sde_connector_atomic_get_property(struct drm_connector *connector, idx = msm_property_index(&c_conn->property_info, property); if (idx == CONNECTOR_PROP_RETIRE_FENCE) - /* - * Set a fence offset if not a virtual connector, so that the - * fence signals after one additional commit rather than at the - * end of the current one. 
- */ - rc = sde_fence_create(&c_conn->retire_fence, val, - c_conn->connector_type != DRM_MODE_CONNECTOR_VIRTUAL); + rc = sde_fence_create(&c_conn->retire_fence, val, 0); else /* get cached property value */ rc = msm_property_atomic_get(&c_conn->property_info, @@ -501,13 +562,89 @@ void sde_connector_prepare_fence(struct drm_connector *connector) void sde_connector_complete_commit(struct drm_connector *connector) { + struct drm_device *dev; + struct msm_drm_private *priv; + struct sde_connector *c_conn; + struct sde_kms *sde_kms; + if (!connector) { SDE_ERROR("invalid connector\n"); return; } + dev = connector->dev; + priv = dev->dev_private; + sde_kms = to_sde_kms(priv->kms); + /* signal connector's retire fence */ sde_fence_signal(&to_sde_connector(connector)->retire_fence, 0); + + /* after first vsync comes, + * early splash resource should start to be released. + */ + if (sde_splash_get_lk_complete_status(&sde_kms->splash_info)) { + c_conn = to_sde_connector(connector); + + sde_splash_clean_up_free_resource(priv->kms, + &priv->phandle, + c_conn->connector_type, + c_conn->display); + } + +} + +static int sde_connector_dpms(struct drm_connector *connector, + int mode) +{ + struct sde_connector *c_conn; + + if (!connector) { + SDE_ERROR("invalid connector\n"); + return -EINVAL; + } + c_conn = to_sde_connector(connector); + + /* validate incoming dpms request */ + switch (mode) { + case DRM_MODE_DPMS_ON: + case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + case DRM_MODE_DPMS_OFF: + SDE_DEBUG("conn %d dpms set to %d\n", + connector->base.id, mode); + break; + default: + SDE_ERROR("conn %d dpms set to unrecognized mode %d\n", + connector->base.id, mode); + break; + } + + mutex_lock(&c_conn->lock); + c_conn->dpms_mode = mode; + _sde_connector_update_power_locked(c_conn); + mutex_unlock(&c_conn->lock); + + /* use helper for boilerplate handling */ + return drm_atomic_helper_connector_dpms(connector, mode); +} + +int sde_connector_get_dpms(struct drm_connector *connector) +{ + struct sde_connector *c_conn; + int rc; + + if (!connector) { + SDE_DEBUG("invalid connector\n"); + return DRM_MODE_DPMS_OFF; + } + + c_conn = to_sde_connector(connector); + + mutex_lock(&c_conn->lock); + rc = c_conn->dpms_mode; + mutex_unlock(&c_conn->lock); + + return rc; } static void sde_connector_update_hdr_props(struct drm_connector *connector) @@ -558,7 +695,7 @@ sde_connector_detect(struct drm_connector *connector, bool force) } static const struct drm_connector_funcs sde_connector_ops = { - .dpms = drm_atomic_helper_connector_dpms, + .dpms = sde_connector_dpms, .reset = sde_connector_atomic_reset, .detect = sde_connector_detect, .destroy = sde_connector_destroy, @@ -681,6 +818,11 @@ struct drm_connector *sde_connector_init(struct drm_device *dev, c_conn->panel = panel; c_conn->display = display; + c_conn->dpms_mode = DRM_MODE_DPMS_ON; + c_conn->lp_mode = 0; + c_conn->last_panel_power_mode = SDE_MODE_DPMS_ON; + + sde_kms = to_sde_kms(priv->kms); if (sde_kms->vbif[VBIF_NRT]) { c_conn->aspace[SDE_IOMMU_DOMAIN_UNSECURE] = @@ -714,6 +856,8 @@ struct drm_connector *sde_connector_init(struct drm_device *dev, goto error_cleanup_conn; } + mutex_init(&c_conn->lock); + rc = drm_connector_register(&c_conn->base); if (rc) { SDE_ERROR("failed to register drm connector, %d\n", rc); @@ -783,6 +927,10 @@ struct drm_connector *sde_connector_init(struct drm_device *dev, "PLL_ENABLE", 0x0, 0, 1, 0, CONNECTOR_PROP_PLL_ENABLE); + msm_property_install_volatile_range(&c_conn->property_info, + "HDCP_VERSION", 0x0, 0, U8_MAX, 0, + 
CONNECTOR_PROP_HDCP_VERSION); + /* enum/bitmask properties */ msm_property_install_enum(&c_conn->property_info, "topology_name", DRM_MODE_PROP_IMMUTABLE, 0, e_topology_name, @@ -793,6 +941,11 @@ struct drm_connector *sde_connector_init(struct drm_device *dev, ARRAY_SIZE(e_topology_control), CONNECTOR_PROP_TOPOLOGY_CONTROL, 0); + msm_property_install_enum(&c_conn->property_info, "LP", + 0, 0, e_power_mode, + ARRAY_SIZE(e_power_mode), + CONNECTOR_PROP_LP, 0); + rc = msm_property_install_get_status(&c_conn->property_info); if (rc) { SDE_ERROR("failed to create one or more properties\n"); @@ -819,6 +972,7 @@ error_destroy_property: error_unregister_conn: drm_connector_unregister(&c_conn->base); error_cleanup_fence: + mutex_destroy(&c_conn->lock); sde_fence_deinit(&c_conn->retire_fence); error_cleanup_conn: drm_connector_cleanup(&c_conn->base); diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h index b76ce0aaf577..0f563ac25da8 100644 --- a/drivers/gpu/drm/msm/sde/sde_connector.h +++ b/drivers/gpu/drm/msm/sde/sde_connector.h @@ -153,6 +153,20 @@ struct sde_connector_ops { */ enum sde_csc_type (*get_csc_type)(struct drm_connector *connector, void *display); + + /** + * set_power - update dpms setting + * @connector: Pointer to drm connector structure + * @power_mode: One of the following, + * SDE_MODE_DPMS_ON + * SDE_MODE_DPMS_LP1 + * SDE_MODE_DPMS_LP2 + * SDE_MODE_DPMS_OFF + * @display: Pointer to private display structure + * Returns: Zero on success + */ + int (*set_power)(struct drm_connector *connector, + int power_mode, void *display); }; /** @@ -165,8 +179,12 @@ struct sde_connector_ops { * @mmu_secure: MMU id for secure buffers * @mmu_unsecure: MMU id for unsecure buffers * @name: ASCII name of connector + * @lock: Mutex lock object for this structure * @retire_fence: Retire fence reference * @ops: Local callback function pointer table + * @dpms_mode: DPMS property setting from user space + * @lp_mode: LP property setting from user space + * @last_panel_power_mode: Last consolidated dpms/lp mode setting * @property_info: Private structure for generic property handling * @property_data: Array of private data for generic property handling * @blob_caps: Pointer to blob structure for 'capabilities' property @@ -185,8 +203,12 @@ struct sde_connector { char name[SDE_CONNECTOR_NAME_SIZE]; + struct mutex lock; struct sde_fence retire_fence; struct sde_connector_ops ops; + int dpms_mode; + int lp_mode; + int last_panel_power_mode; struct msm_property_info property_info; struct msm_property_data property_data[CONNECTOR_PROP_COUNT]; @@ -361,5 +383,29 @@ bool sde_connector_mode_needs_full_range(struct drm_connector *connector); */ enum sde_csc_type sde_connector_get_csc_type(struct drm_connector *conn); +/** + * sde_connector_get_dpms - query dpms setting + * @connector: Pointer to drm connector structure + * Returns: Current DPMS setting for connector + */ +int sde_connector_get_dpms(struct drm_connector *connector); + +/** + * sde_connector_needs_offset - adjust the output fence offset based on + * display type + * @connector: Pointer to drm connector object + * Returns: true if offset is required, false for all other cases. 
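As the description above says, only non-virtual (physical) connectors need the extra output-fence offset; the CRTC get_property path further down adds it on top of the user-supplied CRTC_PROP_OUTPUT_FENCE_OFFSET before calling sde_fence_create(). A rough standalone C sketch of that computation follows; the names and the connector kinds are illustrative stand-ins, not the driver's types:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative connector kinds; only the virtual/writeback one skips
 * the extra fence offset, mirroring sde_connector_needs_offset().
 */
enum conn_kind { CONN_DSI, CONN_HDMI, CONN_VIRTUAL };

static bool needs_offset(enum conn_kind kind)
{
	return kind != CONN_VIRTUAL;
}

/* Offset handed to fence creation: the user-supplied offset plus one
 * extra count if any connector on the crtc is physical. Names here are
 * stand-ins for the crtc get_property logic, not the driver's API.
 */
static unsigned int output_fence_offset(unsigned int user_offset,
					const enum conn_kind *conns,
					int num_conns)
{
	unsigned int conn_offset = 0;
	int i;

	for (i = 0; i < num_conns; i++) {
		if (needs_offset(conns[i])) {
			conn_offset = 1;
			break;
		}
	}

	return user_offset + conn_offset;
}

int main(void)
{
	enum conn_kind conns[] = { CONN_DSI };

	printf("fence offset: %u\n", output_fence_offset(0, conns, 1));
	return 0;
}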
+ */ +static inline bool sde_connector_needs_offset(struct drm_connector *connector) +{ + struct sde_connector *c_conn; + + if (!connector) + return false; + + c_conn = to_sde_connector(connector); + return (c_conn->connector_type != DRM_MODE_CONNECTOR_VIRTUAL); +} + #endif /* _SDE_CONNECTOR_H_ */ diff --git a/drivers/gpu/drm/msm/sde/sde_core_irq.c b/drivers/gpu/drm/msm/sde/sde_core_irq.c index dbfc2dd11a17..83c8982b2e00 100644 --- a/drivers/gpu/drm/msm/sde/sde_core_irq.c +++ b/drivers/gpu/drm/msm/sde/sde_core_irq.c @@ -32,7 +32,7 @@ static void sde_core_irq_callback_handler(void *arg, int irq_idx) struct sde_irq_callback *cb; unsigned long irq_flags; - SDE_DEBUG("irq_idx=%d\n", irq_idx); + pr_debug("irq_idx=%d\n", irq_idx); if (list_empty(&irq_obj->irq_cb_tbl[irq_idx])) SDE_ERROR("irq_idx=%d has no registered callback\n", irq_idx); diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c index 2e9e2192670d..30e9d688396f 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.c +++ b/drivers/gpu/drm/msm/sde/sde_crtc.c @@ -600,23 +600,14 @@ void sde_crtc_complete_commit(struct drm_crtc *crtc, { struct sde_crtc *sde_crtc; struct sde_crtc_state *cstate; - struct drm_connector *conn; - struct sde_connector *c_conn; - struct drm_device *dev; - struct msm_drm_private *priv; - struct sde_kms *sde_kms; int i; - if (!crtc || !crtc->state || !crtc->dev) { + if (!crtc || !crtc->state) { SDE_ERROR("invalid crtc\n"); return; } - dev = crtc->dev; - priv = dev->dev_private; - sde_crtc = to_sde_crtc(crtc); - sde_kms = _sde_crtc_get_kms(crtc); cstate = to_sde_crtc_state(crtc->state); SDE_EVT32(DRMID(crtc)); @@ -625,22 +616,6 @@ void sde_crtc_complete_commit(struct drm_crtc *crtc, for (i = 0; i < cstate->num_connectors; ++i) sde_connector_complete_commit(cstate->connectors[i]); - - if (sde_splash_get_lk_complete_status(&sde_kms->splash_info)) { - mutex_lock(&dev->mode_config.mutex); - drm_for_each_connector(conn, crtc->dev) { - if (conn->state->crtc != crtc) - continue; - - c_conn = to_sde_connector(conn); - - sde_splash_clean_up_free_resource(priv->kms, - &priv->phandle, - c_conn->connector_type, - c_conn->display); - } - mutex_unlock(&dev->mode_config.mutex); - } } /** @@ -935,6 +910,15 @@ void sde_crtc_commit_kickoff(struct drm_crtc *crtc) sde_kms = _sde_crtc_get_kms(crtc); priv = sde_kms->dev->dev_private; + /* + * If no mixers has been allocated in sde_crtc_atomic_check(), + * it means we are trying to start a CRTC whose state is disabled: + * nothing else needs to be done. 
+ */ + if (unlikely(!sde_crtc->num_mixers)) + return; + + SDE_ATRACE_BEGIN("crtc_commit"); list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { if (encoder->crtc != crtc) @@ -979,8 +963,10 @@ end: * _sde_crtc_vblank_enable_nolock - update power resource and vblank request * @sde_crtc: Pointer to sde crtc structure * @enable: Whether to enable/disable vblanks + * + * @Return: error code */ -static void _sde_crtc_vblank_enable_nolock( +static int _sde_crtc_vblank_enable_no_lock( struct sde_crtc *sde_crtc, bool enable) { struct drm_device *dev; @@ -988,10 +974,11 @@ static void _sde_crtc_vblank_enable_nolock( struct drm_encoder *enc; struct msm_drm_private *priv; struct sde_kms *sde_kms; + int ret = 0; if (!sde_crtc) { SDE_ERROR("invalid crtc\n"); - return; + return -EINVAL; } crtc = &sde_crtc->base; @@ -1000,13 +987,16 @@ static void _sde_crtc_vblank_enable_nolock( if (!priv->kms) { SDE_ERROR("invalid kms\n"); - return; + return -EINVAL; } sde_kms = to_sde_kms(priv->kms); if (enable) { - sde_power_resource_enable(&priv->phandle, + ret = sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true); + if (ret) + return ret; + list_for_each_entry(enc, &dev->mode_config.encoder_list, head) { if (enc->crtc != crtc) continue; @@ -1025,9 +1015,11 @@ static void _sde_crtc_vblank_enable_nolock( sde_encoder_register_vblank_callback(enc, NULL, NULL); } - sde_power_resource_enable(&priv->phandle, + ret = sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false); } + + return ret; } /** @@ -1073,8 +1065,8 @@ static void _sde_crtc_set_suspend(struct drm_crtc *crtc, bool enable) if (sde_crtc->suspend == enable) SDE_DEBUG("crtc%d suspend already set to %d, ignoring update\n", crtc->base.id, enable); - else if (atomic_read(&sde_crtc->vblank_refcount) != 0) - _sde_crtc_vblank_enable_nolock(sde_crtc, !enable); + else if (sde_crtc->enabled && sde_crtc->vblank_requested) + _sde_crtc_vblank_enable_no_lock(sde_crtc, !enable); sde_crtc->suspend = enable; @@ -1158,39 +1150,13 @@ static void sde_crtc_reset(struct drm_crtc *crtc) crtc->state = &cstate->base; } -static int _sde_crtc_vblank_no_lock(struct sde_crtc *sde_crtc, bool en) -{ - if (!sde_crtc) { - SDE_ERROR("invalid crtc\n"); - return -EINVAL; - } else if (en && atomic_inc_return(&sde_crtc->vblank_refcount) == 1) { - SDE_DEBUG("crtc%d vblank enable\n", sde_crtc->base.base.id); - if (!sde_crtc->suspend) - _sde_crtc_vblank_enable_nolock(sde_crtc, true); - } else if (!en && atomic_read(&sde_crtc->vblank_refcount) < 1) { - SDE_ERROR("crtc%d invalid vblank disable\n", - sde_crtc->base.base.id); - return -EINVAL; - } else if (!en && atomic_dec_return(&sde_crtc->vblank_refcount) == 0) { - SDE_DEBUG("crtc%d vblank disable\n", sde_crtc->base.base.id); - if (!sde_crtc->suspend) - _sde_crtc_vblank_enable_nolock(sde_crtc, false); - } else { - SDE_DEBUG("crtc%d vblank %s refcount:%d\n", - sde_crtc->base.base.id, - en ? 
"enable" : "disable", - atomic_read(&sde_crtc->vblank_refcount)); - } - - return 0; -} - static void sde_crtc_disable(struct drm_crtc *crtc) { struct drm_encoder *encoder; struct sde_crtc *sde_crtc; struct sde_kms *sde_kms; struct msm_drm_private *priv; + int ret = 0; if (!crtc || !crtc->dev || !crtc->state) { SDE_ERROR("invalid crtc\n"); @@ -1210,17 +1176,19 @@ static void sde_crtc_disable(struct drm_crtc *crtc) _sde_crtc_set_suspend(crtc, true); mutex_lock(&sde_crtc->crtc_lock); - SDE_EVT32(DRMID(crtc)); + SDE_EVT32(DRMID(crtc), sde_crtc->enabled, sde_crtc->suspend, + sde_crtc->vblank_requested); - if (atomic_read(&sde_crtc->vblank_refcount) && !sde_crtc->suspend) { - SDE_ERROR("crtc%d invalid vblank refcount\n", - crtc->base.id); - SDE_EVT32(DRMID(crtc), atomic_read(&sde_crtc->vblank_refcount)); - while (atomic_read(&sde_crtc->vblank_refcount)) - if (_sde_crtc_vblank_no_lock(sde_crtc, false)) - break; + if (sde_crtc->enabled && !sde_crtc->suspend && + sde_crtc->vblank_requested) { + ret = _sde_crtc_vblank_enable_no_lock(sde_crtc, false); + if (ret) + SDE_ERROR("%s vblank enable failed: %d\n", + sde_crtc->name, ret); } + sde_crtc->enabled = false; + if (atomic_read(&sde_crtc->frame_pending)) { /* release bandwidth and other resources */ SDE_ERROR("crtc%d invalid frame pending\n", @@ -1255,6 +1223,7 @@ static void sde_crtc_enable(struct drm_crtc *crtc) struct sde_hw_mixer_cfg cfg; struct drm_encoder *encoder; int i; + int ret = 0; if (!crtc) { SDE_ERROR("invalid crtc\n"); @@ -1283,6 +1252,19 @@ static void sde_crtc_enable(struct drm_crtc *crtc) sde_crtc_request_flip_cb, (void *)crtc); } + mutex_lock(&sde_crtc->crtc_lock); + SDE_EVT32(DRMID(crtc), sde_crtc->enabled, sde_crtc->suspend, + sde_crtc->vblank_requested); + if (!sde_crtc->enabled && !sde_crtc->suspend && + sde_crtc->vblank_requested) { + ret = _sde_crtc_vblank_enable_no_lock(sde_crtc, true); + if (ret) + SDE_ERROR("%s vblank enable failed: %d\n", + sde_crtc->name, ret); + } + sde_crtc->enabled = true; + mutex_unlock(&sde_crtc->crtc_lock); + for (i = 0; i < sde_crtc->num_mixers; i++) { lm = mixer[i].hw_lm; cfg.out_width = sde_crtc_mixer_width(sde_crtc, mode); @@ -1329,8 +1311,7 @@ static int sde_crtc_atomic_check(struct drm_crtc *crtc, struct drm_display_mode *mode; int cnt = 0, rc = 0, mixer_width, i, z_pos; - int left_crtc_zpos_cnt[SDE_STAGE_MAX] = {0}; - int right_crtc_zpos_cnt[SDE_STAGE_MAX] = {0}; + int left_zpos_cnt = 0, right_zpos_cnt = 0; if (!crtc) { SDE_ERROR("invalid crtc\n"); @@ -1347,6 +1328,10 @@ static int sde_crtc_atomic_check(struct drm_crtc *crtc, mode = &state->adjusted_mode; SDE_DEBUG("%s: check", sde_crtc->name); + /* force a full mode set if active state changed */ + if (state->active_changed) + state->mode_changed = true; + mixer_width = sde_crtc_mixer_width(sde_crtc, mode); /* get plane state for all drm planes associated with crtc state */ @@ -1380,11 +1365,12 @@ static int sde_crtc_atomic_check(struct drm_crtc *crtc, } } + /* assign mixer stages based on sorted zpos property */ + sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL); + if (!sde_is_custom_client()) { int stage_old = pstates[0].stage; - /* assign mixer stages based on sorted zpos property */ - sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL); z_pos = 0; for (i = 0; i < cnt; i++) { if (stage_old != pstates[i].stage) @@ -1394,8 +1380,14 @@ static int sde_crtc_atomic_check(struct drm_crtc *crtc, } } + z_pos = -1; for (i = 0; i < cnt; i++) { - z_pos = pstates[i].stage; + /* reset counts at every new blend stage */ + if 
(pstates[i].stage != z_pos) { + left_zpos_cnt = 0; + right_zpos_cnt = 0; + z_pos = pstates[i].stage; + } /* verify z_pos setting before using it */ if (z_pos >= SDE_STAGE_MAX - SDE_STAGE_0) { @@ -1404,22 +1396,24 @@ static int sde_crtc_atomic_check(struct drm_crtc *crtc, rc = -EINVAL; goto end; } else if (pstates[i].drm_pstate->crtc_x < mixer_width) { - if (left_crtc_zpos_cnt[z_pos] == 2) { - SDE_ERROR("> 2 plane @ stage%d on left\n", + if (left_zpos_cnt == 2) { + SDE_ERROR("> 2 planes @ stage %d on left\n", z_pos); rc = -EINVAL; goto end; } - left_crtc_zpos_cnt[z_pos]++; + left_zpos_cnt++; + } else { - if (right_crtc_zpos_cnt[z_pos] == 2) { - SDE_ERROR("> 2 plane @ stage%d on right\n", + if (right_zpos_cnt == 2) { + SDE_ERROR("> 2 planes @ stage %d on right\n", z_pos); rc = -EINVAL; goto end; } - right_crtc_zpos_cnt[z_pos]++; + right_zpos_cnt++; } + pstates[i].sde_pstate->stage = z_pos + SDE_STAGE_0; SDE_DEBUG("%s: zpos %d", sde_crtc->name, z_pos); } @@ -1431,6 +1425,49 @@ static int sde_crtc_atomic_check(struct drm_crtc *crtc, goto end; } + /* + * enforce pipe priority restrictions + * use pstates sorted by stage to check planes on same stage + * we assume that all pipes are in source split so its valid to compare + * without taking into account left/right mixer placement + */ + for (i = 1; i < cnt; i++) { + struct plane_state *prv_pstate, *cur_pstate; + int32_t prv_x, cur_x, prv_id, cur_id; + + prv_pstate = &pstates[i - 1]; + cur_pstate = &pstates[i]; + if (prv_pstate->stage != cur_pstate->stage) + continue; + + prv_x = prv_pstate->drm_pstate->crtc_x; + cur_x = cur_pstate->drm_pstate->crtc_x; + prv_id = prv_pstate->sde_pstate->base.plane->base.id; + cur_id = cur_pstate->sde_pstate->base.plane->base.id; + + /* + * Planes are enumerated in pipe-priority order such that planes + * with lower drm_id must be left-most in a shared blend-stage + * when using source split. 
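The staging comment above describes two constraints checked on the stage-sorted plane list: no more than two planes per blend stage on either layer-mixer half, and, when two pipes share a stage under source split, the plane with the lower drm id must be the left-most one. The second rule is small enough to exercise on its own; the struct and the plane ids/positions below are made up for illustration:

#include <stdio.h>

/* Hypothetical, pre-sorted plane state: sorted by stage (zpos). */
struct pstate {
	int stage;	/* blend stage / zpos */
	int x;		/* crtc_x of the plane */
	int id;		/* drm plane object id */
};

/* Enforce the source-split pipe-priority rule: on a shared stage, the
 * plane with the lower id must sit at the lower x position.
 */
static int check_pipe_priority(const struct pstate *p, int cnt)
{
	int i;

	for (i = 1; i < cnt; i++) {
		if (p[i].stage != p[i - 1].stage)
			continue;

		if ((p[i].x > p[i - 1].x && p[i].id < p[i - 1].id) ||
		    (p[i].x < p[i - 1].x && p[i].id > p[i - 1].id)) {
			printf("stage %d: plane%d@%d vs plane%d@%d invalid\n",
			       p[i].stage, p[i - 1].id, p[i - 1].x,
			       p[i].id, p[i].x);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	/* plane 31 sits left of plane 30 on the same stage -> rejected */
	struct pstate bad[]  = { { 2, 0, 31 }, { 2, 960, 30 } };
	/* swap the ids and the same pair passes */
	struct pstate good[] = { { 2, 0, 30 }, { 2, 960, 31 } };

	printf("bad:  %d\n", check_pipe_priority(bad, 2));
	printf("good: %d\n", check_pipe_priority(good, 2));
	return 0;
}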
+ */ + if (cur_x > prv_x && cur_id < prv_id) { + SDE_ERROR( + "shared z_pos %d lower id plane%d @ x%d should be left of plane%d @ x %d\n", + cur_pstate->stage, cur_id, cur_x, + prv_id, prv_x); + rc = -EINVAL; + goto end; + } else if (cur_x < prv_x && cur_id > prv_id) { + SDE_ERROR( + "shared z_pos %d lower id plane%d @ x%d should be left of plane%d @ x %d\n", + cur_pstate->stage, prv_id, prv_x, + cur_id, cur_x); + rc = -EINVAL; + goto end; + } + } + + end: return rc; } @@ -1438,7 +1475,7 @@ end: int sde_crtc_vblank(struct drm_crtc *crtc, bool en) { struct sde_crtc *sde_crtc; - int rc; + int ret; if (!crtc) { SDE_ERROR("invalid crtc\n"); @@ -1447,10 +1484,19 @@ int sde_crtc_vblank(struct drm_crtc *crtc, bool en) sde_crtc = to_sde_crtc(crtc); mutex_lock(&sde_crtc->crtc_lock); - rc = _sde_crtc_vblank_no_lock(sde_crtc, en); + SDE_EVT32(DRMID(&sde_crtc->base), en, sde_crtc->enabled, + sde_crtc->suspend, sde_crtc->vblank_requested); + if (sde_crtc->enabled && !sde_crtc->suspend) { + ret = _sde_crtc_vblank_enable_no_lock(sde_crtc, en); + if (ret) + SDE_ERROR("%s vblank enable failed: %d\n", + sde_crtc->name, ret); + } + + sde_crtc->vblank_requested = en; mutex_unlock(&sde_crtc->crtc_lock); - return rc; + return 0; } void sde_crtc_cancel_pending_flip(struct drm_crtc *crtc, @@ -1551,7 +1597,7 @@ static void sde_crtc_install_properties(struct drm_crtc *crtc, sde_kms_info_add_keyint(info, "max_mdp_clk", sde_kms->perf.max_core_clk_rate); msm_property_set_blob(&sde_crtc->property_info, &sde_crtc->blob_info, - info->data, info->len, CRTC_PROP_INFO); + info->data, SDE_KMS_INFO_DATALEN(info), CRTC_PROP_INFO); kfree(info); } @@ -1628,19 +1674,28 @@ static int sde_crtc_atomic_get_property(struct drm_crtc *crtc, struct sde_crtc *sde_crtc; struct sde_crtc_state *cstate; int i, ret = -EINVAL; + bool conn_offset = 0; if (!crtc || !state) { SDE_ERROR("invalid argument(s)\n"); } else { sde_crtc = to_sde_crtc(crtc); cstate = to_sde_crtc_state(state); + + for (i = 0; i < cstate->num_connectors; ++i) { + conn_offset = sde_connector_needs_offset( + cstate->connectors[i]); + if (conn_offset) + break; + } + i = msm_property_index(&sde_crtc->property_info, property); if (i == CRTC_PROP_OUTPUT_FENCE) { int offset = sde_crtc_get_property(cstate, CRTC_PROP_OUTPUT_FENCE_OFFSET); - ret = sde_fence_create( - &sde_crtc->output_fence, val, offset); + ret = sde_fence_create(&sde_crtc->output_fence, val, + offset + conn_offset); if (ret) SDE_ERROR("fence create failed\n"); } else { @@ -1763,8 +1818,7 @@ static int _sde_debugfs_status_show(struct seq_file *s, void *data) sde_crtc->vblank_cb_time = ktime_set(0, 0); } - seq_printf(s, "vblank_refcount:%d\n", - atomic_read(&sde_crtc->vblank_refcount)); + seq_printf(s, "vblank_enable:%d\n", sde_crtc->vblank_requested); mutex_unlock(&sde_crtc->crtc_lock); @@ -1892,7 +1946,6 @@ struct drm_crtc *sde_crtc_init(struct drm_device *dev, crtc = &sde_crtc->base; crtc->dev = dev; - atomic_set(&sde_crtc->vblank_refcount, 0); mutex_init(&sde_crtc->crtc_lock); spin_lock_init(&sde_crtc->spin_lock); diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h index 6b8483d574b1..0eed61580cd8 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.h +++ b/drivers/gpu/drm/msm/sde/sde_crtc.h @@ -81,8 +81,11 @@ struct sde_crtc_frame_event { * @debugfs_root : Parent of debugfs node * @vblank_cb_count : count of vblank callback since last reset * @vblank_cb_time : ktime at vblank count reset - * @vblank_refcount : reference count for vblank enable request + * @vblank_requested : whether the 
user has requested vblank events * @suspend : whether or not a suspend operation is in progress + * @enabled : whether the SDE CRTC is currently enabled. updated in the + * commit-thread, not state-swap time which is earlier, so + * safe to make decisions on during VBLANK on/off work * @feature_list : list of color processing features supported on a crtc * @active_list : list of color processing features are active * @dirty_list : list of color processing features are dirty @@ -117,8 +120,9 @@ struct sde_crtc { u32 vblank_cb_count; ktime_t vblank_cb_time; - atomic_t vblank_refcount; + bool vblank_requested; bool suspend; + bool enabled; struct list_head feature_list; struct list_head active_list; diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c index 23fb79241d84..cb8b349e72c7 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder.c @@ -506,11 +506,6 @@ static void sde_encoder_virt_disable(struct drm_encoder *drm_enc) SDE_EVT32(DRMID(drm_enc)); - if (atomic_xchg(&sde_enc->frame_done_timeout, 0)) { - SDE_ERROR("enc%d timeout pending\n", drm_enc->base.id); - del_timer_sync(&sde_enc->frame_done_timer); - } - for (i = 0; i < sde_enc->num_phys_encs; i++) { struct sde_encoder_phys *phys = sde_enc->phys_encs[i]; @@ -523,6 +518,12 @@ static void sde_encoder_virt_disable(struct drm_encoder *drm_enc) } } + /* after phys waits for frame-done, should be no more frames pending */ + if (atomic_xchg(&sde_enc->frame_done_timeout, 0)) { + SDE_ERROR("enc%d timeout pending\n", drm_enc->base.id); + del_timer_sync(&sde_enc->frame_done_timer); + } + if (sde_enc->cur_master && sde_enc->cur_master->ops.disable) sde_enc->cur_master->ops.disable(sde_enc->cur_master); diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c index 69a4237f7b67..d58c06de1684 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c @@ -281,23 +281,40 @@ static void sde_encoder_phys_vid_vblank_irq(void *arg, int irq_idx) { struct sde_encoder_phys_vid *vid_enc = arg; struct sde_encoder_phys *phys_enc; + struct sde_hw_ctl *hw_ctl; unsigned long lock_flags; - int new_cnt; + u32 flush_register = 0; + int new_cnt = -1, old_cnt = -1; if (!vid_enc) return; phys_enc = &vid_enc->base; + hw_ctl = phys_enc->hw_ctl; + if (phys_enc->parent_ops.handle_vblank_virt) phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent, phys_enc); + old_cnt = atomic_read(&phys_enc->pending_kickoff_cnt); + + /* + * only decrement the pending flush count if we've actually flushed + * hardware. 
due to sw irq latency, vblank may have already happened + * so we need to double-check with hw that it accepted the flush bits + */ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags); - new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0); - SDE_EVT32_IRQ(DRMID(phys_enc->parent), vid_enc->hw_intf->idx - INTF_0, - new_cnt); + if (hw_ctl && hw_ctl->ops.get_flush_register) + flush_register = hw_ctl->ops.get_flush_register(hw_ctl); + + if (flush_register == 0) + new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, + -1, 0); spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags); + SDE_EVT32_IRQ(DRMID(phys_enc->parent), vid_enc->hw_intf->idx - INTF_0, + old_cnt, new_cnt, flush_register); + /* Signal any waiting atomic commit thread */ wake_up_all(&phys_enc->pending_kickoff_wq); } @@ -700,6 +717,35 @@ static int sde_encoder_phys_vid_wait_for_commit_done( return ret; } +static void sde_encoder_phys_vid_prepare_for_kickoff( + struct sde_encoder_phys *phys_enc) +{ + struct sde_encoder_phys_vid *vid_enc; + struct sde_hw_ctl *ctl; + int rc; + + if (!phys_enc) { + SDE_ERROR("invalid encoder\n"); + return; + } + vid_enc = to_sde_encoder_phys_vid(phys_enc); + + ctl = phys_enc->hw_ctl; + if (!ctl || !ctl->ops.wait_reset_status) + return; + + /* + * hw supports hardware initiated ctl reset, so before we kickoff a new + * frame, need to check and wait for hw initiated ctl reset completion + */ + rc = ctl->ops.wait_reset_status(ctl); + if (rc) { + SDE_ERROR_VIDENC(vid_enc, "ctl %d reset failure: %d\n", + ctl->idx, rc); + SDE_DBG_DUMP("panic"); + } +} + static void sde_encoder_phys_vid_disable(struct sde_encoder_phys *phys_enc) { struct msm_drm_private *priv; @@ -832,6 +878,7 @@ static void sde_encoder_phys_vid_init_ops(struct sde_encoder_phys_ops *ops) ops->get_hw_resources = sde_encoder_phys_vid_get_hw_resources; ops->control_vblank_irq = sde_encoder_phys_vid_control_vblank_irq; ops->wait_for_commit_done = sde_encoder_phys_vid_wait_for_commit_done; + ops->prepare_for_kickoff = sde_encoder_phys_vid_prepare_for_kickoff; ops->handle_post_kickoff = sde_encoder_phys_vid_handle_post_kickoff; ops->needs_single_flush = sde_encoder_phys_vid_needs_single_flush; ops->setup_misr = sde_encoder_phys_vid_setup_misr; diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c index a185eb338134..ed9a6ea37397 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c @@ -27,11 +27,11 @@ /** * Max hardware block in certain hardware. For ex: sspp pipes - * can have QSEED, pcc, igc, pa, csc, etc. This count is max - * 12 based on software design. It should be increased if any of the + * can have QSEED, pcc, igc, pa, csc, qos entries, etc. This count is + * 64 based on software design. It should be increased if any of the * hardware block has more subblocks. 
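Raising MAX_SDE_HW_BLK to 64 is paired with explicit length checks before the u32-array reads, so an oversized devicetree entry now fails with -E2BIG instead of writing past the destination array. Reduced to its essentials (with an invented property name and a plain array standing in for the devicetree read), the guard looks like:

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define MAX_BLK 64	/* stand-in for MAX_SDE_HW_BLK */

/* Copy up to MAX_BLK u32 values, refusing anything larger up front.
 * In the driver the source is a DT property, not a plain array.
 */
static int read_u32_array(const char *prop_name, unsigned int *dst,
			  const unsigned int *src, int len)
{
	if (len > MAX_BLK) {
		fprintf(stderr, "prop %s: len %d out of bounds\n",
			prop_name, len);
		return -E2BIG;
	}

	memcpy(dst, src, len * sizeof(*dst));
	return 0;
}

int main(void)
{
	unsigned int offsets[MAX_BLK] = { 0 };
	unsigned int src[2] = { 0x5000, 0x7000 };

	printf("rc = %d\n", read_u32_array("example-prop", offsets, src, 2));
	printf("rc = %d\n", read_u32_array("example-prop", offsets, src, 100));
	return 0;
}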
*/ -#define MAX_SDE_HW_BLK 12 +#define MAX_SDE_HW_BLK 64 /* each entry will have register address and bit offset in that register */ #define MAX_BIT_OFFSET 2 @@ -134,6 +134,7 @@ enum { enum { VIG_QSEED_OFF, + VIG_QSEED_LEN, VIG_CSC_OFF, VIG_HSIC_PROP, VIG_MEMCOLOR_PROP, @@ -143,6 +144,7 @@ enum { enum { RGB_SCALER_OFF, + RGB_SCALER_LEN, RGB_PCC_PROP, RGB_PROP_MAX, }; @@ -301,6 +303,7 @@ static struct sde_prop_type sspp_prop[] = { static struct sde_prop_type vig_prop[] = { {VIG_QSEED_OFF, "qcom,sde-vig-qseed-off", false, PROP_TYPE_U32}, + {VIG_QSEED_LEN, "qcom,sde-vig-qseed-size", false, PROP_TYPE_U32}, {VIG_CSC_OFF, "qcom,sde-vig-csc-off", false, PROP_TYPE_U32}, {VIG_HSIC_PROP, "qcom,sde-vig-hsic", false, PROP_TYPE_U32_ARRAY}, {VIG_MEMCOLOR_PROP, "qcom,sde-vig-memcolor", false, @@ -310,6 +313,7 @@ static struct sde_prop_type vig_prop[] = { static struct sde_prop_type rgb_prop[] = { {RGB_SCALER_OFF, "qcom,sde-rgb-scaler-off", false, PROP_TYPE_U32}, + {RGB_SCALER_LEN, "qcom,sde-rgb-scaler-size", false, PROP_TYPE_U32}, {RGB_PCC_PROP, "qcom,sde-rgb-pcc", false, PROP_TYPE_U32_ARRAY}, }; @@ -440,8 +444,16 @@ static uint32_t _sde_copy_formats( static int _parse_dt_u32_handler(struct device_node *np, char *prop_name, u32 *offsets, int len, bool mandatory) { - int rc = of_property_read_u32_array(np, prop_name, offsets, len); + int rc = -EINVAL; + if (len > MAX_SDE_HW_BLK) { + SDE_ERROR( + "prop: %s tries out of bound access for u32 array read len: %d\n", + prop_name, len); + return -E2BIG; + } + + rc = of_property_read_u32_array(np, prop_name, offsets, len); if (rc && mandatory) SDE_ERROR("mandatory prop: %s u32 array read len:%d\n", prop_name, len); @@ -463,6 +475,14 @@ static int _parse_dt_bit_offset(struct device_node *np, if (arr) { len /= sizeof(u32); len &= ~0x1; + + if (len > (MAX_SDE_HW_BLK * MAX_BIT_OFFSET)) { + SDE_ERROR( + "prop: %s len: %d will lead to out of bound access\n", + prop_name, len / MAX_BIT_OFFSET); + return -E2BIG; + } + for (i = 0, j = 0; i < len; j++) { PROP_BITVALUE_ACCESS(prop_value, prop_index, j, 0) = be32_to_cpu(arr[i]); @@ -497,8 +517,8 @@ static int _validate_dt_entry(struct device_node *np, sde_prop[0].prop_name); if ((*off_count > MAX_BLOCKS) || (*off_count < 0)) { if (sde_prop[0].is_mandatory) { - SDE_ERROR("invalid hw offset prop name:%s\"\ - count: %d\n", + SDE_ERROR( + "invalid hw offset prop name:%s count: %d\n", sde_prop[0].prop_name, *off_count); rc = -EINVAL; } @@ -541,8 +561,9 @@ static int _validate_dt_entry(struct device_node *np, sde_prop[i].type); break; } - SDE_DEBUG("prop id:%d prop name:%s prop type:%d \"\ - prop_count:%d\n", i, sde_prop[i].prop_name, + SDE_DEBUG( + "prop id:%d prop name:%s prop type:%d prop_count:%d\n", + i, sde_prop[i].prop_name, sde_prop[i].type, prop_count[i]); if (rc && sde_prop[i].is_mandatory && @@ -560,14 +581,16 @@ static int _validate_dt_entry(struct device_node *np, if (off_count && (prop_count[i] != *off_count) && sde_prop[i].is_mandatory) { - SDE_ERROR("prop:%s count:%d is different compared to \"\ - offset array:%d\n", sde_prop[i].prop_name, + SDE_ERROR( + "prop:%s count:%d is different compared to offset array:%d\n", + sde_prop[i].prop_name, prop_count[i], *off_count); rc = -EINVAL; goto end; } else if (off_count && prop_count[i] != *off_count) { - SDE_DEBUG("prop:%s count:%d is different compared to \"\ - offset array:%d\n", sde_prop[i].prop_name, + SDE_DEBUG( + "prop:%s count:%d is different compared to offset array:%d\n", + sde_prop[i].prop_name, prop_count[i], *off_count); rc = 0; prop_count[i] = 0; @@ -603,8 
+626,9 @@ static int _read_dt_entry(struct device_node *np, case PROP_TYPE_U32: rc = of_property_read_u32(np, sde_prop[i].prop_name, &PROP_VALUE_ACCESS(prop_value, i, 0)); - SDE_DEBUG("prop id:%d prop name:%s prop type:%d \"\ - value:0x%x\n", i, sde_prop[i].prop_name, + SDE_DEBUG( + "prop id:%d prop name:%s prop type:%d value:0x%x\n", + i, sde_prop[i].prop_name, sde_prop[i].type, PROP_VALUE_ACCESS(prop_value, i, 0)); if (rc) @@ -614,8 +638,9 @@ static int _read_dt_entry(struct device_node *np, PROP_VALUE_ACCESS(prop_value, i, 0) = of_property_read_bool(np, sde_prop[i].prop_name); - SDE_DEBUG("prop id:%d prop name:%s prop type:%d \"\ - value:0x%x\n", i, sde_prop[i].prop_name, + SDE_DEBUG( + "prop id:%d prop name:%s prop type:%d value:0x%x\n", + i, sde_prop[i].prop_name, sde_prop[i].type, PROP_VALUE_ACCESS(prop_value, i, 0)); break; @@ -624,8 +649,9 @@ static int _read_dt_entry(struct device_node *np, &PROP_VALUE_ACCESS(prop_value, i, 0), prop_count[i], sde_prop[i].is_mandatory); if (rc && sde_prop[i].is_mandatory) { - SDE_ERROR("%s prop validation success but \"\ - read failed\n", sde_prop[i].prop_name); + SDE_ERROR( + "%s prop validation success but read failed\n", + sde_prop[i].prop_name); prop_exists[i] = false; goto end; } else { @@ -647,19 +673,21 @@ static int _read_dt_entry(struct device_node *np, prop_value, i, prop_count[i], sde_prop[i].is_mandatory); if (rc && sde_prop[i].is_mandatory) { - SDE_ERROR("%s prop validation success but \"\ - read failed\n", sde_prop[i].prop_name); + SDE_ERROR( + "%s prop validation success but read failed\n", + sde_prop[i].prop_name); prop_exists[i] = false; goto end; } else { if (rc) prop_exists[i] = false; - SDE_DEBUG("prop id:%d prop name:%s prop \"\ - type:%d", i, sde_prop[i].prop_name, + SDE_DEBUG( + "prop id:%d prop name:%s prop type:%d", + i, sde_prop[i].prop_name, sde_prop[i].type); for (j = 0; j < prop_count[i]; j++) - SDE_DEBUG(" count[%d]: bit:0x%x \"\ - off:0x%x \n", j, + SDE_DEBUG( + "count[%d]: bit:0x%x off:0x%x\n", j, PROP_BITVALUE_ACCESS(prop_value, i, j, 0), PROP_BITVALUE_ACCESS(prop_value, @@ -691,6 +719,8 @@ static void _sde_sspp_setup_vig(struct sde_mdss_cfg *sde_cfg, sblk->maxdwnscale = MAX_SSPP_DOWNSCALE; sblk->format_list = plane_formats_yuv; sspp->id = SSPP_VIG0 + *vig_count; + snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u", + sspp->id - SSPP_VIG0); sspp->clk_ctrl = SDE_CLK_CTRL_VIG0 + *vig_count; sspp->type = SSPP_TYPE_VIG; set_bit(SDE_SSPP_QOS, &sspp->features); @@ -704,14 +734,24 @@ static void _sde_sspp_setup_vig(struct sde_mdss_cfg *sde_cfg, sblk->scaler_blk.id = SDE_SSPP_SCALER_QSEED2; sblk->scaler_blk.base = PROP_VALUE_ACCESS(prop_value, VIG_QSEED_OFF, 0); - } else if (sde_cfg->qseed_type == SDE_SSPP_SCALER_QSEED3) { + sblk->scaler_blk.len = PROP_VALUE_ACCESS(prop_value, + VIG_QSEED_LEN, 0); + snprintf(sblk->scaler_blk.name, SDE_HW_BLK_NAME_LEN, + "sspp_scaler%u", sspp->id - SSPP_VIG0); + } else if (sde_cfg->qseed_type == SDE_SSPP_SCALER_QSEED3) { set_bit(SDE_SSPP_SCALER_QSEED3, &sspp->features); sblk->scaler_blk.id = SDE_SSPP_SCALER_QSEED3; sblk->scaler_blk.base = PROP_VALUE_ACCESS(prop_value, VIG_QSEED_OFF, 0); + sblk->scaler_blk.len = PROP_VALUE_ACCESS(prop_value, + VIG_QSEED_LEN, 0); + snprintf(sblk->scaler_blk.name, SDE_HW_BLK_NAME_LEN, + "sspp_scaler%u", sspp->id - SSPP_VIG0); } sblk->csc_blk.id = SDE_SSPP_CSC; + snprintf(sblk->csc_blk.name, SDE_HW_BLK_NAME_LEN, + "sspp_csc%u", sspp->id - SSPP_VIG0); if (sde_cfg->csc_type == SDE_SSPP_CSC) { set_bit(SDE_SSPP_CSC, &sspp->features); sblk->csc_blk.base = 
PROP_VALUE_ACCESS(prop_value, @@ -723,6 +763,8 @@ static void _sde_sspp_setup_vig(struct sde_mdss_cfg *sde_cfg, } sblk->hsic_blk.id = SDE_SSPP_HSIC; + snprintf(sblk->hsic_blk.name, SDE_HW_BLK_NAME_LEN, + "sspp_hsic%u", sspp->id - SSPP_VIG0); if (prop_exists[VIG_HSIC_PROP]) { sblk->hsic_blk.base = PROP_VALUE_ACCESS(prop_value, VIG_HSIC_PROP, 0); @@ -733,6 +775,8 @@ static void _sde_sspp_setup_vig(struct sde_mdss_cfg *sde_cfg, } sblk->memcolor_blk.id = SDE_SSPP_MEMCOLOR; + snprintf(sblk->memcolor_blk.name, SDE_HW_BLK_NAME_LEN, + "sspp_memcolor%u", sspp->id - SSPP_VIG0); if (prop_exists[VIG_MEMCOLOR_PROP]) { sblk->memcolor_blk.base = PROP_VALUE_ACCESS(prop_value, VIG_MEMCOLOR_PROP, 0); @@ -743,6 +787,8 @@ static void _sde_sspp_setup_vig(struct sde_mdss_cfg *sde_cfg, } sblk->pcc_blk.id = SDE_SSPP_PCC; + snprintf(sblk->pcc_blk.name, SDE_HW_BLK_NAME_LEN, + "sspp_pcc%u", sspp->id - SSPP_VIG0); if (prop_exists[VIG_PCC_PROP]) { sblk->pcc_blk.base = PROP_VALUE_ACCESS(prop_value, VIG_PCC_PROP, 0); @@ -762,6 +808,8 @@ static void _sde_sspp_setup_rgb(struct sde_mdss_cfg *sde_cfg, sblk->maxdwnscale = MAX_SSPP_DOWNSCALE; sblk->format_list = plane_formats; sspp->id = SSPP_RGB0 + *rgb_count; + snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u", + sspp->id - SSPP_VIG0); sspp->clk_ctrl = SDE_CLK_CTRL_RGB0 + *rgb_count; sspp->type = SSPP_TYPE_RGB; set_bit(SDE_SSPP_QOS, &sspp->features); @@ -775,11 +823,19 @@ static void _sde_sspp_setup_rgb(struct sde_mdss_cfg *sde_cfg, sblk->scaler_blk.id = SDE_SSPP_SCALER_QSEED2; sblk->scaler_blk.base = PROP_VALUE_ACCESS(prop_value, RGB_SCALER_OFF, 0); + sblk->scaler_blk.len = PROP_VALUE_ACCESS(prop_value, + RGB_SCALER_LEN, 0); + snprintf(sblk->scaler_blk.name, SDE_HW_BLK_NAME_LEN, + "sspp_scaler%u", sspp->id - SSPP_VIG0); } else if (sde_cfg->qseed_type == SDE_SSPP_SCALER_QSEED3) { set_bit(SDE_SSPP_SCALER_RGB, &sspp->features); sblk->scaler_blk.id = SDE_SSPP_SCALER_QSEED3; sblk->scaler_blk.base = PROP_VALUE_ACCESS(prop_value, - RGB_SCALER_OFF, 0); + RGB_SCALER_LEN, 0); + sblk->scaler_blk.len = PROP_VALUE_ACCESS(prop_value, + SSPP_SCALE_SIZE, 0); + snprintf(sblk->scaler_blk.name, SDE_HW_BLK_NAME_LEN, + "sspp_scaler%u", sspp->id - SSPP_VIG0); } sblk->pcc_blk.id = SDE_SSPP_PCC; @@ -803,6 +859,8 @@ static void _sde_sspp_setup_cursor(struct sde_mdss_cfg *sde_cfg, sblk->maxdwnscale = SSPP_UNITY_SCALE; sblk->format_list = cursor_formats; sspp->id = SSPP_CURSOR0 + *cursor_count; + snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u", + sspp->id - SSPP_VIG0); sspp->clk_ctrl = SDE_CLK_CTRL_CURSOR0 + *cursor_count; sspp->type = SSPP_TYPE_CURSOR; (*cursor_count)++; @@ -819,6 +877,8 @@ static void _sde_sspp_setup_dma(struct sde_mdss_cfg *sde_cfg, sspp->id = SSPP_DMA0 + *dma_count; sspp->clk_ctrl = SDE_CLK_CTRL_DMA0 + *dma_count; sspp->type = SSPP_TYPE_DMA; + snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u", + sspp->id - SSPP_VIG0); set_bit(SDE_SSPP_QOS, &sspp->features); (*dma_count)++; snprintf(sspp->name, sizeof(sspp->name), "dma%d", *dma_count-1); @@ -917,6 +977,7 @@ static int sde_sspp_parse_dt(struct device_node *np, sspp->sblk = sblk; sspp->base = PROP_VALUE_ACCESS(prop_value, SSPP_OFF, i); + sspp->len = PROP_VALUE_ACCESS(prop_value, SSPP_SIZE, 0); sblk->maxlinewidth = sde_cfg->max_sspp_linewidth; set_bit(SDE_SSPP_SRC, &sspp->features); @@ -944,6 +1005,16 @@ static int sde_sspp_parse_dt(struct device_node *np, goto end; } + snprintf(sblk->src_blk.name, SDE_HW_BLK_NAME_LEN, "sspp_src_%u", + sspp->id - SSPP_VIG0); + + if (sspp->clk_ctrl >= SDE_CLK_CTRL_MAX) { + SDE_ERROR("%s: 
invalid clk ctrl: %d\n", + sblk->src_blk.name, sspp->clk_ctrl); + rc = -EINVAL; + goto end; + } + sblk->maxhdeciexp = MAX_HORZ_DECIMATION; sblk->maxvdeciexp = MAX_VERT_DECIMATION; @@ -1033,7 +1104,10 @@ static int sde_ctl_parse_dt(struct device_node *np, for (i = 0; i < off_count; i++) { ctl = sde_cfg->ctl + i; ctl->base = PROP_VALUE_ACCESS(prop_value, HW_OFF, i); + ctl->len = PROP_VALUE_ACCESS(prop_value, HW_LEN, 0); ctl->id = CTL_0 + i; + snprintf(ctl->name, SDE_HW_BLK_NAME_LEN, "ctl_%u", + ctl->id - CTL_0); if (i < MAX_SPLIT_DISPLAY_CTL) set_bit(SDE_CTL_SPLIT_DISPLAY, &ctl->features); @@ -1125,6 +1199,9 @@ static int sde_mixer_parse_dt(struct device_node *np, mixer->base = PROP_VALUE_ACCESS(prop_value, MIXER_OFF, i); mixer->len = PROP_VALUE_ACCESS(prop_value, MIXER_LEN, 0); mixer->id = LM_0 + i; + snprintf(mixer->name, SDE_HW_BLK_NAME_LEN, "lm_%u", + mixer->id - LM_0); + if (!prop_exists[MIXER_LEN]) mixer->len = DEFAULT_SDE_HW_BLOCK_LEN; @@ -1211,6 +1288,9 @@ static int sde_intf_parse_dt(struct device_node *np, intf->base = PROP_VALUE_ACCESS(prop_value, INTF_OFF, i); intf->len = PROP_VALUE_ACCESS(prop_value, INTF_LEN, 0); intf->id = INTF_0 + i; + snprintf(intf->name, SDE_HW_BLK_NAME_LEN, "intf_%u", + intf->id - INTF_0); + if (!prop_exists[INTF_LEN]) intf->len = DEFAULT_SDE_HW_BLOCK_LEN; @@ -1290,10 +1370,20 @@ static int sde_wb_parse_dt(struct device_node *np, wb->base = PROP_VALUE_ACCESS(prop_value, WB_OFF, i); wb->id = WB_0 + PROP_VALUE_ACCESS(prop_value, WB_ID, i); + snprintf(wb->name, SDE_HW_BLK_NAME_LEN, "wb_%u", + wb->id - WB_0); wb->clk_ctrl = SDE_CLK_CTRL_WB0 + PROP_VALUE_ACCESS(prop_value, WB_ID, i); wb->xin_id = PROP_VALUE_ACCESS(prop_value, WB_XIN_ID, i); wb->vbif_idx = VBIF_NRT; + + if (wb->clk_ctrl >= SDE_CLK_CTRL_MAX) { + SDE_ERROR("%s: invalid clk ctrl: %d\n", + wb->name, wb->clk_ctrl); + rc = -EINVAL; + goto end; + } + wb->len = PROP_VALUE_ACCESS(prop_value, WB_LEN, 0); wb->format_list = wb2_formats; if (!prop_exists[WB_LEN]) @@ -1515,7 +1605,10 @@ static int sde_dspp_parse_dt(struct device_node *np, for (i = 0; i < off_count; i++) { dspp = sde_cfg->dspp + i; dspp->base = PROP_VALUE_ACCESS(prop_value, DSPP_OFF, i); + dspp->len = PROP_VALUE_ACCESS(prop_value, DSPP_SIZE, 0); dspp->id = DSPP_0 + i; + snprintf(dspp->name, SDE_HW_BLK_NAME_LEN, "dspp_%u", + dspp->id - DSPP_0); sblk = kzalloc(sizeof(*sblk), GFP_KERNEL); if (!sblk) { @@ -1585,6 +1678,8 @@ static int sde_cdm_parse_dt(struct device_node *np, cdm = sde_cfg->cdm + i; cdm->base = PROP_VALUE_ACCESS(prop_value, HW_OFF, i); cdm->id = CDM_0 + i; + snprintf(cdm->name, SDE_HW_BLK_NAME_LEN, "cdm_%u", + cdm->id - CDM_0); cdm->len = PROP_VALUE_ACCESS(prop_value, HW_LEN, 0); /* intf3 and wb2 for cdm block */ @@ -1650,6 +1745,8 @@ static int sde_vbif_parse_dt(struct device_node *np, vbif->base = PROP_VALUE_ACCESS(prop_value, VBIF_OFF, i); vbif->len = vbif_len; vbif->id = VBIF_0 + PROP_VALUE_ACCESS(prop_value, VBIF_ID, i); + snprintf(vbif->name, SDE_HW_BLK_NAME_LEN, "vbif_%u", + vbif->id - VBIF_0); SDE_DEBUG("vbif:%d\n", vbif->id - VBIF_0); @@ -1777,15 +1874,21 @@ static int sde_pp_parse_dt(struct device_node *np, pp->base = PROP_VALUE_ACCESS(prop_value, PP_OFF, i); pp->id = PINGPONG_0 + i; + snprintf(pp->name, SDE_HW_BLK_NAME_LEN, "pingpong_%u", + pp->id - PINGPONG_0); pp->len = PROP_VALUE_ACCESS(prop_value, PP_LEN, 0); sblk->te.base = PROP_VALUE_ACCESS(prop_value, TE_OFF, i); sblk->te.id = SDE_PINGPONG_TE; + snprintf(sblk->te.name, SDE_HW_BLK_NAME_LEN, "te_%u", + pp->id - PINGPONG_0); set_bit(SDE_PINGPONG_TE, &pp->features); 
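Every block and sub-block now also carries a short name of the form "<type>_<instance>", bounded by SDE_HW_BLK_NAME_LEN, where the instance number is the block id minus the first id of that type. A trivial standalone illustration (the enum values here are placeholders, not the driver's):

#include <stdio.h>

#define BLK_NAME_LEN 16		/* stand-in for SDE_HW_BLK_NAME_LEN */

/* Placeholder block ids; the driver uses enums such as CTL_0, LM_0... */
enum blk_id { CTL_0 = 0x10, CTL_1, CTL_2, LM_0 = 0x20, LM_1 };

/* Name is "<type>_<instance>", where instance = id - first id of type. */
static void blk_name(char *buf, const char *type, enum blk_id id,
		     enum blk_id base)
{
	snprintf(buf, BLK_NAME_LEN, "%s_%u", type, (unsigned int)(id - base));
}

int main(void)
{
	char name[BLK_NAME_LEN];

	blk_name(name, "ctl", CTL_2, CTL_0);
	printf("%s\n", name);	/* ctl_2 */

	blk_name(name, "lm", LM_1, LM_0);
	printf("%s\n", name);	/* lm_1 */
	return 0;
}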
sblk->te2.base = PROP_VALUE_ACCESS(prop_value, TE2_OFF, i); if (sblk->te2.base) { sblk->te2.id = SDE_PINGPONG_TE2; + snprintf(sblk->te2.name, SDE_HW_BLK_NAME_LEN, "te2_%u", + pp->id - PINGPONG_0); set_bit(SDE_PINGPONG_TE2, &pp->features); set_bit(SDE_PINGPONG_SPLIT, &pp->features); } @@ -1796,6 +1899,8 @@ static int sde_pp_parse_dt(struct device_node *np, sblk->dsc.base = PROP_VALUE_ACCESS(prop_value, DSC_OFF, i); if (sblk->dsc.base) { sblk->dsc.id = SDE_PINGPONG_DSC; + snprintf(sblk->dsc.name, SDE_HW_BLK_NAME_LEN, "dsc_%u", + sblk->dsc.id - PINGPONG_0); set_bit(SDE_PINGPONG_DSC, &pp->features); } } @@ -1926,9 +2031,13 @@ static int sde_parse_dt(struct device_node *np, struct sde_mdss_cfg *cfg) cfg->mdss_count = 1; cfg->mdss[0].base = MDSS_BASE_OFFSET; cfg->mdss[0].id = MDP_TOP; + snprintf(cfg->mdss[0].name, SDE_HW_BLK_NAME_LEN, "mdss_%u", + cfg->mdss[0].id - MDP_TOP); cfg->mdp_count = 1; cfg->mdp[0].id = MDP_TOP; + snprintf(cfg->mdp[0].name, SDE_HW_BLK_NAME_LEN, "top_%u", + cfg->mdp[0].id - MDP_TOP); cfg->mdp[0].base = PROP_VALUE_ACCESS(prop_value, SDE_OFF, 0); cfg->mdp[0].len = PROP_VALUE_ACCESS(prop_value, SDE_LEN, 0); if (!prop_exists[SDE_LEN]) @@ -1997,7 +2106,7 @@ static int sde_perf_parse_dt(struct device_node *np, goto end; } - prop_value = kzalloc(SDE_PROP_MAX * + prop_value = kzalloc(PERF_PROP_MAX * sizeof(struct sde_prop_value), GFP_KERNEL); if (!prop_value) { rc = -ENOMEM; diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h index 73bb77b7afa6..81e6bfe6defe 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h @@ -44,10 +44,12 @@ #define SDE_HW_VER_172 SDE_HW_VER(1, 7, 2) /* 8996 v3.0 */ #define SDE_HW_VER_300 SDE_HW_VER(3, 0, 0) /* 8998 v1.0 */ #define SDE_HW_VER_301 SDE_HW_VER(3, 0, 1) /* 8998 v1.1 */ -#define SDE_HW_VER_400 SDE_HW_VER(4, 0, 0) /* msmskunk v1.0 */ +#define SDE_HW_VER_400 SDE_HW_VER(4, 0, 0) /* sdm845 v1.0 */ #define IS_MSMSKUNK_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_400) +#define SDE_HW_BLK_NAME_LEN 16 + #define MAX_IMG_WIDTH 0x3fff #define MAX_IMG_HEIGHT 0x3fff @@ -58,8 +60,6 @@ #define SDE_COLOR_PROCESS_MAJOR(version) (((version) & 0xFFFF0000) >> 16) #define SDE_COLOR_PROCESS_MINOR(version) ((version) & 0xFFFF) -#define SSPP_NAME_SIZE 12 - /** * MDP TOP BLOCK features * @SDE_MDP_PANIC_PER_PIPE Panic configuration needs to be be done per pipe @@ -236,12 +236,14 @@ enum { /** * MACRO SDE_HW_BLK_INFO - information of HW blocks inside SDE + * @name: string name for debug purposes * @id: enum identifying this block * @base: register base offset to mdss * @len: length of hardware block * @features bit mask identifying sub-blocks/features */ #define SDE_HW_BLK_INFO \ + char name[SDE_HW_BLK_NAME_LEN]; \ u32 id; \ u32 base; \ u32 len; \ @@ -249,12 +251,14 @@ enum { /** * MACRO SDE_HW_SUBBLK_INFO - information of HW sub-block inside SDE + * @name: string name for debug purposes * @id: enum identifying this sub-block * @base: offset of this sub-block relative to the block * offset * @len register block length of this sub-block */ #define SDE_HW_SUBBLK_INFO \ + char name[SDE_HW_BLK_NAME_LEN]; \ u32 id; \ u32 base; \ u32 len @@ -458,7 +462,6 @@ struct sde_ctl_cfg { * @sblk: SSPP sub-blocks information * @xin_id: bus client identifier * @clk_ctrl clock control identifier - * @name source pipe name * @type sspp type identifier */ struct sde_sspp_cfg { @@ -466,7 +469,6 @@ struct sde_sspp_cfg { const struct sde_sspp_sub_blks *sblk; u32 xin_id; enum sde_clk_ctrl_type 
clk_ctrl; - char name[SSPP_NAME_SIZE]; u32 type; }; diff --git a/drivers/gpu/drm/msm/sde/sde_hw_cdm.c b/drivers/gpu/drm/msm/sde/sde_hw_cdm.c index 9ec81c227e60..da04be4e9719 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_cdm.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_cdm.c @@ -14,6 +14,7 @@ #include "sde_hwio.h" #include "sde_hw_catalog.h" #include "sde_hw_cdm.h" +#include "sde_dbg.h" #define CDM_CSC_10_OPMODE 0x000 #define CDM_CSC_10_BASE 0x004 @@ -295,6 +296,9 @@ struct sde_hw_cdm *sde_hw_cdm_init(enum sde_cdm idx, */ sde_hw_cdm_setup_csc_10bit(c, &rgb2yuv_cfg); + sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off, + c->hw.blk_off + c->hw.length, c->hw.xin_id); + return c; } diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c index 56d9f2a4a9b8..270e79a774b2 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c @@ -13,6 +13,7 @@ #include <linux/delay.h> #include "sde_hwio.h" #include "sde_hw_ctl.h" +#include "sde_dbg.h" #define CTL_LAYER(lm) \ (((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004)) @@ -39,6 +40,7 @@ static struct sde_ctl_cfg *_ctl_offset(enum sde_ctl ctl, if (ctl == m->ctl[i].id) { b->base_off = addr; b->blk_off = m->ctl[i].base; + b->length = m->ctl[i].len; b->hwversion = m->hwversion; b->log_mask = SDE_DBG_MASK_CTL; return &m->ctl[i]; @@ -92,6 +94,12 @@ static inline void sde_hw_ctl_trigger_flush(struct sde_hw_ctl *ctx) SDE_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask); } +static inline u32 sde_hw_ctl_get_flush_register(struct sde_hw_ctl *ctx) +{ + struct sde_hw_blk_reg_map *c = &ctx->hw; + + return SDE_REG_READ(c, CTL_FLUSH); +} static inline uint32_t sde_hw_ctl_get_bitmask_sspp(struct sde_hw_ctl *ctx, enum sde_sspp sspp) @@ -247,23 +255,58 @@ static inline int sde_hw_ctl_get_bitmask_cdm(struct sde_hw_ctl *ctx, return 0; } +static u32 sde_hw_ctl_poll_reset_status(struct sde_hw_ctl *ctx, u32 count) +{ + struct sde_hw_blk_reg_map *c = &ctx->hw; + u32 status; + + /* protect to do at least one iteration */ + if (!count) + count = 1; + + /* + * it takes around 30us to have mdp finish resetting its ctl path + * poll every 50us so that reset should be completed at 1st poll + */ + do { + status = SDE_REG_READ(c, CTL_SW_RESET); + status &= 0x01; + if (status) + usleep_range(20, 50); + } while (status && --count > 0); + + return status; +} + static int sde_hw_ctl_reset_control(struct sde_hw_ctl *ctx) { struct sde_hw_blk_reg_map *c = &ctx->hw; - int count = SDE_REG_RESET_TIMEOUT_COUNT; - int reset; + pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx); SDE_REG_WRITE(c, CTL_SW_RESET, 0x1); + if (sde_hw_ctl_poll_reset_status(ctx, SDE_REG_RESET_TIMEOUT_COUNT)) + return -EINVAL; - for (; count > 0; count--) { - /* insert small delay to avoid spinning the cpu while waiting */ - usleep_range(20, 50); - reset = SDE_REG_READ(c, CTL_SW_RESET); - if (reset == 0) - return 0; + return 0; +} + +static int sde_hw_ctl_wait_reset_status(struct sde_hw_ctl *ctx) +{ + struct sde_hw_blk_reg_map *c = &ctx->hw; + u32 status; + + status = SDE_REG_READ(c, CTL_SW_RESET); + status &= 0x01; + if (!status) + return 0; + + pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx); + if (sde_hw_ctl_poll_reset_status(ctx, SDE_REG_RESET_TIMEOUT_COUNT)) { + pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx); + return -EINVAL; } - return -EINVAL; + return 0; } static void sde_hw_ctl_clear_all_blendstages(struct sde_hw_ctl *ctx) @@ -415,9 +458,11 @@ static void _setup_ctl_ops(struct sde_hw_ctl_ops *ops, 
ops->update_pending_flush = sde_hw_ctl_update_pending_flush; ops->get_pending_flush = sde_hw_ctl_get_pending_flush; ops->trigger_flush = sde_hw_ctl_trigger_flush; + ops->get_flush_register = sde_hw_ctl_get_flush_register; ops->trigger_start = sde_hw_ctl_trigger_start; ops->setup_intf_cfg = sde_hw_ctl_intf_cfg; ops->reset = sde_hw_ctl_reset_control; + ops->wait_reset_status = sde_hw_ctl_wait_reset_status; ops->clear_all_blendstages = sde_hw_ctl_clear_all_blendstages; ops->setup_blendstage = sde_hw_ctl_setup_blendstage; ops->get_bitmask_sspp = sde_hw_ctl_get_bitmask_sspp; @@ -452,6 +497,9 @@ struct sde_hw_ctl *sde_hw_ctl_init(enum sde_ctl idx, c->mixer_count = m->mixer_count; c->mixer_hw_caps = m->mixer; + sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off, + c->hw.blk_off + c->hw.length, c->hw.xin_id); + return c; } diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h index 2fb7b377e51d..74dbde92639a 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -94,6 +94,13 @@ struct sde_hw_ctl_ops { void (*trigger_flush)(struct sde_hw_ctl *ctx); /** + * Read the value of the flush register + * @ctx : ctl path ctx pointer + * @Return: value of the ctl flush register. + */ + u32 (*get_flush_register)(struct sde_hw_ctl *ctx); + + /** * Setup ctl_path interface config * @ctx * @cfg : interface config structure pointer @@ -103,6 +110,17 @@ struct sde_hw_ctl_ops { int (*reset)(struct sde_hw_ctl *c); + /* + * wait_reset_status - checks ctl reset status + * @ctx : ctl path ctx pointer + * + * This function checks the ctl reset status bit. + * If the reset bit is set, it keeps polling the status till the hw + * reset is complete. + * Returns: 0 on success or -error if reset incomplete within interval + */ + int (*wait_reset_status)(struct sde_hw_ctl *ctx); + uint32_t (*get_bitmask_sspp)(struct sde_hw_ctl *ctx, enum sde_sspp blk); diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dspp.c b/drivers/gpu/drm/msm/sde/sde_hw_dspp.c index d6250b07b4f0..2fd879a0030d 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_dspp.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_dspp.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. 
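wait_reset_status() above reduces to a bounded poll: read the reset bit, sleep a few tens of microseconds while it is still set, and give up after a fixed number of attempts. Abstracted from the register access (the read helper below is a stand-in, not the driver's), the pattern is:

#include <stdio.h>
#include <unistd.h>

/* Stand-in for reading bit 0 of CTL_SW_RESET; returns nonzero while the
 * hardware-initiated reset is still in progress. Here it simply pretends
 * the hardware finishes after two polls.
 */
static unsigned int read_reset_bit(void)
{
	static int busy_polls = 2;

	return busy_polls-- > 0;
}

/* Poll the reset bit up to 'count' times, sleeping briefly per retry.
 * Returns 0 once the bit clears, or nonzero if it never does.
 */
static unsigned int poll_reset_status(unsigned int count)
{
	unsigned int status;

	if (!count)
		count = 1;	/* always do at least one read */

	do {
		status = read_reset_bit() & 0x1;
		if (status)
			usleep(30);
	} while (status && --count > 0);

	return status;
}

int main(void)
{
	printf("reset %s\n", poll_reset_status(10) ? "timed out" : "done");
	return 0;
}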
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -15,6 +15,7 @@ #include "sde_hw_catalog.h" #include "sde_hw_dspp.h" #include "sde_hw_color_processing.h" +#include "sde_dbg.h" static struct sde_dspp_cfg *_dspp_offset(enum sde_dspp dspp, struct sde_mdss_cfg *m, @@ -27,6 +28,7 @@ static struct sde_dspp_cfg *_dspp_offset(enum sde_dspp dspp, if (dspp == m->dspp[i].id) { b->base_off = addr; b->blk_off = m->dspp[i].base; + b->length = m->dspp[i].len; b->hwversion = m->hwversion; b->log_mask = SDE_DBG_MASK_DSPP; return &m->dspp[i]; @@ -111,6 +113,9 @@ struct sde_hw_dspp *sde_hw_dspp_init(enum sde_dspp idx, c->cap = cfg; _setup_dspp_ops(c, c->cap->features); + sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off, + c->hw.blk_off + c->hw.length, c->hw.xin_id); + return c; } diff --git a/drivers/gpu/drm/msm/sde/sde_hw_intf.c b/drivers/gpu/drm/msm/sde/sde_hw_intf.c index 042b0ee7909a..9e1b97800cb9 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_intf.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_intf.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -13,6 +13,7 @@ #include "sde_hwio.h" #include "sde_hw_catalog.h" #include "sde_hw_intf.h" +#include "sde_dbg.h" #define INTF_TIMING_ENGINE_EN 0x000 #define INTF_CONFIG 0x004 @@ -83,6 +84,7 @@ static struct sde_intf_cfg *_intf_offset(enum sde_intf intf, (m->intf[i].type != INTF_NONE)) { b->base_off = addr; b->blk_off = m->intf[i].base; + b->length = m->intf[i].len; b->hwversion = m->hwversion; b->log_mask = SDE_DBG_MASK_INTF; return &m->intf[i]; @@ -324,9 +326,9 @@ struct sde_hw_intf *sde_hw_intf_init(enum sde_intf idx, c->mdss = m; _setup_intf_ops(&c->ops, c->cap->features); - /* - * Perform any default initialization for the intf - */ + sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off, + c->hw.blk_off + c->hw.length, c->hw.xin_id); + return c; } diff --git a/drivers/gpu/drm/msm/sde/sde_hw_lm.c b/drivers/gpu/drm/msm/sde/sde_hw_lm.c index 365b9b17715d..8b4e0901458f 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_lm.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_lm.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -14,6 +14,7 @@ #include "sde_hwio.h" #include "sde_hw_lm.h" #include "sde_hw_mdss.h" +#include "sde_dbg.h" #define LM_OP_MODE 0x00 #define LM_OUT_SIZE 0x04 @@ -37,6 +38,7 @@ static struct sde_lm_cfg *_lm_offset(enum sde_lm mixer, if (mixer == m->mixer[i].id) { b->base_off = addr; b->blk_off = m->mixer[i].base; + b->length = m->mixer[i].len; b->hwversion = m->hwversion; b->log_mask = SDE_DBG_MASK_LM; return &m->mixer[i]; @@ -195,9 +197,9 @@ struct sde_hw_mixer *sde_hw_lm_init(enum sde_lm idx, c->cap = cfg; _setup_mixer_ops(m, &c->ops, c->cap->features); - /* - * Perform any default initialization for the sspp blocks - */ + sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off, + c->hw.blk_off + c->hw.length, c->hw.xin_id); + return c; } diff --git a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h index 92dd829eee3e..3d63d01a6d4e 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h @@ -18,6 +18,8 @@ #include "msm_drv.h" +#define SDE_DBG_NAME "sde" + #define SDE_NONE 0 #ifndef SDE_CSC_MATRIX_COEFF_SIZE diff --git a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c index 837edeeba4c6..8488d03af79a 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -14,6 +14,7 @@ #include "sde_hwio.h" #include "sde_hw_catalog.h" #include "sde_hw_pingpong.h" +#include "sde_dbg.h" #define PP_TEAR_CHECK_EN 0x000 #define PP_SYNC_CONFIG_VSYNC 0x004 @@ -47,6 +48,7 @@ static struct sde_pingpong_cfg *_pingpong_offset(enum sde_pingpong pp, if (pp == m->pingpong[i].id) { b->base_off = addr; b->blk_off = m->pingpong[i].base; + b->length = m->pingpong[i].len; b->hwversion = m->hwversion; b->log_mask = SDE_DBG_MASK_PINGPONG; return &m->pingpong[i]; @@ -159,6 +161,9 @@ struct sde_hw_pingpong *sde_hw_pingpong_init(enum sde_pingpong idx, c->pingpong_hw_cap = cfg; _setup_pingpong_ops(&c->ops, c->pingpong_hw_cap->features); + sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off, + c->hw.blk_off + c->hw.length, c->hw.xin_id); + return c; } diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c index ea2890d776ae..be620aebf850 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c @@ -15,6 +15,7 @@ #include "sde_hw_lm.h" #include "sde_hw_sspp.h" #include "sde_hw_color_processing.h" +#include "sde_dbg.h" #define SDE_FETCH_CONFIG_RESET_VALUE 0x00000087 @@ -903,6 +904,7 @@ static struct sde_sspp_cfg *_sspp_offset(enum sde_sspp sspp, if (sspp == catalog->sspp[i].id) { b->base_off = addr; b->blk_off = catalog->sspp[i].base; + b->length = catalog->sspp[i].len; b->hwversion = catalog->hwversion; b->log_mask = SDE_DBG_MASK_SSPP; return &catalog->sspp[i]; @@ -917,26 +919,39 @@ struct sde_hw_pipe *sde_hw_sspp_init(enum sde_sspp idx, void __iomem *addr, struct sde_mdss_cfg *catalog) { - struct sde_hw_pipe *ctx; + struct sde_hw_pipe *hw_pipe; struct sde_sspp_cfg *cfg; - ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); - if (!ctx) + hw_pipe = 
kzalloc(sizeof(*hw_pipe), GFP_KERNEL); + if (!hw_pipe) return ERR_PTR(-ENOMEM); - cfg = _sspp_offset(idx, addr, catalog, &ctx->hw); + cfg = _sspp_offset(idx, addr, catalog, &hw_pipe->hw); if (IS_ERR_OR_NULL(cfg)) { - kfree(ctx); + kfree(hw_pipe); return ERR_PTR(-EINVAL); } /* Assign ops */ - ctx->idx = idx; - ctx->cap = cfg; - _setup_layer_ops(ctx, ctx->cap->features); - ctx->highest_bank_bit = catalog->mdp[0].highest_bank_bit; - - return ctx; + hw_pipe->idx = idx; + hw_pipe->cap = cfg; + _setup_layer_ops(hw_pipe, hw_pipe->cap->features); + hw_pipe->highest_bank_bit = catalog->mdp[0].highest_bank_bit; + + sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, + hw_pipe->hw.blk_off, + hw_pipe->hw.blk_off + hw_pipe->hw.length, + hw_pipe->hw.xin_id); + + if (cfg->sblk->scaler_blk.len) + sde_dbg_reg_register_dump_range(SDE_DBG_NAME, + cfg->sblk->scaler_blk.name, + hw_pipe->hw.blk_off + cfg->sblk->scaler_blk.base, + hw_pipe->hw.blk_off + cfg->sblk->scaler_blk.base + + cfg->sblk->scaler_blk.len, + hw_pipe->hw.xin_id); + + return hw_pipe; } void sde_hw_sspp_destroy(struct sde_hw_pipe *ctx) diff --git a/drivers/gpu/drm/msm/sde/sde_hw_top.c b/drivers/gpu/drm/msm/sde/sde_hw_top.c index d6d2e41ff5aa..218797e623a2 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_top.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_top.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -13,6 +13,7 @@ #include "sde_hwio.h" #include "sde_hw_catalog.h" #include "sde_hw_top.h" +#include "sde_dbg.h" #define SSPP_SPARE 0x28 @@ -225,6 +226,7 @@ static const struct sde_mdp_cfg *_top_offset(enum sde_mdp mdp, if (mdp == m->mdp[i].id) { b->base_off = addr; b->blk_off = m->mdp[i].base; + b->length = m->mdp[i].len; b->hwversion = m->hwversion; b->log_mask = SDE_DBG_MASK_TOP; return &m->mdp[i]; @@ -258,9 +260,10 @@ struct sde_hw_mdp *sde_hw_mdptop_init(enum sde_mdp idx, mdp->cap = cfg; _setup_mdp_ops(&mdp->ops, mdp->cap->features); - /* - * Perform any default initialization for the intf - */ + sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, + mdp->hw.blk_off, mdp->hw.blk_off + mdp->hw.length, + mdp->hw.xin_id); + sde_dbg_set_sde_top_offset(mdp->hw.blk_off); return mdp; } diff --git a/drivers/gpu/drm/msm/sde/sde_hw_util.h b/drivers/gpu/drm/msm/sde/sde_hw_util.h index c38c22237a57..008b657966b6 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_util.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_util.h @@ -24,12 +24,14 @@ * @base_off: mdp register mapped offset * @blk_off: pipe offset relative to mdss offset * @length length of register block offset + * @xin_id xin id * @hwversion mdss hw version number */ struct sde_hw_blk_reg_map { void __iomem *base_off; u32 blk_off; u32 length; + u32 xin_id; u32 hwversion; u32 log_mask; }; diff --git a/drivers/gpu/drm/msm/sde/sde_hw_vbif.c b/drivers/gpu/drm/msm/sde/sde_hw_vbif.c index 76473fa879c5..048ec47d7c72 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_vbif.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_vbif.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. 
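Each block init above now reports its register span to the debug facility as a (start, end) pair built from blk_off and length, and a pipe with a scaler sub-block registers that sub-range as well. A rough sketch of the bookkeeping such a registration implies (the registry below is invented for illustration; the driver's actual entry point is sde_dbg_reg_register_dump_range()):

#include <stdio.h>

/* Invented range registry standing in for the sde_dbg bookkeeping. */
struct dump_range {
	char name[16];
	unsigned int start;
	unsigned int end;
};

static struct dump_range ranges[8];
static int num_ranges;

static void register_range(const char *name, unsigned int start,
			   unsigned int end)
{
	if (num_ranges >= 8)
		return;

	snprintf(ranges[num_ranges].name, sizeof(ranges[num_ranges].name),
		 "%s", name);
	ranges[num_ranges].start = start;
	ranges[num_ranges].end = end;
	num_ranges++;
}

int main(void)
{
	/* Example pipe at offset 0x5000, length 0x1c8, with a scaler
	 * sub-block at +0xa00 of length 0x74 (all numbers made up).
	 */
	unsigned int blk_off = 0x5000, blk_len = 0x1c8;
	unsigned int scaler_base = 0xa00, scaler_len = 0x74;
	int i;

	register_range("sspp_0", blk_off, blk_off + blk_len);
	register_range("sspp_scaler0", blk_off + scaler_base,
		       blk_off + scaler_base + scaler_len);

	for (i = 0; i < num_ranges; i++)
		printf("%s: 0x%x - 0x%x\n", ranges[i].name,
		       ranges[i].start, ranges[i].end);
	return 0;
}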
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -13,6 +13,7 @@ #include "sde_hwio.h" #include "sde_hw_catalog.h" #include "sde_hw_vbif.h" +#include "sde_dbg.h" #define VBIF_VERSION 0x0000 #define VBIF_CLK_FORCE_CTRL0 0x0008 @@ -123,6 +124,7 @@ static const struct sde_vbif_cfg *_top_offset(enum sde_vbif vbif, if (vbif == m->vbif[i].id) { b->base_off = addr; b->blk_off = m->vbif[i].base; + b->length = m->vbif[i].len; b->hwversion = m->hwversion; b->log_mask = SDE_DBG_MASK_VBIF; return &m->vbif[i]; @@ -156,6 +158,8 @@ struct sde_hw_vbif *sde_hw_vbif_init(enum sde_vbif idx, c->cap = cfg; _setup_vbif_ops(&c->ops, c->cap->features); + /* no need to register sub-range in sde dbg, dump entire vbif io base */ + return c; } diff --git a/drivers/gpu/drm/msm/sde/sde_hw_wb.c b/drivers/gpu/drm/msm/sde/sde_hw_wb.c index 426e9991a6b5..320b05f67669 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_wb.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_wb.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -15,6 +15,7 @@ #include "sde_hw_catalog.h" #include "sde_hw_wb.h" #include "sde_formats.h" +#include "sde_dbg.h" #define WB_DST_FORMAT 0x000 #define WB_DST_OP_MODE 0x004 @@ -57,6 +58,7 @@ static struct sde_wb_cfg *_wb_offset(enum sde_wb wb, if (wb == m->wb[i].id) { b->base_off = addr; b->blk_off = m->wb[i].base; + b->length = m->wb[i].len; b->hwversion = m->hwversion; b->log_mask = SDE_DBG_MASK_WB; return &m->wb[i]; @@ -215,6 +217,9 @@ struct sde_hw_wb *sde_hw_wb_init(enum sde_wb idx, c->highest_bank_bit = m->mdp[0].highest_bank_bit; c->hw_mdp = hw_mdp; + sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off, + c->hw.blk_off + c->hw.length, c->hw.xin_id); + return c; } diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c index cdf67c0aa864..34a32d79f22c 100644 --- a/drivers/gpu/drm/msm/sde/sde_kms.c +++ b/drivers/gpu/drm/msm/sde/sde_kms.c @@ -1167,6 +1167,44 @@ fail: return ret; } +static void __iomem *_sde_kms_ioremap(struct platform_device *pdev, + const char *name, unsigned long *out_size) +{ + struct resource *res; + unsigned long size; + void __iomem *ptr; + + if (out_size) + *out_size = 0; + + if (name) + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); + else + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + if (!res) { + /* availability depends on platform */ + SDE_DEBUG("failed to get memory resource: %s\n", name); + return NULL; + } + + size = resource_size(res); + + ptr = devm_ioremap_nocache(&pdev->dev, res->start, size); + if (!ptr) { + SDE_ERROR("failed to ioremap: %s\n", name); + return NULL; + } + + SDE_DEBUG("IO:region %s %pK %08lx\n", name, ptr, size); + + if (out_size) + *out_size = size; + + return ptr; +} + + static int sde_kms_hw_init(struct msm_kms *kms) { struct sde_kms *sde_kms; @@ -1193,29 +1231,42 @@ static int sde_kms_hw_init(struct msm_kms *kms) goto end; } - sde_kms->mmio = msm_ioremap(dev->platformdev, "mdp_phys", "SDE"); - if (IS_ERR(sde_kms->mmio)) { - rc = PTR_ERR(sde_kms->mmio); - SDE_ERROR("mdp register memory map failed: %d\n", rc); - sde_kms->mmio = NULL; + sde_kms->mmio = _sde_kms_ioremap(dev->platformdev, "mdp_phys", + &sde_kms->mmio_len); + if (!sde_kms->mmio) { + 
SDE_ERROR("mdp register memory map failed\n"); goto error; } DRM_INFO("mapped mdp address space @%p\n", sde_kms->mmio); - sde_kms->vbif[VBIF_RT] = msm_ioremap(dev->platformdev, - "vbif_phys", "VBIF"); - if (IS_ERR(sde_kms->vbif[VBIF_RT])) { - rc = PTR_ERR(sde_kms->vbif[VBIF_RT]); - SDE_ERROR("vbif register memory map failed: %d\n", rc); - sde_kms->vbif[VBIF_RT] = NULL; + rc = sde_dbg_reg_register_base(SDE_DBG_NAME, sde_kms->mmio, + sde_kms->mmio_len); + if (rc) + SDE_ERROR("dbg base register kms failed: %d\n", rc); + + sde_kms->vbif[VBIF_RT] = _sde_kms_ioremap(dev->platformdev, "vbif_phys", + &sde_kms->vbif_len[VBIF_RT]); + if (!sde_kms->vbif[VBIF_RT]) { + SDE_ERROR("vbif register memory map failed\n"); goto error; } - sde_kms->vbif[VBIF_NRT] = msm_ioremap(dev->platformdev, - "vbif_nrt_phys", "VBIF_NRT"); - if (IS_ERR(sde_kms->vbif[VBIF_NRT])) { - sde_kms->vbif[VBIF_NRT] = NULL; + rc = sde_dbg_reg_register_base("vbif_rt", sde_kms->vbif[VBIF_RT], + sde_kms->vbif_len[VBIF_RT]); + if (rc) + SDE_ERROR("dbg base register vbif_rt failed: %d\n", rc); + + sde_kms->vbif[VBIF_NRT] = _sde_kms_ioremap(dev->platformdev, + "vbif_nrt_phys", &sde_kms->vbif_len[VBIF_NRT]); + if (!sde_kms->vbif[VBIF_NRT]) { SDE_DEBUG("VBIF NRT is not defined"); + } else { + rc = sde_dbg_reg_register_base("vbif_nrt", + sde_kms->vbif[VBIF_NRT], + sde_kms->vbif_len[VBIF_NRT]); + if (rc) + SDE_ERROR("dbg base register vbif_nrt failed: %d\n", + rc); } sde_kms->core_client = sde_power_client_create(&priv->phandle, "core"); @@ -1245,6 +1296,8 @@ static int sde_kms_hw_init(struct msm_kms *kms) goto power_error; } + sde_dbg_init_dbg_buses(sde_kms->core_rev); + rc = sde_rm_init(&sde_kms->rm, sde_kms->catalog, sde_kms->mmio, sde_kms->dev); if (rc) { diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h index d929e48a3fe8..dee16d119d47 100644 --- a/drivers/gpu/drm/msm/sde/sde_kms.h +++ b/drivers/gpu/drm/msm/sde/sde_kms.h @@ -134,6 +134,7 @@ struct sde_kms { /* io/register spaces: */ void __iomem *mmio, *vbif[VBIF_MAX]; + unsigned long mmio_len, vbif_len[VBIF_MAX]; struct regulator *vdd; struct regulator *mmagic; @@ -281,10 +282,12 @@ struct sde_kms_info { /** * SDE_KMS_INFO_DATALEN - Macro for accessing sde_kms_info data length + * it adds an extra character length to count null. * @S: Pointer to sde_kms_info structure * Returns: Size of available byte data */ -#define SDE_KMS_INFO_DATALEN(S) ((S) ? ((struct sde_kms_info *)(S))->len : 0) +#define SDE_KMS_INFO_DATALEN(S) ((S) ? ((struct sde_kms_info *)(S))->len + 1 \ + : 0) /** * sde_kms_info_reset - reset sde_kms_info structure @@ -368,6 +371,49 @@ void sde_kms_info_append_format(struct sde_kms_info *info, void sde_kms_info_stop(struct sde_kms_info *info); /** + * sde_kms_rect_intersect - intersect two rectangles + * @r1: first rectangle + * @r2: scissor rectangle + * @result: result rectangle, all 0's on no intersection found + */ +void sde_kms_rect_intersect(const struct sde_rect *r1, + const struct sde_rect *r2, + struct sde_rect *result); + +/** + * sde_kms_rect_is_equal - compares two rects + * @r1: rect value to compare + * @r2: rect value to compare + * + * Returns 1 if the rects are same, 0 otherwise. 
+ */ +static inline bool sde_kms_rect_is_equal(struct sde_rect *r1, + struct sde_rect *r2) +{ + if ((!r1 && r2) || (r1 && !r2)) + return false; + + if (!r1 && !r2) + return true; + + return r1->x == r2->x && r1->y == r2->y && r1->w == r2->w && + r1->h == r2->h; +} + +/** + * sde_kms_rect_is_null - returns true if the width or height of a rect is 0 + * @rect: rectangle to check for zero size + * @Return: True if width or height of rectangle is 0 + */ +static inline bool sde_kms_rect_is_null(const struct sde_rect *r) +{ + if (!r) + return true; + + return (!r->w || !r->h); +} + +/** * Vblank enable/disable functions */ int sde_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); diff --git a/drivers/gpu/drm/msm/sde/sde_kms_utils.c b/drivers/gpu/drm/msm/sde/sde_kms_utils.c index 6e29c09deb40..30e12c969538 100644 --- a/drivers/gpu/drm/msm/sde/sde_kms_utils.c +++ b/drivers/gpu/drm/msm/sde/sde_kms_utils.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -151,3 +151,27 @@ void sde_kms_info_stop(struct sde_kms_info *info) info->len = info->staged_len + len; } } + +void sde_kms_rect_intersect(const struct sde_rect *r1, + const struct sde_rect *r2, + struct sde_rect *result) +{ + int l, t, r, b; + + if (!r1 || !r2 || !result) + return; + + l = max(r1->x, r2->x); + t = max(r1->y, r2->y); + r = min((r1->x + r1->w), (r2->x + r2->w)); + b = min((r1->y + r1->h), (r2->y + r2->h)); + + if (r < l || b < t) { + memset(result, 0, sizeof(*result)); + } else { + result->x = l; + result->y = t; + result->w = r - l; + result->h = b - t; + } +} diff --git a/drivers/gpu/drm/msm/sde_dbg.c b/drivers/gpu/drm/msm/sde_dbg.c new file mode 100644 index 000000000000..5a0c5e677ed8 --- /dev/null +++ b/drivers/gpu/drm/msm/sde_dbg.c @@ -0,0 +1,2067 @@ +/* Copyright (c) 2009-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
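/*
 * Illustration only: intended use of the rect helpers added above in
 * sde_kms.h/sde_kms_utils.c, e.g. clipping a plane rectangle against a
 * scissor. The concrete rectangle values are hypothetical.
 */
static void example_clip(void)
{
	struct sde_rect src = { .x = 100, .y = 50, .w = 400, .h = 300 };
	struct sde_rect scissor = { .x = 0, .y = 0, .w = 256, .h = 256 };
	struct sde_rect clipped;

	sde_kms_rect_intersect(&src, &scissor, &clipped);
	if (sde_kms_rect_is_null(&clipped))
		return;	/* no overlap, nothing to program */

	/* here clipped == { .x = 100, .y = 50, .w = 156, .h = 206 } */
}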
+ */ + +#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__ + +#include <linux/delay.h> +#include <linux/spinlock.h> +#include <linux/ktime.h> +#include <linux/debugfs.h> +#include <linux/uaccess.h> +#include <linux/dma-buf.h> +#include <linux/slab.h> +#include <linux/list_sort.h> + +#include "sde_dbg.h" +#include "sde/sde_hw_catalog.h" + +#define SDE_DBG_BASE_MAX 10 + +#define DEFAULT_PANIC 1 +#define DEFAULT_REGDUMP SDE_DBG_DUMP_IN_MEM +#define DEFAULT_DBGBUS_SDE SDE_DBG_DUMP_IN_MEM +#define DEFAULT_DBGBUS_VBIFRT SDE_DBG_DUMP_IN_MEM +#define DEFAULT_BASE_REG_CNT 0x100 +#define GROUP_BYTES 4 +#define ROW_BYTES 16 +#define RANGE_NAME_LEN 40 +#define REG_BASE_NAME_LEN 80 + +#define DBGBUS_FLAGS_DSPP BIT(0) +#define DBGBUS_DSPP_STATUS 0x34C + +#define DBGBUS_NAME_SDE "sde" +#define DBGBUS_NAME_VBIF_RT "vbif_rt" + +/* offsets from sde top address for the debug buses */ +#define DBGBUS_SSPP0 0x188 +#define DBGBUS_SSPP1 0x298 +#define DBGBUS_DSPP 0x348 +#define DBGBUS_PERIPH 0x418 + +#define TEST_MASK(id, tp) ((id << 4) | (tp << 1) | BIT(0)) + +/* following offsets are with respect to MDP VBIF base for DBG BUS access */ +#define MMSS_VBIF_CLKON 0x4 +#define MMSS_VBIF_TEST_BUS_OUT_CTRL 0x210 +#define MMSS_VBIF_TEST_BUS_OUT 0x230 + +/* print debug ranges in groups of 4 u32s */ +#define REG_DUMP_ALIGN 16 + +/** + * struct sde_dbg_reg_offset - tracking for start and end of region + * @start: start offset + * @end: end offset + */ +struct sde_dbg_reg_offset { + u32 start; + u32 end; +}; + +/** + * struct sde_dbg_reg_range - register dumping named sub-range + * @head: head of this node + * @reg_dump: address for the mem dump + * @range_name: name of this range + * @offset: offsets for range to dump + * @xin_id: client xin id + */ +struct sde_dbg_reg_range { + struct list_head head; + u32 *reg_dump; + char range_name[RANGE_NAME_LEN]; + struct sde_dbg_reg_offset offset; + uint32_t xin_id; +}; + +/** + * struct sde_dbg_reg_base - register region base.
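/*
 * Worked example, illustration only: for the first entry of the
 * dbg_bus_sde_8998 table below, { DBGBUS_SSPP0, 50, 2 }, the select value is
 *
 *   TEST_MASK(50, 2) = (50 << 4) | (2 << 1) | BIT(0) = 0x320 | 0x4 | 0x1 = 0x325
 *
 * The dump code writes this to sde top + 0x188 (DBGBUS_SSPP0) to select
 * block 50, test point 2, and reads the probed value back from the status
 * register (wr_addr + 0x4, or DBGBUS_DSPP_STATUS when the DSPP flag is set).
 */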
+ * may sub-ranges: sub-ranges are used for dumping + * or may not have sub-ranges: dumping is base -> max_offset + * @reg_base_head: head of this node + * @sub_range_list: head to the list with dump ranges + * @name: register base name + * @base: base pointer + * @off: cached offset of region for manual register dumping + * @cnt: cached range of region for manual register dumping + * @max_offset: length of region + * @buf: buffer used for manual register dumping + * @buf_len: buffer length used for manual register dumping + * @reg_dump: address for the mem dump if no ranges used + */ +struct sde_dbg_reg_base { + struct list_head reg_base_head; + struct list_head sub_range_list; + char name[REG_BASE_NAME_LEN]; + void __iomem *base; + size_t off; + size_t cnt; + size_t max_offset; + char *buf; + size_t buf_len; + u32 *reg_dump; +}; + +struct sde_debug_bus_entry { + u32 wr_addr; + u32 block_id; + u32 test_id; +}; + +struct vbif_debug_bus_entry { + u32 disable_bus_addr; + u32 block_bus_addr; + u32 bit_offset; + u32 block_cnt; + u32 test_pnt_start; + u32 test_pnt_cnt; +}; + +struct sde_dbg_debug_bus_common { + char *name; + u32 enable_mask; + bool include_in_deferred_work; + u32 flags; + u32 entries_size; + u32 *dumped_content; +}; + +struct sde_dbg_sde_debug_bus { + struct sde_dbg_debug_bus_common cmn; + struct sde_debug_bus_entry *entries; + u32 top_blk_off; +}; + +struct sde_dbg_vbif_debug_bus { + struct sde_dbg_debug_bus_common cmn; + struct vbif_debug_bus_entry *entries; +}; + +/** + * struct sde_dbg_base - global sde debug base structure + * @evtlog: event log instance + * @reg_base_list: list of register dumping regions + * @root: base debugfs root + * @dev: device pointer + * @power_ctrl: callback structure for enabling power for reading hw registers + * @req_dump_blks: list of blocks requested for dumping + * @panic_on_err: whether to kernel panic after triggering dump via debugfs + * @dump_work: work struct for deferring register dump work to separate thread + * @work_panic: panic after dump if internal user passed "panic" special region + * @enable_reg_dump: whether to dump registers into memory, kernel log, or both + * @dbgbus_sde: debug bus structure for the sde + * @dbgbus_vbif_rt: debug bus structure for the realtime vbif + */ +static struct sde_dbg_base { + struct sde_dbg_evtlog *evtlog; + struct list_head reg_base_list; + struct dentry *root; + struct device *dev; + struct sde_dbg_power_ctrl power_ctrl; + + struct sde_dbg_reg_base *req_dump_blks[SDE_DBG_BASE_MAX]; + + u32 panic_on_err; + struct work_struct dump_work; + bool work_panic; + u32 enable_reg_dump; + + struct sde_dbg_sde_debug_bus dbgbus_sde; + struct sde_dbg_vbif_debug_bus dbgbus_vbif_rt; +} sde_dbg_base; + +/* sde_dbg_base_evtlog - global pointer to main sde event log for macro use */ +struct sde_dbg_evtlog *sde_dbg_base_evtlog; + +static struct sde_debug_bus_entry dbg_bus_sde_8998[] = { + + /* Unpack 0 sspp 0*/ + { DBGBUS_SSPP0, 50, 2 }, + { DBGBUS_SSPP0, 60, 2 }, + { DBGBUS_SSPP0, 70, 2 }, + { DBGBUS_SSPP0, 85, 2 }, + + /* Upack 0 sspp 1*/ + { DBGBUS_SSPP1, 50, 2 }, + { DBGBUS_SSPP1, 60, 2 }, + { DBGBUS_SSPP1, 70, 2 }, + { DBGBUS_SSPP1, 85, 2 }, + + /* scheduler */ + { DBGBUS_DSPP, 130, 0 }, + { DBGBUS_DSPP, 130, 1 }, + { DBGBUS_DSPP, 130, 2 }, + { DBGBUS_DSPP, 130, 3 }, + { DBGBUS_DSPP, 130, 4 }, + { DBGBUS_DSPP, 130, 5 }, + + /* qseed */ + { DBGBUS_SSPP0, 6, 0}, + { DBGBUS_SSPP0, 6, 1}, + { DBGBUS_SSPP0, 26, 0}, + { DBGBUS_SSPP0, 26, 1}, + { DBGBUS_SSPP1, 6, 0}, + { DBGBUS_SSPP1, 6, 1}, + { DBGBUS_SSPP1, 26, 
0}, + { DBGBUS_SSPP1, 26, 1}, + + /* scale */ + { DBGBUS_SSPP0, 16, 0}, + { DBGBUS_SSPP0, 16, 1}, + { DBGBUS_SSPP0, 36, 0}, + { DBGBUS_SSPP0, 36, 1}, + { DBGBUS_SSPP1, 16, 0}, + { DBGBUS_SSPP1, 16, 1}, + { DBGBUS_SSPP1, 36, 0}, + { DBGBUS_SSPP1, 36, 1}, + + /* fetch sspp0 */ + + /* vig 0 */ + { DBGBUS_SSPP0, 0, 0 }, + { DBGBUS_SSPP0, 0, 1 }, + { DBGBUS_SSPP0, 0, 2 }, + { DBGBUS_SSPP0, 0, 3 }, + { DBGBUS_SSPP0, 0, 4 }, + { DBGBUS_SSPP0, 0, 5 }, + { DBGBUS_SSPP0, 0, 6 }, + { DBGBUS_SSPP0, 0, 7 }, + + { DBGBUS_SSPP0, 1, 0 }, + { DBGBUS_SSPP0, 1, 1 }, + { DBGBUS_SSPP0, 1, 2 }, + { DBGBUS_SSPP0, 1, 3 }, + { DBGBUS_SSPP0, 1, 4 }, + { DBGBUS_SSPP0, 1, 5 }, + { DBGBUS_SSPP0, 1, 6 }, + { DBGBUS_SSPP0, 1, 7 }, + + { DBGBUS_SSPP0, 2, 0 }, + { DBGBUS_SSPP0, 2, 1 }, + { DBGBUS_SSPP0, 2, 2 }, + { DBGBUS_SSPP0, 2, 3 }, + { DBGBUS_SSPP0, 2, 4 }, + { DBGBUS_SSPP0, 2, 5 }, + { DBGBUS_SSPP0, 2, 6 }, + { DBGBUS_SSPP0, 2, 7 }, + + { DBGBUS_SSPP0, 4, 0 }, + { DBGBUS_SSPP0, 4, 1 }, + { DBGBUS_SSPP0, 4, 2 }, + { DBGBUS_SSPP0, 4, 3 }, + { DBGBUS_SSPP0, 4, 4 }, + { DBGBUS_SSPP0, 4, 5 }, + { DBGBUS_SSPP0, 4, 6 }, + { DBGBUS_SSPP0, 4, 7 }, + + { DBGBUS_SSPP0, 5, 0 }, + { DBGBUS_SSPP0, 5, 1 }, + { DBGBUS_SSPP0, 5, 2 }, + { DBGBUS_SSPP0, 5, 3 }, + { DBGBUS_SSPP0, 5, 4 }, + { DBGBUS_SSPP0, 5, 5 }, + { DBGBUS_SSPP0, 5, 6 }, + { DBGBUS_SSPP0, 5, 7 }, + + /* vig 2 */ + { DBGBUS_SSPP0, 20, 0 }, + { DBGBUS_SSPP0, 20, 1 }, + { DBGBUS_SSPP0, 20, 2 }, + { DBGBUS_SSPP0, 20, 3 }, + { DBGBUS_SSPP0, 20, 4 }, + { DBGBUS_SSPP0, 20, 5 }, + { DBGBUS_SSPP0, 20, 6 }, + { DBGBUS_SSPP0, 20, 7 }, + + { DBGBUS_SSPP0, 21, 0 }, + { DBGBUS_SSPP0, 21, 1 }, + { DBGBUS_SSPP0, 21, 2 }, + { DBGBUS_SSPP0, 21, 3 }, + { DBGBUS_SSPP0, 21, 4 }, + { DBGBUS_SSPP0, 21, 5 }, + { DBGBUS_SSPP0, 21, 6 }, + { DBGBUS_SSPP0, 21, 7 }, + + { DBGBUS_SSPP0, 22, 0 }, + { DBGBUS_SSPP0, 22, 1 }, + { DBGBUS_SSPP0, 22, 2 }, + { DBGBUS_SSPP0, 22, 3 }, + { DBGBUS_SSPP0, 22, 4 }, + { DBGBUS_SSPP0, 22, 5 }, + { DBGBUS_SSPP0, 22, 6 }, + { DBGBUS_SSPP0, 22, 7 }, + + { DBGBUS_SSPP0, 24, 0 }, + { DBGBUS_SSPP0, 24, 1 }, + { DBGBUS_SSPP0, 24, 2 }, + { DBGBUS_SSPP0, 24, 3 }, + { DBGBUS_SSPP0, 24, 4 }, + { DBGBUS_SSPP0, 24, 5 }, + { DBGBUS_SSPP0, 24, 6 }, + { DBGBUS_SSPP0, 24, 7 }, + + { DBGBUS_SSPP0, 25, 0 }, + { DBGBUS_SSPP0, 25, 1 }, + { DBGBUS_SSPP0, 25, 2 }, + { DBGBUS_SSPP0, 25, 3 }, + { DBGBUS_SSPP0, 25, 4 }, + { DBGBUS_SSPP0, 25, 5 }, + { DBGBUS_SSPP0, 25, 6 }, + { DBGBUS_SSPP0, 25, 7 }, + + /* dma 2 */ + { DBGBUS_SSPP0, 30, 0 }, + { DBGBUS_SSPP0, 30, 1 }, + { DBGBUS_SSPP0, 30, 2 }, + { DBGBUS_SSPP0, 30, 3 }, + { DBGBUS_SSPP0, 30, 4 }, + { DBGBUS_SSPP0, 30, 5 }, + { DBGBUS_SSPP0, 30, 6 }, + { DBGBUS_SSPP0, 30, 7 }, + + { DBGBUS_SSPP0, 31, 0 }, + { DBGBUS_SSPP0, 31, 1 }, + { DBGBUS_SSPP0, 31, 2 }, + { DBGBUS_SSPP0, 31, 3 }, + { DBGBUS_SSPP0, 31, 4 }, + { DBGBUS_SSPP0, 31, 5 }, + { DBGBUS_SSPP0, 31, 6 }, + { DBGBUS_SSPP0, 31, 7 }, + + { DBGBUS_SSPP0, 32, 0 }, + { DBGBUS_SSPP0, 32, 1 }, + { DBGBUS_SSPP0, 32, 2 }, + { DBGBUS_SSPP0, 32, 3 }, + { DBGBUS_SSPP0, 32, 4 }, + { DBGBUS_SSPP0, 32, 5 }, + { DBGBUS_SSPP0, 32, 6 }, + { DBGBUS_SSPP0, 32, 7 }, + + { DBGBUS_SSPP0, 33, 0 }, + { DBGBUS_SSPP0, 33, 1 }, + { DBGBUS_SSPP0, 33, 2 }, + { DBGBUS_SSPP0, 33, 3 }, + { DBGBUS_SSPP0, 33, 4 }, + { DBGBUS_SSPP0, 33, 5 }, + { DBGBUS_SSPP0, 33, 6 }, + { DBGBUS_SSPP0, 33, 7 }, + + { DBGBUS_SSPP0, 34, 0 }, + { DBGBUS_SSPP0, 34, 1 }, + { DBGBUS_SSPP0, 34, 2 }, + { DBGBUS_SSPP0, 34, 3 }, + { DBGBUS_SSPP0, 34, 4 }, + { DBGBUS_SSPP0, 34, 5 }, + { DBGBUS_SSPP0, 34, 6 }, + { DBGBUS_SSPP0, 34, 7 }, + + { 
DBGBUS_SSPP0, 35, 0 }, + { DBGBUS_SSPP0, 35, 1 }, + { DBGBUS_SSPP0, 35, 2 }, + { DBGBUS_SSPP0, 35, 3 }, + + /* dma 0 */ + { DBGBUS_SSPP0, 40, 0 }, + { DBGBUS_SSPP0, 40, 1 }, + { DBGBUS_SSPP0, 40, 2 }, + { DBGBUS_SSPP0, 40, 3 }, + { DBGBUS_SSPP0, 40, 4 }, + { DBGBUS_SSPP0, 40, 5 }, + { DBGBUS_SSPP0, 40, 6 }, + { DBGBUS_SSPP0, 40, 7 }, + + { DBGBUS_SSPP0, 41, 0 }, + { DBGBUS_SSPP0, 41, 1 }, + { DBGBUS_SSPP0, 41, 2 }, + { DBGBUS_SSPP0, 41, 3 }, + { DBGBUS_SSPP0, 41, 4 }, + { DBGBUS_SSPP0, 41, 5 }, + { DBGBUS_SSPP0, 41, 6 }, + { DBGBUS_SSPP0, 41, 7 }, + + { DBGBUS_SSPP0, 42, 0 }, + { DBGBUS_SSPP0, 42, 1 }, + { DBGBUS_SSPP0, 42, 2 }, + { DBGBUS_SSPP0, 42, 3 }, + { DBGBUS_SSPP0, 42, 4 }, + { DBGBUS_SSPP0, 42, 5 }, + { DBGBUS_SSPP0, 42, 6 }, + { DBGBUS_SSPP0, 42, 7 }, + + { DBGBUS_SSPP0, 44, 0 }, + { DBGBUS_SSPP0, 44, 1 }, + { DBGBUS_SSPP0, 44, 2 }, + { DBGBUS_SSPP0, 44, 3 }, + { DBGBUS_SSPP0, 44, 4 }, + { DBGBUS_SSPP0, 44, 5 }, + { DBGBUS_SSPP0, 44, 6 }, + { DBGBUS_SSPP0, 44, 7 }, + + { DBGBUS_SSPP0, 45, 0 }, + { DBGBUS_SSPP0, 45, 1 }, + { DBGBUS_SSPP0, 45, 2 }, + { DBGBUS_SSPP0, 45, 3 }, + { DBGBUS_SSPP0, 45, 4 }, + { DBGBUS_SSPP0, 45, 5 }, + { DBGBUS_SSPP0, 45, 6 }, + { DBGBUS_SSPP0, 45, 7 }, + + /* fetch sspp1 */ + /* vig 1 */ + { DBGBUS_SSPP1, 0, 0 }, + { DBGBUS_SSPP1, 0, 1 }, + { DBGBUS_SSPP1, 0, 2 }, + { DBGBUS_SSPP1, 0, 3 }, + { DBGBUS_SSPP1, 0, 4 }, + { DBGBUS_SSPP1, 0, 5 }, + { DBGBUS_SSPP1, 0, 6 }, + { DBGBUS_SSPP1, 0, 7 }, + + { DBGBUS_SSPP1, 1, 0 }, + { DBGBUS_SSPP1, 1, 1 }, + { DBGBUS_SSPP1, 1, 2 }, + { DBGBUS_SSPP1, 1, 3 }, + { DBGBUS_SSPP1, 1, 4 }, + { DBGBUS_SSPP1, 1, 5 }, + { DBGBUS_SSPP1, 1, 6 }, + { DBGBUS_SSPP1, 1, 7 }, + + { DBGBUS_SSPP1, 2, 0 }, + { DBGBUS_SSPP1, 2, 1 }, + { DBGBUS_SSPP1, 2, 2 }, + { DBGBUS_SSPP1, 2, 3 }, + { DBGBUS_SSPP1, 2, 4 }, + { DBGBUS_SSPP1, 2, 5 }, + { DBGBUS_SSPP1, 2, 6 }, + { DBGBUS_SSPP1, 2, 7 }, + + { DBGBUS_SSPP1, 4, 0 }, + { DBGBUS_SSPP1, 4, 1 }, + { DBGBUS_SSPP1, 4, 2 }, + { DBGBUS_SSPP1, 4, 3 }, + { DBGBUS_SSPP1, 4, 4 }, + { DBGBUS_SSPP1, 4, 5 }, + { DBGBUS_SSPP1, 4, 6 }, + { DBGBUS_SSPP1, 4, 7 }, + + { DBGBUS_SSPP1, 5, 0 }, + { DBGBUS_SSPP1, 5, 1 }, + { DBGBUS_SSPP1, 5, 2 }, + { DBGBUS_SSPP1, 5, 3 }, + { DBGBUS_SSPP1, 5, 4 }, + { DBGBUS_SSPP1, 5, 5 }, + { DBGBUS_SSPP1, 5, 6 }, + { DBGBUS_SSPP1, 5, 7 }, + + /* vig 3 */ + { DBGBUS_SSPP1, 20, 0 }, + { DBGBUS_SSPP1, 20, 1 }, + { DBGBUS_SSPP1, 20, 2 }, + { DBGBUS_SSPP1, 20, 3 }, + { DBGBUS_SSPP1, 20, 4 }, + { DBGBUS_SSPP1, 20, 5 }, + { DBGBUS_SSPP1, 20, 6 }, + { DBGBUS_SSPP1, 20, 7 }, + + { DBGBUS_SSPP1, 21, 0 }, + { DBGBUS_SSPP1, 21, 1 }, + { DBGBUS_SSPP1, 21, 2 }, + { DBGBUS_SSPP1, 21, 3 }, + { DBGBUS_SSPP1, 21, 4 }, + { DBGBUS_SSPP1, 21, 5 }, + { DBGBUS_SSPP1, 21, 6 }, + { DBGBUS_SSPP1, 21, 7 }, + + { DBGBUS_SSPP1, 22, 0 }, + { DBGBUS_SSPP1, 22, 1 }, + { DBGBUS_SSPP1, 22, 2 }, + { DBGBUS_SSPP1, 22, 3 }, + { DBGBUS_SSPP1, 22, 4 }, + { DBGBUS_SSPP1, 22, 5 }, + { DBGBUS_SSPP1, 22, 6 }, + { DBGBUS_SSPP1, 22, 7 }, + + { DBGBUS_SSPP1, 24, 0 }, + { DBGBUS_SSPP1, 24, 1 }, + { DBGBUS_SSPP1, 24, 2 }, + { DBGBUS_SSPP1, 24, 3 }, + { DBGBUS_SSPP1, 24, 4 }, + { DBGBUS_SSPP1, 24, 5 }, + { DBGBUS_SSPP1, 24, 6 }, + { DBGBUS_SSPP1, 24, 7 }, + + { DBGBUS_SSPP1, 25, 0 }, + { DBGBUS_SSPP1, 25, 1 }, + { DBGBUS_SSPP1, 25, 2 }, + { DBGBUS_SSPP1, 25, 3 }, + { DBGBUS_SSPP1, 25, 4 }, + { DBGBUS_SSPP1, 25, 5 }, + { DBGBUS_SSPP1, 25, 6 }, + { DBGBUS_SSPP1, 25, 7 }, + + /* dma 3 */ + { DBGBUS_SSPP1, 30, 0 }, + { DBGBUS_SSPP1, 30, 1 }, + { DBGBUS_SSPP1, 30, 2 }, + { DBGBUS_SSPP1, 30, 3 }, + { DBGBUS_SSPP1, 30, 4 }, + { 
DBGBUS_SSPP1, 30, 5 }, + { DBGBUS_SSPP1, 30, 6 }, + { DBGBUS_SSPP1, 30, 7 }, + + { DBGBUS_SSPP1, 31, 0 }, + { DBGBUS_SSPP1, 31, 1 }, + { DBGBUS_SSPP1, 31, 2 }, + { DBGBUS_SSPP1, 31, 3 }, + { DBGBUS_SSPP1, 31, 4 }, + { DBGBUS_SSPP1, 31, 5 }, + { DBGBUS_SSPP1, 31, 6 }, + { DBGBUS_SSPP1, 31, 7 }, + + { DBGBUS_SSPP1, 32, 0 }, + { DBGBUS_SSPP1, 32, 1 }, + { DBGBUS_SSPP1, 32, 2 }, + { DBGBUS_SSPP1, 32, 3 }, + { DBGBUS_SSPP1, 32, 4 }, + { DBGBUS_SSPP1, 32, 5 }, + { DBGBUS_SSPP1, 32, 6 }, + { DBGBUS_SSPP1, 32, 7 }, + + { DBGBUS_SSPP1, 33, 0 }, + { DBGBUS_SSPP1, 33, 1 }, + { DBGBUS_SSPP1, 33, 2 }, + { DBGBUS_SSPP1, 33, 3 }, + { DBGBUS_SSPP1, 33, 4 }, + { DBGBUS_SSPP1, 33, 5 }, + { DBGBUS_SSPP1, 33, 6 }, + { DBGBUS_SSPP1, 33, 7 }, + + { DBGBUS_SSPP1, 34, 0 }, + { DBGBUS_SSPP1, 34, 1 }, + { DBGBUS_SSPP1, 34, 2 }, + { DBGBUS_SSPP1, 34, 3 }, + { DBGBUS_SSPP1, 34, 4 }, + { DBGBUS_SSPP1, 34, 5 }, + { DBGBUS_SSPP1, 34, 6 }, + { DBGBUS_SSPP1, 34, 7 }, + + { DBGBUS_SSPP1, 35, 0 }, + { DBGBUS_SSPP1, 35, 1 }, + { DBGBUS_SSPP1, 35, 2 }, + + /* dma 1 */ + { DBGBUS_SSPP1, 40, 0 }, + { DBGBUS_SSPP1, 40, 1 }, + { DBGBUS_SSPP1, 40, 2 }, + { DBGBUS_SSPP1, 40, 3 }, + { DBGBUS_SSPP1, 40, 4 }, + { DBGBUS_SSPP1, 40, 5 }, + { DBGBUS_SSPP1, 40, 6 }, + { DBGBUS_SSPP1, 40, 7 }, + + { DBGBUS_SSPP1, 41, 0 }, + { DBGBUS_SSPP1, 41, 1 }, + { DBGBUS_SSPP1, 41, 2 }, + { DBGBUS_SSPP1, 41, 3 }, + { DBGBUS_SSPP1, 41, 4 }, + { DBGBUS_SSPP1, 41, 5 }, + { DBGBUS_SSPP1, 41, 6 }, + { DBGBUS_SSPP1, 41, 7 }, + + { DBGBUS_SSPP1, 42, 0 }, + { DBGBUS_SSPP1, 42, 1 }, + { DBGBUS_SSPP1, 42, 2 }, + { DBGBUS_SSPP1, 42, 3 }, + { DBGBUS_SSPP1, 42, 4 }, + { DBGBUS_SSPP1, 42, 5 }, + { DBGBUS_SSPP1, 42, 6 }, + { DBGBUS_SSPP1, 42, 7 }, + + { DBGBUS_SSPP1, 44, 0 }, + { DBGBUS_SSPP1, 44, 1 }, + { DBGBUS_SSPP1, 44, 2 }, + { DBGBUS_SSPP1, 44, 3 }, + { DBGBUS_SSPP1, 44, 4 }, + { DBGBUS_SSPP1, 44, 5 }, + { DBGBUS_SSPP1, 44, 6 }, + { DBGBUS_SSPP1, 44, 7 }, + + { DBGBUS_SSPP1, 45, 0 }, + { DBGBUS_SSPP1, 45, 1 }, + { DBGBUS_SSPP1, 45, 2 }, + { DBGBUS_SSPP1, 45, 3 }, + { DBGBUS_SSPP1, 45, 4 }, + { DBGBUS_SSPP1, 45, 5 }, + { DBGBUS_SSPP1, 45, 6 }, + { DBGBUS_SSPP1, 45, 7 }, + + /* cursor 1 */ + { DBGBUS_SSPP1, 80, 0 }, + { DBGBUS_SSPP1, 80, 1 }, + { DBGBUS_SSPP1, 80, 2 }, + { DBGBUS_SSPP1, 80, 3 }, + { DBGBUS_SSPP1, 80, 4 }, + { DBGBUS_SSPP1, 80, 5 }, + { DBGBUS_SSPP1, 80, 6 }, + { DBGBUS_SSPP1, 80, 7 }, + + { DBGBUS_SSPP1, 81, 0 }, + { DBGBUS_SSPP1, 81, 1 }, + { DBGBUS_SSPP1, 81, 2 }, + { DBGBUS_SSPP1, 81, 3 }, + { DBGBUS_SSPP1, 81, 4 }, + { DBGBUS_SSPP1, 81, 5 }, + { DBGBUS_SSPP1, 81, 6 }, + { DBGBUS_SSPP1, 81, 7 }, + + { DBGBUS_SSPP1, 82, 0 }, + { DBGBUS_SSPP1, 82, 1 }, + { DBGBUS_SSPP1, 82, 2 }, + { DBGBUS_SSPP1, 82, 3 }, + { DBGBUS_SSPP1, 82, 4 }, + { DBGBUS_SSPP1, 82, 5 }, + { DBGBUS_SSPP1, 82, 6 }, + { DBGBUS_SSPP1, 82, 7 }, + + { DBGBUS_SSPP1, 83, 0 }, + { DBGBUS_SSPP1, 83, 1 }, + { DBGBUS_SSPP1, 83, 2 }, + { DBGBUS_SSPP1, 83, 3 }, + { DBGBUS_SSPP1, 83, 4 }, + { DBGBUS_SSPP1, 83, 5 }, + { DBGBUS_SSPP1, 83, 6 }, + { DBGBUS_SSPP1, 83, 7 }, + + { DBGBUS_SSPP1, 84, 0 }, + { DBGBUS_SSPP1, 84, 1 }, + { DBGBUS_SSPP1, 84, 2 }, + { DBGBUS_SSPP1, 84, 3 }, + { DBGBUS_SSPP1, 84, 4 }, + { DBGBUS_SSPP1, 84, 5 }, + { DBGBUS_SSPP1, 84, 6 }, + { DBGBUS_SSPP1, 84, 7 }, + + /* dspp */ + { DBGBUS_DSPP, 13, 0 }, + { DBGBUS_DSPP, 19, 0 }, + { DBGBUS_DSPP, 14, 0 }, + { DBGBUS_DSPP, 14, 1 }, + { DBGBUS_DSPP, 14, 3 }, + { DBGBUS_DSPP, 20, 0 }, + { DBGBUS_DSPP, 20, 1 }, + { DBGBUS_DSPP, 20, 3 }, + + /* ppb_0 */ + { DBGBUS_DSPP, 31, 0 }, + { DBGBUS_DSPP, 33, 0 }, + { DBGBUS_DSPP, 35, 
0 }, + { DBGBUS_DSPP, 42, 0 }, + + /* ppb_1 */ + { DBGBUS_DSPP, 32, 0 }, + { DBGBUS_DSPP, 34, 0 }, + { DBGBUS_DSPP, 36, 0 }, + { DBGBUS_DSPP, 43, 0 }, + + /* lm_lut */ + { DBGBUS_DSPP, 109, 0 }, + { DBGBUS_DSPP, 105, 0 }, + { DBGBUS_DSPP, 103, 0 }, + + /* tear-check */ + { DBGBUS_PERIPH, 63, 0 }, + { DBGBUS_PERIPH, 64, 0 }, + { DBGBUS_PERIPH, 65, 0 }, + { DBGBUS_PERIPH, 73, 0 }, + { DBGBUS_PERIPH, 74, 0 }, + + /* crossbar */ + { DBGBUS_DSPP, 0, 0}, + + /* rotator */ + { DBGBUS_DSPP, 9, 0}, + + /* blend */ + /* LM0 */ + { DBGBUS_DSPP, 63, 0}, + { DBGBUS_DSPP, 63, 1}, + { DBGBUS_DSPP, 63, 2}, + { DBGBUS_DSPP, 63, 3}, + { DBGBUS_DSPP, 63, 4}, + { DBGBUS_DSPP, 63, 5}, + { DBGBUS_DSPP, 63, 6}, + { DBGBUS_DSPP, 63, 7}, + + { DBGBUS_DSPP, 64, 0}, + { DBGBUS_DSPP, 64, 1}, + { DBGBUS_DSPP, 64, 2}, + { DBGBUS_DSPP, 64, 3}, + { DBGBUS_DSPP, 64, 4}, + { DBGBUS_DSPP, 64, 5}, + { DBGBUS_DSPP, 64, 6}, + { DBGBUS_DSPP, 64, 7}, + + { DBGBUS_DSPP, 65, 0}, + { DBGBUS_DSPP, 65, 1}, + { DBGBUS_DSPP, 65, 2}, + { DBGBUS_DSPP, 65, 3}, + { DBGBUS_DSPP, 65, 4}, + { DBGBUS_DSPP, 65, 5}, + { DBGBUS_DSPP, 65, 6}, + { DBGBUS_DSPP, 65, 7}, + + { DBGBUS_DSPP, 66, 0}, + { DBGBUS_DSPP, 66, 1}, + { DBGBUS_DSPP, 66, 2}, + { DBGBUS_DSPP, 66, 3}, + { DBGBUS_DSPP, 66, 4}, + { DBGBUS_DSPP, 66, 5}, + { DBGBUS_DSPP, 66, 6}, + { DBGBUS_DSPP, 66, 7}, + + { DBGBUS_DSPP, 67, 0}, + { DBGBUS_DSPP, 67, 1}, + { DBGBUS_DSPP, 67, 2}, + { DBGBUS_DSPP, 67, 3}, + { DBGBUS_DSPP, 67, 4}, + { DBGBUS_DSPP, 67, 5}, + { DBGBUS_DSPP, 67, 6}, + { DBGBUS_DSPP, 67, 7}, + + { DBGBUS_DSPP, 68, 0}, + { DBGBUS_DSPP, 68, 1}, + { DBGBUS_DSPP, 68, 2}, + { DBGBUS_DSPP, 68, 3}, + { DBGBUS_DSPP, 68, 4}, + { DBGBUS_DSPP, 68, 5}, + { DBGBUS_DSPP, 68, 6}, + { DBGBUS_DSPP, 68, 7}, + + { DBGBUS_DSPP, 69, 0}, + { DBGBUS_DSPP, 69, 1}, + { DBGBUS_DSPP, 69, 2}, + { DBGBUS_DSPP, 69, 3}, + { DBGBUS_DSPP, 69, 4}, + { DBGBUS_DSPP, 69, 5}, + { DBGBUS_DSPP, 69, 6}, + { DBGBUS_DSPP, 69, 7}, + + /* LM1 */ + { DBGBUS_DSPP, 70, 0}, + { DBGBUS_DSPP, 70, 1}, + { DBGBUS_DSPP, 70, 2}, + { DBGBUS_DSPP, 70, 3}, + { DBGBUS_DSPP, 70, 4}, + { DBGBUS_DSPP, 70, 5}, + { DBGBUS_DSPP, 70, 6}, + { DBGBUS_DSPP, 70, 7}, + + { DBGBUS_DSPP, 71, 0}, + { DBGBUS_DSPP, 71, 1}, + { DBGBUS_DSPP, 71, 2}, + { DBGBUS_DSPP, 71, 3}, + { DBGBUS_DSPP, 71, 4}, + { DBGBUS_DSPP, 71, 5}, + { DBGBUS_DSPP, 71, 6}, + { DBGBUS_DSPP, 71, 7}, + + { DBGBUS_DSPP, 72, 0}, + { DBGBUS_DSPP, 72, 1}, + { DBGBUS_DSPP, 72, 2}, + { DBGBUS_DSPP, 72, 3}, + { DBGBUS_DSPP, 72, 4}, + { DBGBUS_DSPP, 72, 5}, + { DBGBUS_DSPP, 72, 6}, + { DBGBUS_DSPP, 72, 7}, + + { DBGBUS_DSPP, 73, 0}, + { DBGBUS_DSPP, 73, 1}, + { DBGBUS_DSPP, 73, 2}, + { DBGBUS_DSPP, 73, 3}, + { DBGBUS_DSPP, 73, 4}, + { DBGBUS_DSPP, 73, 5}, + { DBGBUS_DSPP, 73, 6}, + { DBGBUS_DSPP, 73, 7}, + + { DBGBUS_DSPP, 74, 0}, + { DBGBUS_DSPP, 74, 1}, + { DBGBUS_DSPP, 74, 2}, + { DBGBUS_DSPP, 74, 3}, + { DBGBUS_DSPP, 74, 4}, + { DBGBUS_DSPP, 74, 5}, + { DBGBUS_DSPP, 74, 6}, + { DBGBUS_DSPP, 74, 7}, + + { DBGBUS_DSPP, 75, 0}, + { DBGBUS_DSPP, 75, 1}, + { DBGBUS_DSPP, 75, 2}, + { DBGBUS_DSPP, 75, 3}, + { DBGBUS_DSPP, 75, 4}, + { DBGBUS_DSPP, 75, 5}, + { DBGBUS_DSPP, 75, 6}, + { DBGBUS_DSPP, 75, 7}, + + { DBGBUS_DSPP, 76, 0}, + { DBGBUS_DSPP, 76, 1}, + { DBGBUS_DSPP, 76, 2}, + { DBGBUS_DSPP, 76, 3}, + { DBGBUS_DSPP, 76, 4}, + { DBGBUS_DSPP, 76, 5}, + { DBGBUS_DSPP, 76, 6}, + { DBGBUS_DSPP, 76, 7}, + + /* LM2 */ + { DBGBUS_DSPP, 77, 0}, + { DBGBUS_DSPP, 77, 1}, + { DBGBUS_DSPP, 77, 2}, + { DBGBUS_DSPP, 77, 3}, + { DBGBUS_DSPP, 77, 4}, + { DBGBUS_DSPP, 77, 5}, + { DBGBUS_DSPP, 77, 6}, + { 
DBGBUS_DSPP, 77, 7}, + + { DBGBUS_DSPP, 78, 0}, + { DBGBUS_DSPP, 78, 1}, + { DBGBUS_DSPP, 78, 2}, + { DBGBUS_DSPP, 78, 3}, + { DBGBUS_DSPP, 78, 4}, + { DBGBUS_DSPP, 78, 5}, + { DBGBUS_DSPP, 78, 6}, + { DBGBUS_DSPP, 78, 7}, + + { DBGBUS_DSPP, 79, 0}, + { DBGBUS_DSPP, 79, 1}, + { DBGBUS_DSPP, 79, 2}, + { DBGBUS_DSPP, 79, 3}, + { DBGBUS_DSPP, 79, 4}, + { DBGBUS_DSPP, 79, 5}, + { DBGBUS_DSPP, 79, 6}, + { DBGBUS_DSPP, 79, 7}, + + { DBGBUS_DSPP, 80, 0}, + { DBGBUS_DSPP, 80, 1}, + { DBGBUS_DSPP, 80, 2}, + { DBGBUS_DSPP, 80, 3}, + { DBGBUS_DSPP, 80, 4}, + { DBGBUS_DSPP, 80, 5}, + { DBGBUS_DSPP, 80, 6}, + { DBGBUS_DSPP, 80, 7}, + + { DBGBUS_DSPP, 81, 0}, + { DBGBUS_DSPP, 81, 1}, + { DBGBUS_DSPP, 81, 2}, + { DBGBUS_DSPP, 81, 3}, + { DBGBUS_DSPP, 81, 4}, + { DBGBUS_DSPP, 81, 5}, + { DBGBUS_DSPP, 81, 6}, + { DBGBUS_DSPP, 81, 7}, + + { DBGBUS_DSPP, 82, 0}, + { DBGBUS_DSPP, 82, 1}, + { DBGBUS_DSPP, 82, 2}, + { DBGBUS_DSPP, 82, 3}, + { DBGBUS_DSPP, 82, 4}, + { DBGBUS_DSPP, 82, 5}, + { DBGBUS_DSPP, 82, 6}, + { DBGBUS_DSPP, 82, 7}, + + { DBGBUS_DSPP, 83, 0}, + { DBGBUS_DSPP, 83, 1}, + { DBGBUS_DSPP, 83, 2}, + { DBGBUS_DSPP, 83, 3}, + { DBGBUS_DSPP, 83, 4}, + { DBGBUS_DSPP, 83, 5}, + { DBGBUS_DSPP, 83, 6}, + { DBGBUS_DSPP, 83, 7}, + + /* csc */ + { DBGBUS_SSPP0, 7, 0}, + { DBGBUS_SSPP0, 7, 1}, + { DBGBUS_SSPP0, 27, 0}, + { DBGBUS_SSPP0, 27, 1}, + { DBGBUS_SSPP1, 7, 0}, + { DBGBUS_SSPP1, 7, 1}, + { DBGBUS_SSPP1, 27, 0}, + { DBGBUS_SSPP1, 27, 1}, + + /* pcc */ + { DBGBUS_SSPP0, 3, 3}, + { DBGBUS_SSPP0, 23, 3}, + { DBGBUS_SSPP0, 33, 3}, + { DBGBUS_SSPP0, 43, 3}, + { DBGBUS_SSPP1, 3, 3}, + { DBGBUS_SSPP1, 23, 3}, + { DBGBUS_SSPP1, 33, 3}, + { DBGBUS_SSPP1, 43, 3}, + + /* spa */ + { DBGBUS_SSPP0, 8, 0}, + { DBGBUS_SSPP0, 28, 0}, + { DBGBUS_SSPP1, 8, 0}, + { DBGBUS_SSPP1, 28, 0}, + { DBGBUS_DSPP, 13, 0}, + { DBGBUS_DSPP, 19, 0}, + + /* igc */ + { DBGBUS_SSPP0, 9, 0}, + { DBGBUS_SSPP0, 9, 1}, + { DBGBUS_SSPP0, 9, 3}, + { DBGBUS_SSPP0, 29, 0}, + { DBGBUS_SSPP0, 29, 1}, + { DBGBUS_SSPP0, 29, 3}, + { DBGBUS_SSPP0, 17, 0}, + { DBGBUS_SSPP0, 17, 1}, + { DBGBUS_SSPP0, 17, 3}, + { DBGBUS_SSPP0, 37, 0}, + { DBGBUS_SSPP0, 37, 1}, + { DBGBUS_SSPP0, 37, 3}, + { DBGBUS_SSPP0, 46, 0}, + { DBGBUS_SSPP0, 46, 1}, + { DBGBUS_SSPP0, 46, 3}, + + { DBGBUS_SSPP1, 9, 0}, + { DBGBUS_SSPP1, 9, 1}, + { DBGBUS_SSPP1, 9, 3}, + { DBGBUS_SSPP1, 29, 0}, + { DBGBUS_SSPP1, 29, 1}, + { DBGBUS_SSPP1, 29, 3}, + { DBGBUS_SSPP1, 17, 0}, + { DBGBUS_SSPP1, 17, 1}, + { DBGBUS_SSPP1, 17, 3}, + { DBGBUS_SSPP1, 37, 0}, + { DBGBUS_SSPP1, 37, 1}, + { DBGBUS_SSPP1, 37, 3}, + { DBGBUS_SSPP1, 46, 0}, + { DBGBUS_SSPP1, 46, 1}, + { DBGBUS_SSPP1, 46, 3}, + + { DBGBUS_DSPP, 14, 0}, + { DBGBUS_DSPP, 14, 1}, + { DBGBUS_DSPP, 14, 3}, + { DBGBUS_DSPP, 20, 0}, + { DBGBUS_DSPP, 20, 1}, + { DBGBUS_DSPP, 20, 3}, + + { DBGBUS_PERIPH, 60, 0}, +}; + +static struct vbif_debug_bus_entry vbif_dbg_bus_msm8998[] = { + {0x214, 0x21c, 16, 2, 0x0, 0xd}, /* arb clients */ + {0x214, 0x21c, 16, 2, 0x80, 0xc0}, /* arb clients */ + {0x214, 0x21c, 16, 2, 0x100, 0x140}, /* arb clients */ + {0x214, 0x21c, 0, 16, 0x0, 0xf}, /* xin blocks - axi side */ + {0x214, 0x21c, 0, 16, 0x80, 0xa4}, /* xin blocks - axi side */ + {0x214, 0x21c, 0, 15, 0x100, 0x124}, /* xin blocks - axi side */ + {0x21c, 0x214, 0, 14, 0, 0xc}, /* xin blocks - clock side */ +}; + +/** + * _sde_dbg_enable_power - use callback to turn power on for hw register access + * @enable: whether to turn power on or off + */ +static inline void _sde_dbg_enable_power(int enable) +{ + if (!sde_dbg_base.power_ctrl.enable_fn) + return; + 
sde_dbg_base.power_ctrl.enable_fn( + sde_dbg_base.power_ctrl.handle, + sde_dbg_base.power_ctrl.client, + enable); +} + +/** + * _sde_dump_reg - helper function for dumping rotator register set content + * @dump_name: register set name + * @reg_dump_flag: dumping flag controlling in-log/memory dump location + * @base_addr: starting address of io region for calculating offsets to print + * @addr: starting address offset for dumping + * @len_bytes: range of the register set + * @dump_mem: output buffer for memory dump location option + * @from_isr: whether being called from isr context + */ +static void _sde_dump_reg(const char *dump_name, u32 reg_dump_flag, + char __iomem *base_addr, char __iomem *addr, size_t len_bytes, + u32 **dump_mem, bool from_isr) +{ + u32 in_log, in_mem, len_align, len_padded; + u32 *dump_addr = NULL; + char __iomem *end_addr; + int i; + + if (!len_bytes) + return; + + in_log = (reg_dump_flag & SDE_DBG_DUMP_IN_LOG); + in_mem = (reg_dump_flag & SDE_DBG_DUMP_IN_MEM); + + pr_debug("%s: reg_dump_flag=%d in_log=%d in_mem=%d\n", + dump_name, reg_dump_flag, in_log, in_mem); + + if (!in_log && !in_mem) + return; + + if (in_log) + dev_info(sde_dbg_base.dev, "%s: start_offset 0x%lx len 0x%zx\n", + dump_name, addr - base_addr, len_bytes); + + len_align = (len_bytes + REG_DUMP_ALIGN - 1) / REG_DUMP_ALIGN; + len_padded = len_align * REG_DUMP_ALIGN; + end_addr = addr + len_bytes; + + if (in_mem) { + if (dump_mem && !(*dump_mem)) { + phys_addr_t phys = 0; + *dump_mem = dma_alloc_coherent(sde_dbg_base.dev, + len_padded, &phys, GFP_KERNEL); + } + + if (dump_mem && *dump_mem) { + dump_addr = *dump_mem; + dev_info(sde_dbg_base.dev, + "%s: start_addr:0x%pK len:0x%x reg_offset=0x%lx\n", + dump_name, dump_addr, len_padded, + addr - base_addr); + } else { + in_mem = 0; + pr_err("dump_mem: kzalloc fails!\n"); + } + } + + if (!from_isr) + _sde_dbg_enable_power(true); + + for (i = 0; i < len_align; i++) { + u32 x0, x4, x8, xc; + + x0 = (addr < end_addr) ? readl_relaxed(addr + 0x0) : 0; + x4 = (addr + 0x4 < end_addr) ? readl_relaxed(addr + 0x4) : 0; + x8 = (addr + 0x8 < end_addr) ? readl_relaxed(addr + 0x8) : 0; + xc = (addr + 0xc < end_addr) ? 
readl_relaxed(addr + 0xc) : 0; + + if (in_log) + dev_info(sde_dbg_base.dev, + "0x%lx : %08x %08x %08x %08x\n", + addr - base_addr, x0, x4, x8, xc); + + if (dump_addr) { + dump_addr[i * 4] = x0; + dump_addr[i * 4 + 1] = x4; + dump_addr[i * 4 + 2] = x8; + dump_addr[i * 4 + 3] = xc; + } + + addr += REG_DUMP_ALIGN; + } + + if (!from_isr) + _sde_dbg_enable_power(false); +} + +/** + * _sde_dbg_get_dump_range - helper to retrieve dump length for a range node + * @range_node: range node to dump + * @max_offset: max offset of the register base + * @Return: length + */ +static u32 _sde_dbg_get_dump_range(struct sde_dbg_reg_offset *range_node, + size_t max_offset) +{ + u32 length = 0; + + if ((range_node->start > range_node->end) || + (range_node->end > max_offset) || (range_node->start == 0 + && range_node->end == 0)) { + length = max_offset; + } else { + length = range_node->end - range_node->start; + } + + return length; +} + +static int _sde_dump_reg_range_cmp(void *priv, struct list_head *a, + struct list_head *b) +{ + struct sde_dbg_reg_range *ar, *br; + + if (!a || !b) + return 0; + + ar = container_of(a, struct sde_dbg_reg_range, head); + br = container_of(b, struct sde_dbg_reg_range, head); + + return ar->offset.start - br->offset.start; +} + +/** + * _sde_dump_reg_by_ranges - dump ranges or full range of the register blk base + * @dbg: register blk base structure + * @reg_dump_flag: dump target, memory, kernel log, or both + */ +static void _sde_dump_reg_by_ranges(struct sde_dbg_reg_base *dbg, + u32 reg_dump_flag) +{ + char __iomem *addr; + size_t len; + struct sde_dbg_reg_range *range_node; + + if (!dbg || !dbg->base) { + pr_err("dbg base is null!\n"); + return; + } + + dev_info(sde_dbg_base.dev, "%s:=========%s DUMP=========\n", __func__, + dbg->name); + + /* If there is a list to dump the registers by ranges, use the ranges */ + if (!list_empty(&dbg->sub_range_list)) { + /* sort the list by start address first */ + list_sort(NULL, &dbg->sub_range_list, _sde_dump_reg_range_cmp); + list_for_each_entry(range_node, &dbg->sub_range_list, head) { + len = _sde_dbg_get_dump_range(&range_node->offset, + dbg->max_offset); + addr = dbg->base + range_node->offset.start; + pr_debug("%s: range_base=0x%pK start=0x%x end=0x%x\n", + range_node->range_name, + addr, range_node->offset.start, + range_node->offset.end); + + _sde_dump_reg(range_node->range_name, reg_dump_flag, + dbg->base, addr, len, + &range_node->reg_dump, false); + } + } else { + /* If there is no list to dump ranges, dump all registers */ + dev_info(sde_dbg_base.dev, + "Ranges not found, will dump full registers\n"); + dev_info(sde_dbg_base.dev, "base:0x%pK len:0x%zx\n", dbg->base, + dbg->max_offset); + addr = dbg->base; + len = dbg->max_offset; + _sde_dump_reg(dbg->name, reg_dump_flag, dbg->base, addr, len, + &dbg->reg_dump, false); + } +} + +/** + * _sde_dump_reg_by_blk - dump a named register base region + * @blk_name: register blk name + */ +static void _sde_dump_reg_by_blk(const char *blk_name) +{ + struct sde_dbg_base *dbg_base = &sde_dbg_base; + struct sde_dbg_reg_base *blk_base; + + if (!dbg_base) + return; + + list_for_each_entry(blk_base, &dbg_base->reg_base_list, reg_base_head) { + if (strlen(blk_base->name) && + !strcmp(blk_base->name, blk_name)) { + _sde_dump_reg_by_ranges(blk_base, + dbg_base->enable_reg_dump); + break; + } + } +} + +/** + * _sde_dump_reg_all - dump all register regions + */ +static void _sde_dump_reg_all(void) +{ + struct sde_dbg_base *dbg_base = &sde_dbg_base; + struct sde_dbg_reg_base *blk_base; + + if 
(!dbg_base) + return; + + list_for_each_entry(blk_base, &dbg_base->reg_base_list, reg_base_head) + if (strlen(blk_base->name)) + _sde_dump_reg_by_blk(blk_base->name); +} + +/** + * _sde_dump_get_blk_addr - retrieve register block address by name + * @blk_name: register blk name + * @Return: register blk base, or NULL + */ +static struct sde_dbg_reg_base *_sde_dump_get_blk_addr(const char *blk_name) +{ + struct sde_dbg_base *dbg_base = &sde_dbg_base; + struct sde_dbg_reg_base *blk_base; + + list_for_each_entry(blk_base, &dbg_base->reg_base_list, reg_base_head) + if (strlen(blk_base->name) && !strcmp(blk_base->name, blk_name)) + return blk_base; + + return NULL; +} + +static void _sde_dbg_dump_sde_dbg_bus(struct sde_dbg_sde_debug_bus *bus) +{ + bool in_log, in_mem; + u32 **dump_mem = NULL; + u32 *dump_addr = NULL; + u32 status = 0; + struct sde_debug_bus_entry *head; + phys_addr_t phys = 0; + int list_size; + int i; + u32 offset; + void __iomem *mem_base = NULL; + struct sde_dbg_reg_base *reg_base; + + if (!bus || !bus->cmn.entries_size) + return; + + list_for_each_entry(reg_base, &sde_dbg_base.reg_base_list, + reg_base_head) + if (strlen(reg_base->name) && + !strcmp(reg_base->name, bus->cmn.name)) + mem_base = reg_base->base + bus->top_blk_off; + + if (!mem_base) { + pr_err("unable to find mem_base for %s\n", bus->cmn.name); + return; + } + + dump_mem = &bus->cmn.dumped_content; + + /* will keep in memory 4 entries of 4 bytes each */ + list_size = (bus->cmn.entries_size * 4 * 4); + + in_log = (bus->cmn.enable_mask & SDE_DBG_DUMP_IN_LOG); + in_mem = (bus->cmn.enable_mask & SDE_DBG_DUMP_IN_MEM); + + if (!in_log && !in_mem) + return; + + dev_info(sde_dbg_base.dev, "======== start %s dump =========\n", + bus->cmn.name); + + if (in_mem) { + if (!(*dump_mem)) + *dump_mem = dma_alloc_coherent(sde_dbg_base.dev, + list_size, &phys, GFP_KERNEL); + + if (*dump_mem) { + dump_addr = *dump_mem; + dev_info(sde_dbg_base.dev, + "%s: start_addr:0x%pK len:0x%x\n", + __func__, dump_addr, list_size); + } else { + in_mem = false; + pr_err("dump_mem: allocation fails\n"); + } + } + + _sde_dbg_enable_power(true); + for (i = 0; i < bus->cmn.entries_size; i++) { + head = bus->entries + i; + writel_relaxed(TEST_MASK(head->block_id, head->test_id), + mem_base + head->wr_addr); + wmb(); /* make sure test bits were written */ + + if (bus->cmn.flags & DBGBUS_FLAGS_DSPP) + offset = DBGBUS_DSPP_STATUS; + else + offset = head->wr_addr + 0x4; + + status = readl_relaxed(mem_base + offset); + + if (in_log) + dev_info(sde_dbg_base.dev, + "waddr=0x%x blk=%d tst=%d val=0x%x\n", + head->wr_addr, head->block_id, + head->test_id, status); + + if (dump_addr && in_mem) { + dump_addr[i*4] = head->wr_addr; + dump_addr[i*4 + 1] = head->block_id; + dump_addr[i*4 + 2] = head->test_id; + dump_addr[i*4 + 3] = status; + } + + /* Disable debug bus once we are done */ + writel_relaxed(0, mem_base + head->wr_addr); + + } + _sde_dbg_enable_power(false); + + dev_info(sde_dbg_base.dev, "======== end %s dump =========\n", + bus->cmn.name); +} + +static void _sde_dbg_dump_vbif_debug_bus_entry( + struct vbif_debug_bus_entry *head, void __iomem *mem_base, + u32 *dump_addr, bool in_log) +{ + int i, j; + u32 val; + + if (!dump_addr && !in_log) + return; + + for (i = 0; i < head->block_cnt; i++) { + writel_relaxed(1 << (i + head->bit_offset), + mem_base + head->block_bus_addr); + /* make sure that current bus blcok enable */ + wmb(); + for (j = head->test_pnt_start; j < head->test_pnt_cnt; j++) { + writel_relaxed(j, mem_base + head->block_bus_addr + 4); + 
/* make sure that test point is enabled */ + wmb(); + val = readl_relaxed(mem_base + MMSS_VBIF_TEST_BUS_OUT); + if (dump_addr) { + *dump_addr++ = head->block_bus_addr; + *dump_addr++ = i; + *dump_addr++ = j; + *dump_addr++ = val; + } + if (in_log) + dev_info(sde_dbg_base.dev, + "testpoint:%x arb/xin id=%d index=%d val=0x%x\n", + head->block_bus_addr, i, j, val); + } + } +} + +static void _sde_dbg_dump_vbif_dbg_bus(struct sde_dbg_vbif_debug_bus *bus) +{ + bool in_log, in_mem; + u32 **dump_mem = NULL; + u32 *dump_addr = NULL; + u32 value; + struct vbif_debug_bus_entry *head; + phys_addr_t phys = 0; + int i, list_size = 0; + void __iomem *mem_base = NULL; + struct vbif_debug_bus_entry *dbg_bus; + u32 bus_size; + struct sde_dbg_reg_base *reg_base; + + if (!bus || !bus->cmn.entries_size) + return; + + list_for_each_entry(reg_base, &sde_dbg_base.reg_base_list, + reg_base_head) + if (strlen(reg_base->name) && + !strcmp(reg_base->name, bus->cmn.name)) + mem_base = reg_base->base; + + if (!mem_base) { + pr_err("unable to find mem_base for %s\n", bus->cmn.name); + return; + } + + dbg_bus = bus->entries; + bus_size = bus->cmn.entries_size; + list_size = bus->cmn.entries_size; + dump_mem = &bus->cmn.dumped_content; + + dev_info(sde_dbg_base.dev, "======== start %s dump =========\n", + bus->cmn.name); + + if (!dump_mem || !dbg_bus || !bus_size || !list_size) + return; + + /* allocate memory for each test point */ + for (i = 0; i < bus_size; i++) { + head = dbg_bus + i; + list_size += (head->block_cnt * head->test_pnt_cnt); + } + + /* 4 bytes * 4 entries for each test point*/ + list_size *= 16; + + in_log = (bus->cmn.enable_mask & SDE_DBG_DUMP_IN_LOG); + in_mem = (bus->cmn.enable_mask & SDE_DBG_DUMP_IN_MEM); + + if (!in_log && !in_mem) + return; + + if (in_mem) { + if (!(*dump_mem)) + *dump_mem = dma_alloc_coherent(sde_dbg_base.dev, + list_size, &phys, GFP_KERNEL); + + if (*dump_mem) { + dump_addr = *dump_mem; + dev_info(sde_dbg_base.dev, + "%s: start_addr:0x%pK len:0x%x\n", + __func__, dump_addr, list_size); + } else { + in_mem = false; + pr_err("dump_mem: allocation fails\n"); + } + } + + _sde_dbg_enable_power(true); + + value = readl_relaxed(mem_base + MMSS_VBIF_CLKON); + writel_relaxed(value | BIT(1), mem_base + MMSS_VBIF_CLKON); + + /* make sure that vbif core is on */ + wmb(); + + for (i = 0; i < bus_size; i++) { + head = dbg_bus + i; + + writel_relaxed(0, mem_base + head->disable_bus_addr); + writel_relaxed(BIT(0), mem_base + MMSS_VBIF_TEST_BUS_OUT_CTRL); + /* make sure that other bus is off */ + wmb(); + + _sde_dbg_dump_vbif_debug_bus_entry(head, mem_base, dump_addr, + in_log); + if (dump_addr) + dump_addr += (head->block_cnt * head->test_pnt_cnt * 4); + } + + _sde_dbg_enable_power(false); + + dev_info(sde_dbg_base.dev, "======== end %s dump =========\n", + bus->cmn.name); +} + +/** + * _sde_dump_array - dump array of register bases + * @blk_arr: array of register base pointers + * @len: length of blk_arr + * @do_panic: whether to trigger a panic after dumping + * @name: string indicating origin of dump + * @dump_dbgbus_sde: whether to dump the sde debug bus + * @dump_dbgbus_vbif_rt: whether to dump the vbif rt debug bus + */ +static void _sde_dump_array(struct sde_dbg_reg_base *blk_arr[], + u32 len, bool do_panic, const char *name, bool dump_dbgbus_sde, + bool dump_dbgbus_vbif_rt) +{ + int i; + + for (i = 0; i < len; i++) { + if (blk_arr[i] != NULL) + _sde_dump_reg_by_ranges(blk_arr[i], + sde_dbg_base.enable_reg_dump); + } + + sde_evtlog_dump_all(sde_dbg_base.evtlog); + + if (dump_dbgbus_sde) + 
_sde_dbg_dump_sde_dbg_bus(&sde_dbg_base.dbgbus_sde); + + if (dump_dbgbus_vbif_rt) + _sde_dbg_dump_vbif_dbg_bus(&sde_dbg_base.dbgbus_vbif_rt); + + if (do_panic && sde_dbg_base.panic_on_err) + panic(name); +} + +/** + * _sde_dump_work - deferred dump work function + * @work: work structure + */ +static void _sde_dump_work(struct work_struct *work) +{ + _sde_dump_array(sde_dbg_base.req_dump_blks, + ARRAY_SIZE(sde_dbg_base.req_dump_blks), + sde_dbg_base.work_panic, "evtlog_workitem", + sde_dbg_base.dbgbus_sde.cmn.include_in_deferred_work, + sde_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work); +} + +void sde_dbg_dump(bool queue_work, const char *name, ...) +{ + int i, index = 0; + bool do_panic = false; + bool dump_dbgbus_sde = false; + bool dump_dbgbus_vbif_rt = false; + va_list args; + char *blk_name = NULL; + struct sde_dbg_reg_base *blk_base = NULL; + struct sde_dbg_reg_base **blk_arr; + u32 blk_len; + + if (!sde_evtlog_is_enabled(sde_dbg_base.evtlog, SDE_EVTLOG_DEFAULT)) + return; + + if (queue_work && work_pending(&sde_dbg_base.dump_work)) + return; + + blk_arr = &sde_dbg_base.req_dump_blks[0]; + blk_len = ARRAY_SIZE(sde_dbg_base.req_dump_blks); + + memset(sde_dbg_base.req_dump_blks, 0, + sizeof(sde_dbg_base.req_dump_blks)); + + va_start(args, name); + i = 0; + while ((blk_name = va_arg(args, char*))) { + if (i++ >= SDE_EVTLOG_MAX_DATA) { + pr_err("could not parse all dump arguments\n"); + break; + } + if (IS_ERR_OR_NULL(blk_name)) + break; + + blk_base = _sde_dump_get_blk_addr(blk_name); + if (blk_base) { + if (index < blk_len) { + blk_arr[index] = blk_base; + index++; + } else { + pr_err("insufficient space to to dump %s\n", + blk_name); + } + } + + if (!strcmp(blk_name, "dbg_bus")) + dump_dbgbus_sde = true; + + if (!strcmp(blk_name, "vbif_dbg_bus")) + dump_dbgbus_vbif_rt = true; + + if (!strcmp(blk_name, "panic")) + do_panic = true; + } + va_end(args); + + if (queue_work) { + /* schedule work to dump later */ + sde_dbg_base.work_panic = do_panic; + sde_dbg_base.dbgbus_sde.cmn.include_in_deferred_work = + dump_dbgbus_sde; + sde_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work = + dump_dbgbus_vbif_rt; + schedule_work(&sde_dbg_base.dump_work); + } else { + _sde_dump_array(blk_arr, blk_len, do_panic, name, + dump_dbgbus_sde, dump_dbgbus_vbif_rt); + } +} + +/* + * sde_dbg_debugfs_open - debugfs open handler for evtlog dump + * @inode: debugfs inode + * @file: file handle + */ +static int sde_dbg_debugfs_open(struct inode *inode, struct file *file) +{ + /* non-seekable */ + file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE); + file->private_data = inode->i_private; + return 0; +} + +/** + * sde_evtlog_dump_read - debugfs read handler for evtlog dump + * @file: file handler + * @buff: user buffer content for debugfs + * @count: size of user buffer + * @ppos: position offset of user buffer + */ +static ssize_t sde_evtlog_dump_read(struct file *file, char __user *buff, + size_t count, loff_t *ppos) +{ + ssize_t len = 0; + char evtlog_buf[SDE_EVTLOG_BUF_MAX]; + + len = sde_evtlog_dump_to_buffer(sde_dbg_base.evtlog, evtlog_buf, + SDE_EVTLOG_BUF_MAX); + if (copy_to_user(buff, evtlog_buf, len)) + return -EFAULT; + *ppos += len; + + return len; +} + +/** + * sde_evtlog_dump_write - debugfs write handler for evtlog dump + * @file: file handler + * @user_buf: user buffer content from debugfs + * @count: size of user buffer + * @ppos: position offset of user buffer + */ +static ssize_t sde_evtlog_dump_write(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) 
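/*
 * Aside, illustration only: whether triggered from code via SDE_DBG_DUMP()
 * or from userspace through the debugfs "dump" node created in
 * sde_dbg_init() further below, the sequence is the same: named register
 * ranges, then the event log, then the sde and vbif debug buses, then an
 * optional panic(). A hypothetical debugfs session, assuming the standard
 * debugfs mount point and the drm debugfs root for card 0:
 *
 *   echo 1 > /sys/kernel/debug/dri/0/evt_dbg/dump    # trigger a full dump
 *   cat /sys/kernel/debug/dri/0/evt_dbg/dump         # read evtlog entries
 *   echo 0 > /sys/kernel/debug/dri/0/evt_dbg/panic   # clear panic_on_err
 */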
+{ + _sde_dump_reg_all(); + + sde_evtlog_dump_all(sde_dbg_base.evtlog); + + _sde_dbg_dump_sde_dbg_bus(&sde_dbg_base.dbgbus_sde); + _sde_dbg_dump_vbif_dbg_bus(&sde_dbg_base.dbgbus_vbif_rt); + + if (sde_dbg_base.panic_on_err) + panic("sde"); + + return count; +} + +static const struct file_operations sde_evtlog_fops = { + .open = sde_dbg_debugfs_open, + .read = sde_evtlog_dump_read, + .write = sde_evtlog_dump_write, +}; + +void sde_dbg_init_dbg_buses(u32 hwversion) +{ + static struct sde_dbg_base *dbg = &sde_dbg_base; + char debug_name[80] = ""; + + memset(&dbg->dbgbus_sde, 0, sizeof(dbg->dbgbus_sde)); + memset(&dbg->dbgbus_vbif_rt, 0, sizeof(dbg->dbgbus_vbif_rt)); + + switch (hwversion) { + case SDE_HW_VER_300: + case SDE_HW_VER_301: + dbg->dbgbus_sde.entries = dbg_bus_sde_8998; + dbg->dbgbus_sde.cmn.entries_size = ARRAY_SIZE(dbg_bus_sde_8998); + dbg->dbgbus_sde.cmn.flags = DBGBUS_FLAGS_DSPP; + + dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998; + dbg->dbgbus_vbif_rt.cmn.entries_size = + ARRAY_SIZE(vbif_dbg_bus_msm8998); + break; + default: + pr_err("unsupported chipset id %u\n", hwversion); + break; + } + + if (dbg->dbgbus_sde.entries) { + dbg->dbgbus_sde.cmn.name = DBGBUS_NAME_SDE; + snprintf(debug_name, sizeof(debug_name), "%s_dbgbus", + dbg->dbgbus_sde.cmn.name); + dbg->dbgbus_sde.cmn.enable_mask = DEFAULT_DBGBUS_SDE; + debugfs_create_u32(debug_name, 0600, dbg->root, + &dbg->dbgbus_sde.cmn.enable_mask); + } + + if (dbg->dbgbus_vbif_rt.entries) { + dbg->dbgbus_vbif_rt.cmn.name = DBGBUS_NAME_VBIF_RT; + snprintf(debug_name, sizeof(debug_name), "%s_dbgbus", + dbg->dbgbus_vbif_rt.cmn.name); + dbg->dbgbus_vbif_rt.cmn.enable_mask = DEFAULT_DBGBUS_VBIFRT; + debugfs_create_u32(debug_name, 0600, dbg->root, + &dbg->dbgbus_vbif_rt.cmn.enable_mask); + } +} + +int sde_dbg_init(struct dentry *debugfs_root, struct device *dev, + struct sde_dbg_power_ctrl *power_ctrl) +{ + int i; + + INIT_LIST_HEAD(&sde_dbg_base.reg_base_list); + sde_dbg_base.dev = dev; + sde_dbg_base.power_ctrl = *power_ctrl; + + + sde_dbg_base.evtlog = sde_evtlog_init(); + if (IS_ERR_OR_NULL(sde_dbg_base.evtlog)) + return PTR_ERR(sde_dbg_base.evtlog); + + sde_dbg_base_evtlog = sde_dbg_base.evtlog; + + sde_dbg_base.root = debugfs_create_dir("evt_dbg", debugfs_root); + if (IS_ERR_OR_NULL(sde_dbg_base.root)) { + pr_err("debugfs_create_dir fail, error %ld\n", + PTR_ERR(sde_dbg_base.root)); + sde_dbg_base.root = NULL; + return -ENODEV; + } + + INIT_WORK(&sde_dbg_base.dump_work, _sde_dump_work); + sde_dbg_base.work_panic = false; + + for (i = 0; i < SDE_EVTLOG_ENTRY; i++) + sde_dbg_base.evtlog->logs[i].counter = i; + + debugfs_create_file("dump", 0600, sde_dbg_base.root, NULL, + &sde_evtlog_fops); + debugfs_create_u32("enable", 0600, sde_dbg_base.root, + &(sde_dbg_base.evtlog->enable)); + debugfs_create_u32("panic", 0600, sde_dbg_base.root, + &sde_dbg_base.panic_on_err); + debugfs_create_u32("reg_dump", 0600, sde_dbg_base.root, + &sde_dbg_base.enable_reg_dump); + + sde_dbg_base.panic_on_err = DEFAULT_PANIC; + sde_dbg_base.enable_reg_dump = DEFAULT_REGDUMP; + + pr_info("evtlog_status: enable:%d, panic:%d, dump:%d\n", + sde_dbg_base.evtlog->enable, sde_dbg_base.panic_on_err, + sde_dbg_base.enable_reg_dump); + + return 0; +} + +/** + * sde_dbg_destroy - destroy sde debug facilities + */ +void sde_dbg_destroy(void) +{ + debugfs_remove_recursive(sde_dbg_base.root); + sde_dbg_base.root = NULL; + + sde_dbg_base_evtlog = NULL; + sde_evtlog_destroy(sde_dbg_base.evtlog); + sde_dbg_base.evtlog = NULL; +} + +/** + * sde_dbg_reg_base_release - release 
allocated reg dump file private data + * @inode: debugfs inode + * @file: file handle + * @Return: 0 on success + */ +static int sde_dbg_reg_base_release(struct inode *inode, struct file *file) +{ + struct sde_dbg_reg_base *dbg = file->private_data; + + if (dbg && dbg->buf) { + kfree(dbg->buf); + dbg->buf_len = 0; + dbg->buf = NULL; + } + return 0; +} + + +/** + * sde_dbg_reg_base_offset_write - set new offset and len to debugfs reg base + * @file: file handler + * @user_buf: user buffer content from debugfs + * @count: size of user buffer + * @ppos: position offset of user buffer + */ +static ssize_t sde_dbg_reg_base_offset_write(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + struct sde_dbg_reg_base *dbg = file->private_data; + u32 off = 0; + u32 cnt = DEFAULT_BASE_REG_CNT; + char buf[24]; + + if (!dbg) + return -ENODEV; + + if (count >= sizeof(buf)) + return -EFAULT; + + if (copy_from_user(buf, user_buf, count)) + return -EFAULT; + + buf[count] = 0; /* end of string */ + + if (sscanf(buf, "%x %x", &off, &cnt) != 2) + return -EFAULT; + + if (off > dbg->max_offset) + return -EINVAL; + + if (off % sizeof(u32)) + return -EINVAL; + + if (cnt > (dbg->max_offset - off)) + cnt = dbg->max_offset - off; + + if (cnt % sizeof(u32)) + return -EINVAL; + + dbg->off = off; + dbg->cnt = cnt; + + pr_debug("offset=%x cnt=%x\n", off, cnt); + + return count; +} + +/** + * sde_dbg_reg_base_offset_read - read current offset and len of register base + * @file: file handler + * @user_buf: user buffer content from debugfs + * @count: size of user buffer + * @ppos: position offset of user buffer + */ +static ssize_t sde_dbg_reg_base_offset_read(struct file *file, + char __user *buff, size_t count, loff_t *ppos) +{ + struct sde_dbg_reg_base *dbg = file->private_data; + int len = 0; + char buf[24] = {'\0'}; + + if (!dbg) + return -ENODEV; + + if (*ppos) + return 0; /* the end */ + + len = snprintf(buf, sizeof(buf), "0x%08zx %zx\n", dbg->off, dbg->cnt); + if (len < 0 || len >= sizeof(buf)) + return 0; + + if ((count < sizeof(buf)) || copy_to_user(buff, buf, len)) + return -EFAULT; + + *ppos += len; /* increase offset */ + + return len; +} + +/** + * sde_dbg_reg_base_reg_write - write to reg base hw at offset a given value + * @file: file handler + * @user_buf: user buffer content from debugfs + * @count: size of user buffer + * @ppos: position offset of user buffer + */ +static ssize_t sde_dbg_reg_base_reg_write(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + struct sde_dbg_reg_base *dbg = file->private_data; + size_t off; + u32 data, cnt; + char buf[24]; + + if (!dbg) + return -ENODEV; + + if (count >= sizeof(buf)) + return -EFAULT; + + if (copy_from_user(buf, user_buf, count)) + return -EFAULT; + + buf[count] = 0; /* end of string */ + + cnt = sscanf(buf, "%zx %x", &off, &data); + + if (cnt < 2) + return -EFAULT; + + if (off >= dbg->max_offset) + return -EFAULT; + + _sde_dbg_enable_power(true); + + writel_relaxed(data, dbg->base + off); + + _sde_dbg_enable_power(false); + + pr_debug("addr=%zx data=%x\n", off, data); + + return count; +} + +/** + * sde_dbg_reg_base_reg_read - read len from reg base hw at current offset + * @file: file handler + * @user_buf: user buffer content from debugfs + * @count: size of user buffer + * @ppos: position offset of user buffer + */ +static ssize_t sde_dbg_reg_base_reg_read(struct file *file, + char __user *user_buf, size_t count, loff_t *ppos) +{ + struct sde_dbg_reg_base *dbg = file->private_data; + size_t 
len; + + if (!dbg) { + pr_err("invalid handle\n"); + return -ENODEV; + } + + if (!dbg->buf) { + char *hwbuf; + char dump_buf[64]; + char __iomem *ioptr; + int cnt, tot; + + dbg->buf_len = sizeof(dump_buf) * + DIV_ROUND_UP(dbg->cnt, ROW_BYTES); + + if (dbg->buf_len % sizeof(u32)) + return -EINVAL; + + dbg->buf = kzalloc(dbg->buf_len, GFP_KERNEL); + + if (!dbg->buf) + return -ENOMEM; + + hwbuf = kzalloc(ROW_BYTES, GFP_KERNEL); + if (!hwbuf) { + kfree(dbg->buf); + return -ENOMEM; + } + + ioptr = dbg->base + dbg->off; + tot = 0; + _sde_dbg_enable_power(true); + + for (cnt = dbg->cnt; cnt > 0; cnt -= ROW_BYTES) { + memcpy_fromio(hwbuf, ioptr, ROW_BYTES); + hex_dump_to_buffer(hwbuf, + min(cnt, ROW_BYTES), + ROW_BYTES, GROUP_BYTES, dump_buf, + sizeof(dump_buf), false); + len = scnprintf(dbg->buf + tot, dbg->buf_len - tot, + "0x%08x: %s\n", + ((int) (unsigned long) ioptr) - + ((int) (unsigned long) dbg->base), + dump_buf); + + ioptr += ROW_BYTES; + tot += len; + if (tot >= dbg->buf_len) + break; + } + + _sde_dbg_enable_power(false); + + dbg->buf_len = tot; + kfree(hwbuf); + } + + if (*ppos >= dbg->buf_len) + return 0; /* done reading */ + + len = min(count, dbg->buf_len - (size_t) *ppos); + if (copy_to_user(user_buf, dbg->buf + *ppos, len)) { + pr_err("failed to copy to user\n"); + return -EFAULT; + } + + *ppos += len; /* increase offset */ + + return len; +} + +static const struct file_operations sde_off_fops = { + .open = sde_dbg_debugfs_open, + .release = sde_dbg_reg_base_release, + .read = sde_dbg_reg_base_offset_read, + .write = sde_dbg_reg_base_offset_write, +}; + +static const struct file_operations sde_reg_fops = { + .open = sde_dbg_debugfs_open, + .release = sde_dbg_reg_base_release, + .read = sde_dbg_reg_base_reg_read, + .write = sde_dbg_reg_base_reg_write, +}; + +int sde_dbg_reg_register_base(const char *name, void __iomem *base, + size_t max_offset) +{ + struct sde_dbg_base *dbg_base = &sde_dbg_base; + struct sde_dbg_reg_base *reg_base; + struct dentry *ent_off, *ent_reg; + char dn[80] = ""; + int prefix_len = 0; + + reg_base = kzalloc(sizeof(*reg_base), GFP_KERNEL); + if (!reg_base) + return -ENOMEM; + + if (name) + strlcpy(reg_base->name, name, sizeof(reg_base->name)); + reg_base->base = base; + reg_base->max_offset = max_offset; + reg_base->off = 0; + reg_base->cnt = DEFAULT_BASE_REG_CNT; + reg_base->reg_dump = NULL; + + if (name) + prefix_len = snprintf(dn, sizeof(dn), "%s_", name); + strlcpy(dn + prefix_len, "off", sizeof(dn) - prefix_len); + ent_off = debugfs_create_file(dn, 0600, dbg_base->root, reg_base, + &sde_off_fops); + if (IS_ERR_OR_NULL(ent_off)) { + pr_err("debugfs_create_file: offset fail\n"); + goto off_fail; + } + + strlcpy(dn + prefix_len, "reg", sizeof(dn) - prefix_len); + ent_reg = debugfs_create_file(dn, 0600, dbg_base->root, reg_base, + &sde_reg_fops); + if (IS_ERR_OR_NULL(ent_reg)) { + pr_err("debugfs_create_file: reg fail\n"); + goto reg_fail; + } + + /* Initialize list to make sure check for null list will be valid */ + INIT_LIST_HEAD(®_base->sub_range_list); + + pr_debug("%s base: %pK max_offset 0x%zX\n", reg_base->name, + reg_base->base, reg_base->max_offset); + + list_add(®_base->reg_base_head, &dbg_base->reg_base_list); + + return 0; +reg_fail: + debugfs_remove(ent_off); +off_fail: + kfree(reg_base); + return -ENODEV; +} + +void sde_dbg_reg_register_dump_range(const char *base_name, + const char *range_name, u32 offset_start, u32 offset_end, + uint32_t xin_id) +{ + struct sde_dbg_reg_base *reg_base; + struct sde_dbg_reg_range *range; + + reg_base = 
_sde_dump_get_blk_addr(base_name); + if (!reg_base) { + pr_err("error: for range %s unable to locate base %s\n", + range_name, base_name); + return; + } + + if (!range_name || strlen(range_name) == 0) { + pr_err("%pS: bad range name, base_name %s, offset_start 0x%X, end 0x%X\n", + __builtin_return_address(0), base_name, + offset_start, offset_end); + return; + } + + if (offset_end - offset_start < REG_DUMP_ALIGN || + offset_start > offset_end) { + pr_err("%pS: bad range, base_name %s, range_name %s, offset_start 0x%X, end 0x%X\n", + __builtin_return_address(0), base_name, + range_name, offset_start, offset_end); + return; + } + + range = kzalloc(sizeof(*range), GFP_KERNEL); + if (!range) + return; + + strlcpy(range->range_name, range_name, sizeof(range->range_name)); + range->offset.start = offset_start; + range->offset.end = offset_end; + range->xin_id = xin_id; + list_add_tail(&range->head, ®_base->sub_range_list); + + pr_debug("base %s, range %s, start 0x%X, end 0x%X\n", + base_name, range->range_name, + range->offset.start, range->offset.end); +} + +void sde_dbg_set_sde_top_offset(u32 blk_off) +{ + sde_dbg_base.dbgbus_sde.top_blk_off = blk_off; +} diff --git a/drivers/gpu/drm/msm/sde_dbg.h b/drivers/gpu/drm/msm/sde_dbg.h index 271c41f05ce5..74fd4c94b490 100644 --- a/drivers/gpu/drm/msm/sde_dbg.h +++ b/drivers/gpu/drm/msm/sde_dbg.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -29,34 +29,288 @@ enum sde_dbg_evtlog_flag { SDE_EVTLOG_ALL = BIT(7) }; +enum sde_dbg_dump_flag { + SDE_DBG_DUMP_IN_LOG = BIT(0), + SDE_DBG_DUMP_IN_MEM = BIT(1), +}; + +#ifdef CONFIG_DRM_SDE_EVTLOG_DEBUG +#define SDE_EVTLOG_DEFAULT_ENABLE 1 +#else +#define SDE_EVTLOG_DEFAULT_ENABLE 0 +#endif + +/* + * evtlog will print this number of entries when it is called through + * sysfs node or panic. This prevents kernel log from evtlog message + * flood. + */ +#define SDE_EVTLOG_PRINT_ENTRY 256 + +/* + * evtlog keeps this number of entries in memory for debug purpose. This + * number must be greater than print entry to prevent out of bound evtlog + * entry array access. + */ +#define SDE_EVTLOG_ENTRY (SDE_EVTLOG_PRINT_ENTRY * 4) +#define SDE_EVTLOG_MAX_DATA 15 +#define SDE_EVTLOG_BUF_MAX 512 +#define SDE_EVTLOG_BUF_ALIGN 32 + +struct sde_dbg_power_ctrl { + void *handle; + void *client; + int (*enable_fn)(void *handle, void *client, bool enable); +}; + +struct sde_dbg_evtlog_log { + u32 counter; + s64 time; + const char *name; + int line; + u32 data[SDE_EVTLOG_MAX_DATA]; + u32 data_cnt; + int pid; +}; + +struct sde_dbg_evtlog { + struct sde_dbg_evtlog_log logs[SDE_EVTLOG_ENTRY]; + u32 first; + u32 last; + u32 curr; + u32 next; + u32 enable; + spinlock_t spin_lock; +}; + +extern struct sde_dbg_evtlog *sde_dbg_base_evtlog; + +/** + * SDE_EVT32 - Write a list of 32bit values to the event log, default area + * ... - variable arguments + */ +#define SDE_EVT32(...) sde_evtlog_log(sde_dbg_base_evtlog, __func__, \ + __LINE__, SDE_EVTLOG_DEFAULT, ##__VA_ARGS__, \ + SDE_EVTLOG_DATA_LIMITER) + /** - * SDE_EVT32 - Write an list of 32bit values as an event into the event log + * SDE_EVT32_IRQ - Write a list of 32bit values to the event log, IRQ area * ... - variable arguments */ -#define SDE_EVT32(...) 
sde_evtlog(__func__, __LINE__, SDE_EVTLOG_DEFAULT, \ - ##__VA_ARGS__, SDE_EVTLOG_DATA_LIMITER) -#define SDE_EVT32_IRQ(...) sde_evtlog(__func__, __LINE__, SDE_EVTLOG_IRQ, \ - ##__VA_ARGS__, SDE_EVTLOG_DATA_LIMITER) +#define SDE_EVT32_IRQ(...) sde_evtlog_log(sde_dbg_base_evtlog, __func__, \ + __LINE__, SDE_EVTLOG_IRQ, ##__VA_ARGS__, \ + SDE_EVTLOG_DATA_LIMITER) -#define SDE_DBG_DUMP(...) \ - sde_dbg_dump(false, __func__, ##__VA_ARGS__, \ +/** + * SDE_DBG_DUMP - trigger dumping of all sde_dbg facilities + * @va_args: list of named register dump ranges and regions to dump, as + * registered previously through sde_dbg_reg_register_base and + * sde_dbg_reg_register_dump_range. + * Including the special name "panic" will trigger a panic after + * the dumping work has completed. + */ +#define SDE_DBG_DUMP(...) sde_dbg_dump(false, __func__, ##__VA_ARGS__, \ SDE_DBG_DUMP_DATA_LIMITER) -#define SDE_DBG_DUMP_WQ(...) \ - sde_dbg_dump(true, __func__, ##__VA_ARGS__, \ +/** + * SDE_DBG_DUMP_WQ - trigger dumping of all sde_dbg facilities, queuing the work + * @va_args: list of named register dump ranges and regions to dump, as + * registered previously through sde_dbg_reg_register_base and + * sde_dbg_reg_register_dump_range. + * Including the special name "panic" will trigger a panic after + * the dumping work has completed. + */ +#define SDE_DBG_DUMP_WQ(...) sde_dbg_dump(true, __func__, ##__VA_ARGS__, \ SDE_DBG_DUMP_DATA_LIMITER) #if defined(CONFIG_DEBUG_FS) -int sde_evtlog_init(struct dentry *debugfs_root); -void sde_evtlog_destroy(void); -void sde_evtlog(const char *name, int line, int flag, ...); -void sde_dbg_dump(bool queue, const char *name, ...); +/** + * sde_evtlog_init - allocate a new event log object + * Returns: evtlog or -ERROR + */ +struct sde_dbg_evtlog *sde_evtlog_init(void); + +/** + * sde_evtlog_destroy - destroy previously allocated event log + * @evtlog: pointer to evtlog + * Returns: none + */ +void sde_evtlog_destroy(struct sde_dbg_evtlog *evtlog); + +/** + * sde_evtlog_log - log an entry into the event log. + * log collection may be enabled/disabled entirely via debugfs + * log area collection may be filtered by user provided flags via debugfs. 
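A minimal standalone sketch of the area-filter rule just described (illustrative only; the real check is sde_evtlog_is_enabled() later in this series, and the constant merely stands in for the SDE_EVTLOG_* flags):

#include <stdbool.h>
#include <stdint.h>

#define EVTLOG_ALL (1u << 7)            /* stands in for SDE_EVTLOG_ALL */

/* keep an entry when its area bit is set in the debugfs enable mask, or
 * when the caller passes the "all" flag and logging is enabled at all */
static bool area_enabled(uint32_t enable_mask, uint32_t flag)
{
        return (flag & enable_mask) ||
               (flag == EVTLOG_ALL && enable_mask);
}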
+ * @evtlog: pointer to evtlog + * @name: function name of call site + * @line: line number of call site + * @flag: log area filter flag checked against user's debugfs request + * Returns: none + */ +void sde_evtlog_log(struct sde_dbg_evtlog *evtlog, const char *name, int line, + int flag, ...); + +/** + * sde_evtlog_dump_all - print all entries in event log to kernel log + * @evtlog: pointer to evtlog + * Returns: none + */ +void sde_evtlog_dump_all(struct sde_dbg_evtlog *evtlog); + +/** + * sde_evtlog_is_enabled - check whether log collection is enabled for given + * event log and log area flag + * @evtlog: pointer to evtlog + * @flag: log area filter flag + * Returns: none + */ +bool sde_evtlog_is_enabled(struct sde_dbg_evtlog *evtlog, u32 flag); + +/** + * sde_evtlog_dump_to_buffer - print content of event log to the given buffer + * @evtlog: pointer to evtlog + * @evtlog_buf: target buffer to print into + * @evtlog_buf_size: size of target buffer + * Returns: number of bytes written to buffer + */ +ssize_t sde_evtlog_dump_to_buffer(struct sde_dbg_evtlog *evtlog, + char *evtlog_buf, ssize_t evtlog_buf_size); + +/** + * sde_dbg_init_dbg_buses - initialize debug bus dumping support for the chipset + * @hwversion: Chipset revision + */ +void sde_dbg_init_dbg_buses(u32 hwversion); + +/** + * sde_dbg_init - initialize global sde debug facilities: evtlog, regdump + * @debugfs_root: debugfs root in which to create sde debug entries + * @dev: device handle + * @power_ctrl: power control callback structure for enabling clocks + * during register dumping + * Returns: 0 or -ERROR + */ +int sde_dbg_init(struct dentry *debugfs_root, struct device *dev, + struct sde_dbg_power_ctrl *power_ctrl); + +/** + * sde_dbg_destroy - destroy the global sde debug facilities + * Returns: none + */ +void sde_dbg_destroy(void); + +/** + * sde_dbg_dump - trigger dumping of all sde_dbg facilities + * @queue_work: whether to queue the dumping work to the work_struct + * @name: string indicating origin of dump + * @va_args: list of named register dump ranges and regions to dump, as + * registered previously through sde_dbg_reg_register_base and + * sde_dbg_reg_register_dump_range. + * Including the special name "panic" will trigger a panic after + * the dumping work has completed. + * Returns: none + */ +void sde_dbg_dump(bool queue_work, const char *name, ...); + +/** + * sde_dbg_reg_register_base - register a hw register address section for later + * dumping. call this before calling sde_dbg_reg_register_dump_range + * to be able to specify sub-ranges within the base hw range. + * @name: name of base region + * @base: base pointer of region + * @max_offset: length of region + * Returns: 0 or -ERROR + */ +int sde_dbg_reg_register_base(const char *name, void __iomem *base, + size_t max_offset); + +/** + * sde_dbg_reg_register_dump_range - register a hw register sub-region for + * later register dumping associated with base specified by + * sde_dbg_reg_register_base + * @base_name: name of base region + * @range_name: name of sub-range within base region + * @offset_start: sub-range's start offset from base's base pointer + * @offset_end: sub-range's end offset from base's base pointer + * @xin_id: xin id + * Returns: none + */ +void sde_dbg_reg_register_dump_range(const char *base_name, + const char *range_name, u32 offset_start, u32 offset_end, + uint32_t xin_id); + +/** + * sde_dbg_set_sde_top_offset - set the target specific offset from mdss base + * address of the top registers. 
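For context, a hedged sketch of how a client driver might use the two registration calls documented above from its probe path; the block name, sub-range names, offsets, xin ids and the ioremap'd resource are purely illustrative and not taken from this patch:

/* illustrative probe-time registration of one block and two sub-ranges */
void __iomem *mdp_base = ioremap(res->start, resource_size(res));

sde_dbg_reg_register_base("mdp", mdp_base, resource_size(res));
sde_dbg_reg_register_dump_range("mdp", "ctl_0", 0x1000, 0x11ff, 0);
sde_dbg_reg_register_dump_range("mdp", "ctl_1", 0x1200, 0x13ff, 1);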
Used for accessing debug bus controls. + * @blk_off: offset from mdss base of the top block + */ +void sde_dbg_set_sde_top_offset(u32 blk_off); #else -static inline int sde_evtlog_init(struct dentry *debugfs_root) { return 0; } -static inline void sde_evtlog(const char *name, int line, flag, ...) {} -static inline void sde_evtlog_destroy(void) { } -static inline void sde_dbg_dump(bool queue, const char *name, ...) {} -#endif +static inline struct sde_dbg_evtlog *sde_evtlog_init(void) +{ + return NULL; +} + +static inline void sde_evtlog_destroy(struct sde_dbg_evtlog *evtlog) +{ +} + +static inline void sde_evtlog_log(struct sde_dbg_evtlog *evtlog, + const char *name, int line, int flag, ...) +{ +} + +static inline void sde_evtlog_dump_all(struct sde_dbg_evtlog *evtlog) +{ +} + +static inline bool sde_evtlog_is_enabled(struct sde_dbg_evtlog *evtlog, + u32 flag) +{ + return false; +} + +static inline ssize_t sde_evtlog_dump_to_buffer(struct sde_dbg_evtlog *evtlog, + char *evtlog_buf, ssize_t evtlog_buf_size) +{ + return 0; +} + +void sde_dbg_init_dbg_buses(u32 hwversion) +{ +} + +static inline int sde_dbg_init(struct dentry *debugfs_root, struct device *dev, + struct sde_dbg_power_ctrl *power_ctrl) +{ + return 0; +} + +static inline void sde_dbg_destroy(void) +{ +} + +static inline void sde_dbg_dump(bool queue_work, const char *name, ...) +{ +} + +static inline int sde_dbg_reg_register_base(const char *name, + void __iomem *base, size_t max_offset) +{ + return 0; +} + +static inline void sde_dbg_reg_register_dump_range(const char *base_name, + const char *range_name, u32 offset_start, u32 offset_end, + uint32_t xin_id) +{ +} + +void sde_dbg_set_sde_top_offset(u32 blk_off) +{ +} +#endif /* defined(CONFIG_DEBUG_FS) */ + #endif /* SDE_DBG_H_ */ diff --git a/drivers/gpu/drm/msm/sde_dbg_evtlog.c b/drivers/gpu/drm/msm/sde_dbg_evtlog.c index 72832776659d..759bdab48840 100644 --- a/drivers/gpu/drm/msm/sde_dbg_evtlog.c +++ b/drivers/gpu/drm/msm/sde_dbg_evtlog.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -10,7 +10,7 @@ * GNU General Public License for more details. */ -#define pr_fmt(fmt) "sde_evtlog:[%s] " fmt, __func__ +#define pr_fmt(fmt) "sde_dbg:[%s] " fmt, __func__ #include <linux/delay.h> #include <linux/spinlock.h> @@ -18,77 +18,36 @@ #include <linux/debugfs.h> #include <linux/uaccess.h> #include <linux/dma-buf.h> +#include <linux/slab.h> #include "sde_dbg.h" #include "sde_trace.h" -#ifdef CONFIG_DRM_SDE_EVTLOG_DEBUG -#define SDE_EVTLOG_DEFAULT_ENABLE 1 -#else -#define SDE_EVTLOG_DEFAULT_ENABLE 0 -#endif - -#define SDE_DBG_DEFAULT_PANIC 1 - -/* - * evtlog will print this number of entries when it is called through - * sysfs node or panic. This prevents kernel log from evtlog message - * flood. - */ -#define SDE_EVTLOG_PRINT_ENTRY 256 - -/* - * evtlog keeps this number of entries in memory for debug purpose. This - * number must be greater than print entry to prevent out of bound evtlog - * entry array access. 
- */ -#define SDE_EVTLOG_ENTRY (SDE_EVTLOG_PRINT_ENTRY * 4) -#define SDE_EVTLOG_MAX_DATA 15 -#define SDE_EVTLOG_BUF_MAX 512 -#define SDE_EVTLOG_BUF_ALIGN 32 - -DEFINE_SPINLOCK(sde_evtloglock); - -struct tlog { - u32 counter; - s64 time; - const char *name; - int line; - u32 data[SDE_EVTLOG_MAX_DATA]; - u32 data_cnt; - int pid; -}; - -static struct sde_dbg_evtlog { - struct tlog logs[SDE_EVTLOG_ENTRY]; - u32 first; - u32 last; - u32 curr; - struct dentry *evtlog; - u32 evtlog_enable; - u32 panic_on_err; - struct work_struct evtlog_dump_work; - bool work_panic; -} sde_dbg_evtlog; - -static inline bool sde_evtlog_is_enabled(u32 flag) +bool sde_evtlog_is_enabled(struct sde_dbg_evtlog *evtlog, u32 flag) { - return (flag & sde_dbg_evtlog.evtlog_enable) || - (flag == SDE_EVTLOG_ALL && sde_dbg_evtlog.evtlog_enable); + if (!evtlog) + return false; + + return (flag & evtlog->enable) || + (flag == SDE_EVTLOG_ALL && evtlog->enable); } -void sde_evtlog(const char *name, int line, int flag, ...) +void sde_evtlog_log(struct sde_dbg_evtlog *evtlog, const char *name, int line, + int flag, ...) { unsigned long flags; int i, val = 0; va_list args; - struct tlog *log; + struct sde_dbg_evtlog_log *log; + + if (!evtlog) + return; - if (!sde_evtlog_is_enabled(flag)) + if (!sde_evtlog_is_enabled(evtlog, flag)) return; - spin_lock_irqsave(&sde_evtloglock, flags); - log = &sde_dbg_evtlog.logs[sde_dbg_evtlog.curr]; + spin_lock_irqsave(&evtlog->spin_lock, flags); + log = &evtlog->logs[evtlog->curr]; log->time = ktime_to_us(ktime_get()); log->name = name; log->line = line; @@ -106,26 +65,27 @@ void sde_evtlog(const char *name, int line, int flag, ...) } va_end(args); log->data_cnt = i; - sde_dbg_evtlog.curr = (sde_dbg_evtlog.curr + 1) % SDE_EVTLOG_ENTRY; - sde_dbg_evtlog.last++; + evtlog->curr = (evtlog->curr + 1) % SDE_EVTLOG_ENTRY; + evtlog->last++; trace_sde_evtlog(name, line, i > 0 ? log->data[0] : 0, i > 1 ? 
log->data[1] : 0); - spin_unlock_irqrestore(&sde_evtloglock, flags); + spin_unlock_irqrestore(&evtlog->spin_lock, flags); } /* always dump the last entries which are not dumped yet */ -static bool _sde_evtlog_dump_calc_range(void) +static bool _sde_evtlog_dump_calc_range(struct sde_dbg_evtlog *evtlog) { - static u32 next; bool need_dump = true; unsigned long flags; - struct sde_dbg_evtlog *evtlog = &sde_dbg_evtlog; - spin_lock_irqsave(&sde_evtloglock, flags); + if (!evtlog) + return false; + + spin_lock_irqsave(&evtlog->spin_lock, flags); - evtlog->first = next; + evtlog->first = evtlog->next; if (evtlog->last == evtlog->first) { need_dump = false; @@ -143,27 +103,34 @@ static bool _sde_evtlog_dump_calc_range(void) evtlog->last - evtlog->first); evtlog->first = evtlog->last - SDE_EVTLOG_PRINT_ENTRY; } - next = evtlog->first + 1; + evtlog->next = evtlog->first + 1; dump_exit: - spin_unlock_irqrestore(&sde_evtloglock, flags); + spin_unlock_irqrestore(&evtlog->spin_lock, flags); return need_dump; } -static ssize_t sde_evtlog_dump_entry(char *evtlog_buf, ssize_t evtlog_buf_size) +ssize_t sde_evtlog_dump_to_buffer(struct sde_dbg_evtlog *evtlog, + char *evtlog_buf, ssize_t evtlog_buf_size) { int i; ssize_t off = 0; - struct tlog *log, *prev_log; + struct sde_dbg_evtlog_log *log, *prev_log; unsigned long flags; - spin_lock_irqsave(&sde_evtloglock, flags); + if (!evtlog || !evtlog_buf) + return 0; - log = &sde_dbg_evtlog.logs[sde_dbg_evtlog.first % - SDE_EVTLOG_ENTRY]; + /* update markers, exit if nothing to print */ + if (!_sde_evtlog_dump_calc_range(evtlog)) + return 0; + + spin_lock_irqsave(&evtlog->spin_lock, flags); - prev_log = &sde_dbg_evtlog.logs[(sde_dbg_evtlog.first - 1) % + log = &evtlog->logs[evtlog->first % SDE_EVTLOG_ENTRY]; + + prev_log = &evtlog->logs[(evtlog->first - 1) % SDE_EVTLOG_ENTRY]; off = snprintf((evtlog_buf + off), (evtlog_buf_size - off), "%s:%-4d", @@ -175,7 +142,7 @@ static ssize_t sde_evtlog_dump_entry(char *evtlog_buf, ssize_t evtlog_buf_size) } off += snprintf((evtlog_buf + off), (evtlog_buf_size - off), - "=>[%-8d:%-11llu:%9llu][%-4d]:", sde_dbg_evtlog.first, + "=>[%-8d:%-11llu:%9llu][%-4d]:", evtlog->first, log->time, (log->time - prev_log->time), log->pid); for (i = 0; i < log->data_cnt; i++) @@ -184,143 +151,37 @@ static ssize_t sde_evtlog_dump_entry(char *evtlog_buf, ssize_t evtlog_buf_size) off += snprintf((evtlog_buf + off), (evtlog_buf_size - off), "\n"); - spin_unlock_irqrestore(&sde_evtloglock, flags); + spin_unlock_irqrestore(&evtlog->spin_lock, flags); return off; } -static void _sde_evtlog_dump_all(void) -{ - char evtlog_buf[SDE_EVTLOG_BUF_MAX]; - - while (_sde_evtlog_dump_calc_range()) { - sde_evtlog_dump_entry(evtlog_buf, SDE_EVTLOG_BUF_MAX); - pr_info("%s", evtlog_buf); - } -} - -static void _sde_dump_array(bool dead, const char *name) -{ - _sde_evtlog_dump_all(); - - if (dead && sde_dbg_evtlog.panic_on_err) - panic(name); -} - -static void _sde_dump_work(struct work_struct *work) +void sde_evtlog_dump_all(struct sde_dbg_evtlog *evtlog) { - _sde_dump_array(sde_dbg_evtlog.work_panic, "evtlog_workitem"); -} - -void sde_dbg_dump(bool queue, const char *name, ...) 
-{ - int i; - bool dead = false; - va_list args; - char *blk_name = NULL; - - if (!sde_evtlog_is_enabled(SDE_EVTLOG_DEFAULT)) - return; + char buf[SDE_EVTLOG_BUF_MAX]; - if (queue && work_pending(&sde_dbg_evtlog.evtlog_dump_work)) + if (!evtlog) return; - va_start(args, name); - for (i = 0; i < SDE_EVTLOG_MAX_DATA; i++) { - blk_name = va_arg(args, char*); - if (IS_ERR_OR_NULL(blk_name)) - break; - - if (!strcmp(blk_name, "panic")) - dead = true; - } - va_end(args); - - if (queue) { - /* schedule work to dump later */ - sde_dbg_evtlog.work_panic = dead; - schedule_work(&sde_dbg_evtlog.evtlog_dump_work); - } else { - _sde_dump_array(dead, name); - } + while (sde_evtlog_dump_to_buffer(evtlog, buf, sizeof(buf))) + pr_info("%s", buf); } -static int sde_evtlog_dump_open(struct inode *inode, struct file *file) +struct sde_dbg_evtlog *sde_evtlog_init(void) { - /* non-seekable */ - file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE); - file->private_data = inode->i_private; - return 0; -} - -static ssize_t sde_evtlog_dump_read(struct file *file, char __user *buff, - size_t count, loff_t *ppos) -{ - ssize_t len = 0; - char evtlog_buf[SDE_EVTLOG_BUF_MAX]; - - if (_sde_evtlog_dump_calc_range()) { - len = sde_evtlog_dump_entry(evtlog_buf, SDE_EVTLOG_BUF_MAX); - if (copy_to_user(buff, evtlog_buf, len)) - return -EFAULT; - *ppos += len; - } - - return len; -} - -static ssize_t sde_evtlog_dump_write(struct file *file, - const char __user *user_buf, size_t count, loff_t *ppos) -{ - _sde_evtlog_dump_all(); - - if (sde_dbg_evtlog.panic_on_err) - panic("sde"); - - return count; -} - -static const struct file_operations sde_evtlog_fops = { - .open = sde_evtlog_dump_open, - .read = sde_evtlog_dump_read, - .write = sde_evtlog_dump_write, -}; - -int sde_evtlog_init(struct dentry *debugfs_root) -{ - int i; - - sde_dbg_evtlog.evtlog = debugfs_create_dir("evt_dbg", debugfs_root); - if (IS_ERR_OR_NULL(sde_dbg_evtlog.evtlog)) { - pr_err("debugfs_create_dir fail, error %ld\n", - PTR_ERR(sde_dbg_evtlog.evtlog)); - sde_dbg_evtlog.evtlog = NULL; - return -ENODEV; - } - - INIT_WORK(&sde_dbg_evtlog.evtlog_dump_work, _sde_dump_work); - sde_dbg_evtlog.work_panic = false; - - for (i = 0; i < SDE_EVTLOG_ENTRY; i++) - sde_dbg_evtlog.logs[i].counter = i; - - debugfs_create_file("dump", 0644, sde_dbg_evtlog.evtlog, NULL, - &sde_evtlog_fops); - debugfs_create_u32("enable", 0644, sde_dbg_evtlog.evtlog, - &sde_dbg_evtlog.evtlog_enable); - debugfs_create_u32("panic", 0644, sde_dbg_evtlog.evtlog, - &sde_dbg_evtlog.panic_on_err); + struct sde_dbg_evtlog *evtlog; - sde_dbg_evtlog.evtlog_enable = SDE_EVTLOG_DEFAULT_ENABLE; - sde_dbg_evtlog.panic_on_err = SDE_DBG_DEFAULT_PANIC; + evtlog = kzalloc(sizeof(*evtlog), GFP_KERNEL); + if (!evtlog) + return ERR_PTR(-ENOMEM); - pr_info("evtlog_status: enable:%d, panic:%d\n", - sde_dbg_evtlog.evtlog_enable, sde_dbg_evtlog.panic_on_err); + spin_lock_init(&evtlog->spin_lock); + evtlog->enable = SDE_EVTLOG_DEFAULT_ENABLE; - return 0; + return evtlog; } -void sde_evtlog_destroy(void) +void sde_evtlog_destroy(struct sde_dbg_evtlog *evtlog) { - debugfs_remove(sde_dbg_evtlog.evtlog); + kfree(evtlog); } diff --git a/drivers/gpu/drm/msm/sde_hdcp_1x.c b/drivers/gpu/drm/msm/sde_hdcp_1x.c index d08cf13c448d..c2d29a084c7f 100644 --- a/drivers/gpu/drm/msm/sde_hdcp_1x.c +++ b/drivers/gpu/drm/msm/sde_hdcp_1x.c @@ -1201,9 +1201,16 @@ static int sde_hdcp_1x_authentication_part2(struct sde_hdcp_1x *hdcp) if (rc) goto error; - /* do not proceed further if no device connected */ - if 
(!hdcp->current_tp.dev_count) + /* + * Do not proceed further if no device connected + * If no downstream devices are attached to the repeater + * then part II fails. + */ + + if (!hdcp->current_tp.dev_count) { + rc = -EINVAL; goto error; + } rc = sde_hdcp_1x_write_ksv_fifo(hdcp); } while (--v_retry && rc); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index 9255b9c096b6..9befd624a5f0 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -148,8 +148,8 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? OTAR2 : OTAR, 0); /* Signal polarities */ - value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : DSMR_VSL) - | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : DSMR_HSL) + value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? DSMR_VSL : 0) + | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? DSMR_HSL : 0) | DSMR_DIPM_DE | DSMR_CSPM; rcar_du_crtc_write(rcrtc, DSMR, value); @@ -171,7 +171,7 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) mode->crtc_vsync_start - 1); rcar_du_crtc_write(rcrtc, VCR, mode->crtc_vtotal - 1); - rcar_du_crtc_write(rcrtc, DESR, mode->htotal - mode->hsync_start); + rcar_du_crtc_write(rcrtc, DESR, mode->htotal - mode->hsync_start - 1); rcar_du_crtc_write(rcrtc, DEWR, mode->hdisplay); } diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index bf4674aa6405..bb9cd35d7fdf 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -296,7 +296,7 @@ static int rcar_du_probe(struct platform_device *pdev) mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); rcdu->mmio = devm_ioremap_resource(&pdev->dev, mem); if (IS_ERR(rcdu->mmio)) - ret = PTR_ERR(rcdu->mmio); + return PTR_ERR(rcdu->mmio); /* DRM/KMS objects */ ddev = drm_dev_alloc(&rcar_du_driver, &pdev->dev); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index 46429c4be8e5..2b75a4891dec 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c @@ -642,13 +642,13 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu, } ret = rcar_du_encoder_init(rcdu, enc_type, output, encoder, connector); - of_node_put(encoder); - of_node_put(connector); - if (ret && ret != -EPROBE_DEFER) dev_warn(rcdu->dev, - "failed to initialize encoder %s (%d), skipping\n", - encoder->full_name, ret); + "failed to initialize encoder %s on output %u (%d), skipping\n", + of_node_full_name(encoder), output, ret); + + of_node_put(encoder); + of_node_put(connector); return ret; } diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c index 85043c5bad03..873e04aa9352 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c @@ -56,11 +56,11 @@ static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds, return ret; /* PLL clock configuration */ - if (freq <= 38000) + if (freq < 39000) pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_38M; - else if (freq <= 60000) + else if (freq < 61000) pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_60M; - else if (freq <= 121000) + else if (freq < 121000) pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_121M; else pllcr = LVDPLLCR_PLLDLYCNT_150M; @@ -102,7 +102,7 @@ static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds, /* Turn the PLL on, wait for 
the startup delay, and turn the output * on. */ - lvdcr0 |= LVDCR0_PLLEN; + lvdcr0 |= LVDCR0_PLLON; rcar_lvds_write(lvds, LVDCR0, lvdcr0); usleep_range(100, 150); diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h b/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h index 77cf9289ab65..b1eafd097a79 100644 --- a/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h +++ b/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h @@ -18,7 +18,7 @@ #define LVDCR0_DMD (1 << 12) #define LVDCR0_LVMD_MASK (0xf << 8) #define LVDCR0_LVMD_SHIFT 8 -#define LVDCR0_PLLEN (1 << 4) +#define LVDCR0_PLLON (1 << 4) #define LVDCR0_BEN (1 << 2) #define LVDCR0_LVEN (1 << 1) #define LVDCR0_LVRES (1 << 0) diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c index 6a81e084593b..2b59d80a09b8 100644 --- a/drivers/gpu/drm/virtio/virtgpu_fb.c +++ b/drivers/gpu/drm/virtio/virtgpu_fb.c @@ -338,7 +338,7 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper, info->fbops = &virtio_gpufb_ops; info->pixmap.flags = FB_PIXMAP_SYSTEM; - info->screen_base = obj->vmap; + info->screen_buffer = obj->vmap; info->screen_size = obj->gem_base.size; drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); drm_fb_helper_fill_var(info, &vfbdev->helper, diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c index 7cab049771de..c620c7ac1afa 100644 --- a/drivers/gpu/msm/adreno.c +++ b/drivers/gpu/msm/adreno.c @@ -807,13 +807,13 @@ static int adreno_of_get_pwrlevels(struct adreno_device *adreno_dev, struct device_node *parent) { struct device_node *node, *child; + unsigned int bin = 0; node = of_find_node_by_name(parent, "qcom,gpu-pwrlevel-bins"); if (node == NULL) return adreno_of_get_legacy_pwrlevels(adreno_dev, parent); for_each_child_of_node(node, child) { - unsigned int bin; if (of_property_read_u32(child, "qcom,speed-bin", &bin)) continue; @@ -829,6 +829,8 @@ static int adreno_of_get_pwrlevels(struct adreno_device *adreno_dev, } } + KGSL_CORE_ERR("GPU speed_bin:%d mismatch for efused bin:%d\n", + adreno_dev->speed_bin, bin); return -ENODEV; } @@ -1161,6 +1163,10 @@ static int adreno_init(struct kgsl_device *device) struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev); int ret; + if (!adreno_is_a3xx(adreno_dev)) + kgsl_sharedmem_set(device, &device->scratch, 0, 0, + device->scratch.size); + ret = kgsl_pwrctrl_change_state(device, KGSL_STATE_INIT); if (ret) return ret; diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h index 4a0acdcf8844..305163147c1a 100644 --- a/drivers/gpu/msm/adreno.h +++ b/drivers/gpu/msm/adreno.h @@ -184,6 +184,7 @@ enum adreno_gpurev { #define ADRENO_TIMEOUT_FAULT BIT(2) #define ADRENO_IOMMU_PAGE_FAULT BIT(3) #define ADRENO_PREEMPT_FAULT BIT(4) +#define ADRENO_CTX_DETATCH_TIMEOUT_FAULT BIT(5) #define ADRENO_SPTP_PC_CTRL 0 #define ADRENO_PPD_CTRL 1 diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c index 3fb13c7a0814..78f74b883877 100644 --- a/drivers/gpu/msm/adreno_a5xx.c +++ b/drivers/gpu/msm/adreno_a5xx.c @@ -65,8 +65,8 @@ static const struct adreno_vbif_platform a5xx_vbif_platforms[] = { }; static void a5xx_irq_storm_worker(struct work_struct *work); -static int _read_fw2_block_header(uint32_t *header, uint32_t id, - uint32_t major, uint32_t minor); +static int _read_fw2_block_header(uint32_t *header, uint32_t remain, + uint32_t id, uint32_t major, uint32_t minor); static void a5xx_gpmu_reset(struct work_struct *work); static int a5xx_gpmu_init(struct adreno_device *adreno_dev); @@ -709,6 +709,7 @@ static int _load_gpmu_firmware(struct adreno_device 
*adreno_dev) if (data[1] != GPMU_FIRMWARE_ID) goto err; ret = _read_fw2_block_header(&data[2], + data[0] - 2, GPMU_FIRMWARE_ID, adreno_dev->gpucore->gpmu_major, adreno_dev->gpucore->gpmu_minor); @@ -1231,8 +1232,8 @@ void a5xx_hwcg_set(struct adreno_device *adreno_dev, bool on) kgsl_regwrite(device, A5XX_RBBM_ISDB_CNT, on ? 0x00000182 : 0x00000180); } -static int _read_fw2_block_header(uint32_t *header, uint32_t id, - uint32_t major, uint32_t minor) +static int _read_fw2_block_header(uint32_t *header, uint32_t remain, + uint32_t id, uint32_t major, uint32_t minor) { uint32_t header_size; int i = 1; @@ -1242,7 +1243,8 @@ static int _read_fw2_block_header(uint32_t *header, uint32_t id, header_size = header[0]; /* Headers have limited size and always occur as pairs of words */ - if (header_size > MAX_HEADER_SIZE || header_size % 2) + if (header_size > MAX_HEADER_SIZE || header_size >= remain || + header_size % 2 || header_size == 0) return -EINVAL; /* Sequences must have an identifying id first thing in their header */ if (id == GPMU_SEQUENCE_ID) { @@ -1306,8 +1308,8 @@ static void _load_regfile(struct adreno_device *adreno_dev) { struct kgsl_device *device = KGSL_DEVICE(adreno_dev); const struct firmware *fw; - uint32_t block_size = 0, block_total = 0, fw_size; - uint32_t *block; + uint64_t block_size = 0, block_total = 0; + uint32_t fw_size, *block; int ret = -EINVAL; if (!adreno_dev->gpucore->regfw_name) @@ -1329,7 +1331,8 @@ static void _load_regfile(struct adreno_device *adreno_dev) /* All offset numbers calculated from file description */ while (block_total < fw_size) { block_size = block[0]; - if (block_size >= fw_size || block_size < 2) + if (((block_total + block_size) >= fw_size) + || block_size < 5) goto err; if (block[1] != GPMU_SEQUENCE_ID) goto err; @@ -1337,6 +1340,7 @@ static void _load_regfile(struct adreno_device *adreno_dev) /* For now ignore blocks other than the LM sequence */ if (block[4] == LM_SEQUENCE_ID) { ret = _read_fw2_block_header(&block[2], + block_size - 2, GPMU_SEQUENCE_ID, adreno_dev->gpucore->lm_major, adreno_dev->gpucore->lm_minor); @@ -1344,6 +1348,9 @@ static void _load_regfile(struct adreno_device *adreno_dev) goto err; adreno_dev->lm_fw = fw; + + if (block[2] > (block_size - 2)) + goto err; adreno_dev->lm_sequence = block + block[2] + 3; adreno_dev->lm_size = block_size - block[2] - 2; } @@ -1356,7 +1363,7 @@ static void _load_regfile(struct adreno_device *adreno_dev) err: release_firmware(fw); KGSL_PWR_ERR(device, - "Register file failed to load sz=%d bsz=%d header=%d\n", + "Register file failed to load sz=%d bsz=%llu header=%d\n", fw_size, block_size, ret); return; } diff --git a/drivers/gpu/msm/adreno_a5xx_snapshot.c b/drivers/gpu/msm/adreno_a5xx_snapshot.c index 496fc6a9248e..09effbd39a9c 100644 --- a/drivers/gpu/msm/adreno_a5xx_snapshot.c +++ b/drivers/gpu/msm/adreno_a5xx_snapshot.c @@ -765,6 +765,8 @@ static void _a5xx_do_crashdump(struct kgsl_device *device) crash_dump_valid = false; + if (!device->snapshot_crashdumper) + return; if (capturescript.gpuaddr == 0 || registers.gpuaddr == 0) return; @@ -870,8 +872,7 @@ void a5xx_snapshot(struct adreno_device *adreno_dev, ARRAY_SIZE(a5xx_vbif_snapshot_registers)); /* Try to run the crash dumper */ - if (device->snapshot_crashdumper) - _a5xx_do_crashdump(device); + _a5xx_do_crashdump(device); kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS, snapshot, a5xx_snapshot_registers, NULL); diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c index 
1a94e71f5c1d..862d832823f7 100644 --- a/drivers/gpu/msm/adreno_dispatch.c +++ b/drivers/gpu/msm/adreno_dispatch.c @@ -1832,7 +1832,8 @@ static void process_cmdobj_fault(struct kgsl_device *device, * because we won't see this cmdobj again */ - if (fault & ADRENO_TIMEOUT_FAULT) + if ((fault & ADRENO_TIMEOUT_FAULT) || + (fault & ADRENO_CTX_DETATCH_TIMEOUT_FAULT)) bitmap_zero(&cmdobj->fault_policy, BITS_PER_LONG); /* diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c index b8ae24bc3935..3e765a61bd5e 100644 --- a/drivers/gpu/msm/adreno_drawctxt.c +++ b/drivers/gpu/msm/adreno_drawctxt.c @@ -301,6 +301,7 @@ void adreno_drawctxt_invalidate(struct kgsl_device *device, /* Give the bad news to everybody waiting around */ wake_up_all(&drawctxt->waiting); wake_up_all(&drawctxt->wq); + wake_up_all(&drawctxt->timeout); } /* @@ -394,6 +395,7 @@ adreno_drawctxt_create(struct kgsl_device_private *dev_priv, spin_lock_init(&drawctxt->lock); init_waitqueue_head(&drawctxt->wq); init_waitqueue_head(&drawctxt->waiting); + init_waitqueue_head(&drawctxt->timeout); /* Set the context priority */ _set_context_priority(drawctxt); @@ -506,20 +508,32 @@ void adreno_drawctxt_detach(struct kgsl_context *context) drawctxt->internal_timestamp, 30 * 1000); /* - * If the wait for global fails due to timeout then nothing after this - * point is likely to work very well - Get GPU snapshot and BUG_ON() - * so we can take advantage of the debug tools to figure out what the - * h - e - double hockey sticks happened. If EAGAIN error is returned + * If the wait for global fails due to timeout then mark it as + * context detach timeout fault and schedule dispatcher to kick + * in GPU recovery. For a ADRENO_CTX_DETATCH_TIMEOUT_FAULT we clear + * the policy and invalidate the context. If EAGAIN error is returned * then recovery will kick in and there will be no more commands in the - * RB pipe from this context which is waht we are waiting for, so ignore - * -EAGAIN error + * RB pipe from this context which is what we are waiting for, so ignore + * -EAGAIN error. */ if (ret && ret != -EAGAIN) { - KGSL_DRV_ERR(device, "Wait for global ts=%d type=%d error=%d\n", - drawctxt->internal_timestamp, + KGSL_DRV_ERR(device, + "Wait for global ctx=%d ts=%d type=%d error=%d\n", + drawctxt->base.id, drawctxt->internal_timestamp, drawctxt->type, ret); - device->force_panic = 1; - kgsl_device_snapshot(device, context); + + adreno_set_gpu_fault(adreno_dev, + ADRENO_CTX_DETATCH_TIMEOUT_FAULT); + mutex_unlock(&device->mutex); + + /* Schedule dispatcher to kick in recovery */ + adreno_dispatcher_schedule(device); + + /* Wait for context to be invalidated and release context */ + ret = wait_event_interruptible_timeout(drawctxt->timeout, + kgsl_context_invalid(&drawctxt->base), + msecs_to_jiffies(5000)); + return; } kgsl_sharedmem_writel(device, &device->memstore, diff --git a/drivers/gpu/msm/adreno_drawctxt.h b/drivers/gpu/msm/adreno_drawctxt.h index 0578f16ae9e1..07108eaf502f 100644 --- a/drivers/gpu/msm/adreno_drawctxt.h +++ b/drivers/gpu/msm/adreno_drawctxt.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2002,2007-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -40,6 +40,7 @@ struct kgsl_context; * @pending: Priority list node for the dispatcher list of pending contexts * @wq: Workqueue structure for contexts to sleep pending room in the queue * @waiting: Workqueue structure for contexts waiting for a timestamp or event + * @timeout: Workqueue structure for contexts waiting to invalidate * @queued: Number of commands queued in the drawqueue * @fault_policy: GFT fault policy set in _skip_cmd(); * @debug_root: debugfs entry for this context. @@ -68,6 +69,7 @@ struct adreno_context { struct plist_node pending; wait_queue_head_t wq; wait_queue_head_t waiting; + wait_queue_head_t timeout; int queued; unsigned int fault_policy; diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c index ddc53edce3c1..65e73356857f 100644 --- a/drivers/gpu/msm/adreno_ringbuffer.c +++ b/drivers/gpu/msm/adreno_ringbuffer.c @@ -203,8 +203,9 @@ int adreno_ringbuffer_start(struct adreno_device *adreno_dev, FOR_EACH_RINGBUFFER(adreno_dev, rb, i) { kgsl_sharedmem_set(device, &(rb->buffer_desc), 0, 0xAA, KGSL_RB_SIZE); - kgsl_sharedmem_writel(device, &device->scratch, - SCRATCH_RPTR_OFFSET(rb->id), 0); + if (!adreno_is_a3xx(adreno_dev)) + kgsl_sharedmem_writel(device, &device->scratch, + SCRATCH_RPTR_OFFSET(rb->id), 0); rb->wptr = 0; rb->_wptr = 0; rb->wptr_preempt_end = 0xFFFFFFFF; @@ -265,9 +266,16 @@ static int _adreno_ringbuffer_probe(struct adreno_device *adreno_dev, int adreno_ringbuffer_probe(struct adreno_device *adreno_dev, bool nopreempt) { - int status = 0; + struct kgsl_device *device = KGSL_DEVICE(adreno_dev); struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev); - int i; + int i, status; + + if (!adreno_is_a3xx(adreno_dev)) { + status = kgsl_allocate_global(device, &device->scratch, + PAGE_SIZE, 0, 0, "scratch"); + if (status != 0) + return status; + } if (nopreempt == false && ADRENO_FEATURE(adreno_dev, ADRENO_PREEMPTION)) adreno_dev->num_ringbuffers = gpudev->num_prio_levels; @@ -303,9 +311,13 @@ static void _adreno_ringbuffer_close(struct adreno_device *adreno_dev, void adreno_ringbuffer_close(struct adreno_device *adreno_dev) { + struct kgsl_device *device = KGSL_DEVICE(adreno_dev); struct adreno_ringbuffer *rb; int i; + if (!adreno_is_a3xx(adreno_dev)) + kgsl_free_global(device, &device->scratch); + FOR_EACH_RINGBUFFER(adreno_dev, rb, i) _adreno_ringbuffer_close(adreno_dev, rb); } @@ -489,12 +501,17 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb, if (flags & KGSL_CMD_FLAGS_PWRON_FIXUP) total_sizedwords += 9; - /* WAIT_MEM_WRITES - needed in the stall on fault case - * to prevent out of order CP operations that can result - * in a CACHE_FLUSH_TS interrupt storm */ - if (test_bit(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE, + /* Don't insert any commands if stall on fault is not supported. 
*/ + if ((ADRENO_GPUREV(adreno_dev) > 500) && !adreno_is_a510(adreno_dev)) { + /* + * WAIT_MEM_WRITES - needed in the stall on fault case + * to prevent out of order CP operations that can result + * in a CACHE_FLUSH_TS interrupt storm + */ + if (test_bit(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE, &adreno_dev->ft_pf_policy)) - total_sizedwords += 1; + total_sizedwords += 1; + } ringcmds = adreno_ringbuffer_allocspace(rb, total_sizedwords); if (IS_ERR(ringcmds)) @@ -581,14 +598,18 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb, if (profile_ready) adreno_profile_postib_processing(adreno_dev, &flags, &ringcmds); - /* - * WAIT_MEM_WRITES - needed in the stall on fault case to prevent - * out of order CP operations that can result in a CACHE_FLUSH_TS - * interrupt storm - */ - if (test_bit(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE, + /* Don't insert any commands if stall on fault is not supported. */ + if ((ADRENO_GPUREV(adreno_dev) > 500) && !adreno_is_a510(adreno_dev)) { + /* + * WAIT_MEM_WRITES - needed in the stall on fault case + * to prevent out of order CP operations that can result + * in a CACHE_FLUSH_TS interrupt storm + */ + if (test_bit(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE, &adreno_dev->ft_pf_policy)) - *ringcmds++ = cp_packet(adreno_dev, CP_WAIT_MEM_WRITES, 0); + *ringcmds++ = cp_packet(adreno_dev, + CP_WAIT_MEM_WRITES, 0); + } /* * Do a unique memory write from the GPU. This can be used in diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c index afb489f10172..3f41b2b44924 100644 --- a/drivers/gpu/msm/kgsl.c +++ b/drivers/gpu/msm/kgsl.c @@ -1120,8 +1120,6 @@ static int kgsl_open_device(struct kgsl_device *device) atomic_inc(&device->active_cnt); kgsl_sharedmem_set(device, &device->memstore, 0, 0, device->memstore.size); - kgsl_sharedmem_set(device, &device->scratch, 0, 0, - device->scratch.size); result = device->ftbl->init(device); if (result) @@ -2263,7 +2261,7 @@ static long _gpuobj_map_useraddr(struct kgsl_device *device, struct kgsl_mem_entry *entry, struct kgsl_gpuobj_import *param) { - struct kgsl_gpuobj_import_useraddr useraddr; + struct kgsl_gpuobj_import_useraddr useraddr = {0}; int ret; param->flags &= KGSL_MEMFLAGS_GPUREADONLY @@ -3441,6 +3439,7 @@ long kgsl_ioctl_sparse_virt_free(struct kgsl_device_private *dev_priv, return 0; } +/* entry->bind_lock must be held by the caller */ static int _sparse_add_to_bind_tree(struct kgsl_mem_entry *entry, uint64_t v_offset, struct kgsl_memdesc *memdesc, @@ -3451,7 +3450,7 @@ static int _sparse_add_to_bind_tree(struct kgsl_mem_entry *entry, struct sparse_bind_object *new; struct rb_node **node, *parent = NULL; - new = kzalloc(sizeof(*new), GFP_KERNEL); + new = kzalloc(sizeof(*new), GFP_ATOMIC); if (new == NULL) return -ENOMEM; @@ -3469,10 +3468,16 @@ static int _sparse_add_to_bind_tree(struct kgsl_mem_entry *entry, parent = *node; this = rb_entry(parent, struct sparse_bind_object, node); - if (new->v_off < this->v_off) + if ((new->v_off < this->v_off) && + ((new->v_off + new->size) <= this->v_off)) node = &parent->rb_left; - else if (new->v_off > this->v_off) + else if ((new->v_off > this->v_off) && + (new->v_off >= (this->v_off + this->size))) node = &parent->rb_right; + else { + kfree(new); + return -EADDRINUSE; + } } rb_link_node(&new->node, parent, node); @@ -3485,7 +3490,6 @@ static int _sparse_rm_from_bind_tree(struct kgsl_mem_entry *entry, struct sparse_bind_object *obj, uint64_t v_offset, uint64_t size) { - spin_lock(&entry->bind_lock); if (v_offset == obj->v_off && size >= obj->size) { /* * We are all encompassing, remove 
the entry and free @@ -3518,7 +3522,6 @@ static int _sparse_rm_from_bind_tree(struct kgsl_mem_entry *entry, obj->size = v_offset - obj->v_off; - spin_unlock(&entry->bind_lock); ret = _sparse_add_to_bind_tree(entry, v_offset + size, obj->p_memdesc, obj->p_off + (v_offset - obj->v_off) + size, @@ -3528,11 +3531,10 @@ static int _sparse_rm_from_bind_tree(struct kgsl_mem_entry *entry, return ret; } - spin_unlock(&entry->bind_lock); - return 0; } +/* entry->bind_lock must be held by the caller */ static struct sparse_bind_object *_find_containing_bind_obj( struct kgsl_mem_entry *entry, uint64_t offset, uint64_t size) @@ -3540,8 +3542,6 @@ static struct sparse_bind_object *_find_containing_bind_obj( struct sparse_bind_object *obj = NULL; struct rb_node *node = entry->bind_tree.rb_node; - spin_lock(&entry->bind_lock); - while (node != NULL) { obj = rb_entry(node, struct sparse_bind_object, node); @@ -3560,33 +3560,16 @@ static struct sparse_bind_object *_find_containing_bind_obj( } } - spin_unlock(&entry->bind_lock); - return obj; } +/* entry->bind_lock must be held by the caller */ static int _sparse_unbind(struct kgsl_mem_entry *entry, struct sparse_bind_object *bind_obj, uint64_t offset, uint64_t size) { - struct kgsl_memdesc *memdesc = bind_obj->p_memdesc; - struct kgsl_pagetable *pt = memdesc->pagetable; int ret; - if (memdesc->cur_bindings < (size / PAGE_SIZE)) - return -EINVAL; - - memdesc->cur_bindings -= size / PAGE_SIZE; - - ret = kgsl_mmu_unmap_offset(pt, memdesc, - entry->memdesc.gpuaddr, offset, size); - if (ret) - return ret; - - ret = kgsl_mmu_sparse_dummy_map(pt, &entry->memdesc, offset, size); - if (ret) - return ret; - ret = _sparse_rm_from_bind_tree(entry, bind_obj, offset, size); if (ret == 0) { atomic_long_sub(size, &kgsl_driver.stats.mapped); @@ -3600,6 +3583,8 @@ static long sparse_unbind_range(struct kgsl_sparse_binding_object *obj, struct kgsl_mem_entry *virt_entry) { struct sparse_bind_object *bind_obj; + struct kgsl_memdesc *memdesc; + struct kgsl_pagetable *pt; int ret = 0; uint64_t size = obj->size; uint64_t tmp_size = obj->size; @@ -3607,9 +3592,14 @@ static long sparse_unbind_range(struct kgsl_sparse_binding_object *obj, while (size > 0 && ret == 0) { tmp_size = size; + + spin_lock(&virt_entry->bind_lock); bind_obj = _find_containing_bind_obj(virt_entry, offset, size); - if (bind_obj == NULL) + + if (bind_obj == NULL) { + spin_unlock(&virt_entry->bind_lock); return 0; + } if (bind_obj->v_off > offset) { tmp_size = size - bind_obj->v_off - offset; @@ -3626,7 +3616,28 @@ static long sparse_unbind_range(struct kgsl_sparse_binding_object *obj, tmp_size = bind_obj->size; } + memdesc = bind_obj->p_memdesc; + pt = memdesc->pagetable; + + if (memdesc->cur_bindings < (tmp_size / PAGE_SIZE)) { + spin_unlock(&virt_entry->bind_lock); + return -EINVAL; + } + + memdesc->cur_bindings -= tmp_size / PAGE_SIZE; + ret = _sparse_unbind(virt_entry, bind_obj, offset, tmp_size); + spin_unlock(&virt_entry->bind_lock); + + ret = kgsl_mmu_unmap_offset(pt, memdesc, + virt_entry->memdesc.gpuaddr, offset, tmp_size); + if (ret) + return ret; + + ret = kgsl_mmu_sparse_dummy_map(pt, memdesc, offset, tmp_size); + if (ret) + return ret; + if (ret == 0) { offset += tmp_size; size -= tmp_size; @@ -3687,8 +3698,11 @@ static int _sparse_bind(struct kgsl_process_private *process, return ret; } + spin_lock(&virt_entry->bind_lock); ret = _sparse_add_to_bind_tree(virt_entry, v_offset, memdesc, p_offset, size, flags); + spin_unlock(&virt_entry->bind_lock); + if (ret == 0) memdesc->cur_bindings += size / 
PAGE_SIZE; @@ -4399,13 +4413,13 @@ kgsl_get_unmapped_area(struct file *file, unsigned long addr, if (!kgsl_memdesc_use_cpu_map(&entry->memdesc)) { val = get_unmapped_area(NULL, addr, len, 0, flags); if (IS_ERR_VALUE(val)) - KGSL_MEM_ERR(device, + KGSL_DRV_ERR_RATELIMIT(device, "get_unmapped_area: pid %d addr %lx pgoff %lx len %ld failed error %d\n", private->pid, addr, pgoff, len, (int) val); } else { val = _get_svm_area(private, entry, addr, len, flags); if (IS_ERR_VALUE(val)) - KGSL_MEM_ERR(device, + KGSL_DRV_ERR_RATELIMIT(device, "_get_svm_area: pid %d mmap_base %lx addr %lx pgoff %lx len %ld failed error %d\n", private->pid, current->mm->mmap_base, addr, pgoff, len, (int) val); @@ -4724,11 +4738,6 @@ int kgsl_device_platform_probe(struct kgsl_device *device) if (status != 0) goto error_close_mmu; - status = kgsl_allocate_global(device, &device->scratch, - PAGE_SIZE, 0, 0, "scratch"); - if (status != 0) - goto error_free_memstore; - /* * The default request type PM_QOS_REQ_ALL_CORES is * applicable to all CPU cores that are online and @@ -4774,8 +4783,6 @@ int kgsl_device_platform_probe(struct kgsl_device *device) return 0; -error_free_memstore: - kgsl_free_global(device, &device->memstore); error_close_mmu: kgsl_mmu_close(device); error_pwrctrl_close: @@ -4803,8 +4810,6 @@ void kgsl_device_platform_remove(struct kgsl_device *device) idr_destroy(&device->context_idr); - kgsl_free_global(device, &device->scratch); - kgsl_free_global(device, &device->memstore); kgsl_mmu_close(device); diff --git a/drivers/gpu/msm/kgsl_log.h b/drivers/gpu/msm/kgsl_log.h index 51baabefb6d3..9b7833bdb2df 100644 --- a/drivers/gpu/msm/kgsl_log.h +++ b/drivers/gpu/msm/kgsl_log.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2002,2008-2011,2013-2014,2016 The Linux Foundation. +/* Copyright (c) 2002,2008-2011,2013-2014,2016-2017 The Linux Foundation. * All rights reserved. * * This program is free software; you can redistribute it and/or modify @@ -67,6 +67,13 @@ __func__, ##args);\ } while (0) +#define KGSL_LOG_ERR_RATELIMITED(dev, lvl, fmt, args...) \ + do { \ + if ((lvl) >= 3) \ + dev_err_ratelimited(dev, "|%s| " fmt, \ + __func__, ##args);\ + } while (0) + #define KGSL_DRV_INFO(_dev, fmt, args...) \ KGSL_LOG_INFO(_dev->dev, _dev->drv_log, fmt, ##args) #define KGSL_DRV_WARN(_dev, fmt, args...) \ @@ -77,6 +84,8 @@ KGSL_LOG_ERR(_dev->dev, _dev->drv_log, fmt, ##args) KGSL_LOG_CRIT(_dev->dev, _dev->drv_log, fmt, ##args) #define KGSL_DRV_CRIT_RATELIMIT(_dev, fmt, args...) \ KGSL_LOG_CRIT_RATELIMITED(_dev->dev, _dev->drv_log, fmt, ##args) +#define KGSL_DRV_ERR_RATELIMIT(_dev, fmt, args...) \ +KGSL_LOG_ERR_RATELIMITED(_dev->dev, _dev->drv_log, fmt, ##args) #define KGSL_DRV_FATAL(_dev, fmt, args...) \ KGSL_LOG_FATAL((_dev)->dev, (_dev)->drv_log, fmt, ##args) diff --git a/drivers/gpu/msm/kgsl_pool.c b/drivers/gpu/msm/kgsl_pool.c index 685ce3ea968b..4a9997b02155 100644 --- a/drivers/gpu/msm/kgsl_pool.c +++ b/drivers/gpu/msm/kgsl_pool.c @@ -412,6 +412,24 @@ void kgsl_pool_free_page(struct page *page) __free_pages(page, page_order); } +/* + * Return true if the pool of specified page size is supported + * or no pools are supported otherwise return false. 
+ */ +bool kgsl_pool_avaialable(int page_size) +{ + int i; + + if (!kgsl_num_pools) + return true; + + for (i = 0; i < kgsl_num_pools; i++) + if (ilog2(page_size >> PAGE_SHIFT) == kgsl_pools[i].pool_order) + return true; + + return false; +} + static void kgsl_pool_reserve_pages(void) { int i, j; diff --git a/drivers/gpu/msm/kgsl_pool.h b/drivers/gpu/msm/kgsl_pool.h index d55e1ada123b..8091afb1ff11 100644 --- a/drivers/gpu/msm/kgsl_pool.h +++ b/drivers/gpu/msm/kgsl_pool.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -40,5 +40,6 @@ void kgsl_exit_page_pools(void); int kgsl_pool_alloc_page(int *page_size, struct page **pages, unsigned int pages_len, unsigned int *align); void kgsl_pool_free_page(struct page *p); +bool kgsl_pool_avaialable(int size); #endif /* __KGSL_POOL_H */ diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c index 27733b068434..d3ba8ca0dc00 100644 --- a/drivers/gpu/msm/kgsl_sharedmem.c +++ b/drivers/gpu/msm/kgsl_sharedmem.c @@ -28,7 +28,6 @@ #include "kgsl_device.h" #include "kgsl_log.h" #include "kgsl_mmu.h" -#include "kgsl_pool.h" /* * The user can set this from debugfs to force failed memory allocations to diff --git a/drivers/gpu/msm/kgsl_sharedmem.h b/drivers/gpu/msm/kgsl_sharedmem.h index 7db8ce0413c2..e5da594b77b8 100644 --- a/drivers/gpu/msm/kgsl_sharedmem.h +++ b/drivers/gpu/msm/kgsl_sharedmem.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2002,2007-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved. 
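Taken together with the kgsl_get_page_size() change just below, the allocator now only promotes an allocation to a large page size when a pool of that size actually exists. A simplified, standalone sketch of the selection rule (names and the pool_has() callback are illustrative stand-ins for kgsl_pool_avaialable()):

#include <stdbool.h>
#include <stddef.h>

#define SZ_8K  (8 * 1024)
#define SZ_64K (64 * 1024)
#define SZ_1M  (1024 * 1024)

static size_t pick_page_size(size_t size, unsigned int align_log2,
                             bool (*pool_has)(size_t))
{
        if (align_log2 >= 20 && size >= SZ_1M && pool_has(SZ_1M))
                return SZ_1M;
        else if (align_log2 >= 16 && size >= SZ_64K && pool_has(SZ_64K))
                return SZ_64K;
        else if (align_log2 >= 13 && size >= SZ_8K && pool_has(SZ_8K))
                return SZ_8K;
        return 4096;    /* fall back to PAGE_SIZE */
}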
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -363,6 +363,8 @@ static inline void kgsl_free_sgt(struct sg_table *sgt) } } +#include "kgsl_pool.h" + /** * kgsl_get_page_size() - Get supported pagesize * @size: Size of the page @@ -373,11 +375,14 @@ static inline void kgsl_free_sgt(struct sg_table *sgt) #ifndef CONFIG_ALLOC_BUFFERS_IN_4K_CHUNKS static inline int kgsl_get_page_size(size_t size, unsigned int align) { - if (align >= ilog2(SZ_1M) && size >= SZ_1M) + if (align >= ilog2(SZ_1M) && size >= SZ_1M && + kgsl_pool_avaialable(SZ_1M)) return SZ_1M; - else if (align >= ilog2(SZ_64K) && size >= SZ_64K) + else if (align >= ilog2(SZ_64K) && size >= SZ_64K && + kgsl_pool_avaialable(SZ_64K)) return SZ_64K; - else if (align >= ilog2(SZ_8K) && size >= SZ_8K) + else if (align >= ilog2(SZ_8K) && size >= SZ_8K && + kgsl_pool_avaialable(SZ_8K)) return SZ_8K; else return PAGE_SIZE; diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 6b00061c3746..a2ae2213ef3e 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -294,7 +294,7 @@ static void dw_i2c_plat_complete(struct device *dev) #endif #ifdef CONFIG_PM -static int dw_i2c_plat_suspend(struct device *dev) +static int dw_i2c_plat_runtime_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev); @@ -318,11 +318,21 @@ static int dw_i2c_plat_resume(struct device *dev) return 0; } +#ifdef CONFIG_PM_SLEEP +static int dw_i2c_plat_suspend(struct device *dev) +{ + pm_runtime_resume(dev); + return dw_i2c_plat_runtime_suspend(dev); +} +#endif + static const struct dev_pm_ops dw_i2c_dev_pm_ops = { .prepare = dw_i2c_plat_prepare, .complete = dw_i2c_plat_complete, SET_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume) - SET_RUNTIME_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume, NULL) + SET_RUNTIME_PM_OPS(dw_i2c_plat_runtime_suspend, + dw_i2c_plat_resume, + NULL) }; #define DW_I2C_DEV_PMOPS (&dw_i2c_dev_pm_ops) diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c index fa24d5196615..c7122919a8c0 100644 --- a/drivers/iio/accel/bmc150-accel-core.c +++ b/drivers/iio/accel/bmc150-accel-core.c @@ -194,7 +194,6 @@ struct bmc150_accel_data { struct device *dev; int irq; struct bmc150_accel_interrupt interrupts[BMC150_ACCEL_INTERRUPTS]; - atomic_t active_intr; struct bmc150_accel_trigger triggers[BMC150_ACCEL_TRIGGERS]; struct mutex mutex; u8 fifo_mode, watermark; @@ -489,11 +488,6 @@ static int bmc150_accel_set_interrupt(struct bmc150_accel_data *data, int i, goto out_fix_power_state; } - if (state) - atomic_inc(&data->active_intr); - else - atomic_dec(&data->active_intr); - return 0; out_fix_power_state: @@ -1704,8 +1698,7 @@ static int bmc150_accel_resume(struct device *dev) struct bmc150_accel_data *data = iio_priv(indio_dev); mutex_lock(&data->mutex); - if (atomic_read(&data->active_intr)) - bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0); + bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0); bmc150_accel_fifo_set_mode(data); mutex_unlock(&data->mutex); diff --git a/drivers/iio/adc/qcom-rradc.c b/drivers/iio/adc/qcom-rradc.c index 28ab4e52dab5..b3aa73f1a5a1 100644 --- a/drivers/iio/adc/qcom-rradc.c +++ b/drivers/iio/adc/qcom-rradc.c @@ -22,6 +22,7 @@ #include <linux/regmap.h> #include <linux/delay.h> 
#include <linux/qpnp/qpnp-revid.h> +#include <linux/power_supply.h> #define FG_ADC_RR_EN_CTL 0x46 #define FG_ADC_RR_SKIN_TEMP_LSB 0x50 @@ -192,8 +193,7 @@ #define FG_RR_ADC_STS_CHANNEL_READING_MASK 0x3 #define FG_RR_ADC_STS_CHANNEL_STS 0x2 -#define FG_RR_CONV_CONTINUOUS_TIME_MIN_US 50000 -#define FG_RR_CONV_CONTINUOUS_TIME_MAX_US 51000 +#define FG_RR_CONV_CONTINUOUS_TIME_MIN_MS 50 #define FG_RR_CONV_MAX_RETRY_CNT 50 #define FG_RR_TP_REV_VERSION1 21 #define FG_RR_TP_REV_VERSION2 29 @@ -235,6 +235,7 @@ struct rradc_chip { struct device_node *revid_dev_node; struct pmic_revid_data *pmic_fab_id; int volt; + struct power_supply *usb_trig; }; struct rradc_channels { @@ -726,6 +727,24 @@ static int rradc_disable_continuous_mode(struct rradc_chip *chip) return rc; } +static bool rradc_is_usb_present(struct rradc_chip *chip) +{ + union power_supply_propval pval; + int rc; + bool usb_present = false; + + if (!chip->usb_trig) { + pr_debug("USB property not present\n"); + return usb_present; + } + + rc = power_supply_get_property(chip->usb_trig, + POWER_SUPPLY_PROP_PRESENT, &pval); + usb_present = (rc < 0) ? 0 : pval.intval; + + return usb_present; +} + static int rradc_check_status_ready_with_retry(struct rradc_chip *chip, struct rradc_chan_prop *prop, u8 *buf, u16 status) { @@ -745,8 +764,18 @@ static int rradc_check_status_ready_with_retry(struct rradc_chip *chip, (retry_cnt < FG_RR_CONV_MAX_RETRY_CNT)) { pr_debug("%s is not ready; nothing to read:0x%x\n", rradc_chans[prop->channel].datasheet_name, buf[0]); - usleep_range(FG_RR_CONV_CONTINUOUS_TIME_MIN_US, - FG_RR_CONV_CONTINUOUS_TIME_MAX_US); + + if (((prop->channel == RR_ADC_CHG_TEMP) || + (prop->channel == RR_ADC_SKIN_TEMP) || + (prop->channel == RR_ADC_USBIN_I) || + (prop->channel == RR_ADC_DIE_TEMP)) && + ((!rradc_is_usb_present(chip)))) { + pr_debug("USB not present for %d\n", prop->channel); + rc = -ENODATA; + break; + } + + msleep(FG_RR_CONV_CONTINUOUS_TIME_MIN_MS); retry_cnt++; rc = rradc_read(chip, status, buf, 1); if (rc < 0) { @@ -764,7 +793,7 @@ static int rradc_check_status_ready_with_retry(struct rradc_chip *chip, static int rradc_read_channel_with_continuous_mode(struct rradc_chip *chip, struct rradc_chan_prop *prop, u8 *buf) { - int rc = 0; + int rc = 0, ret = 0; u16 status = 0; rc = rradc_enable_continuous_mode(chip); @@ -777,23 +806,25 @@ static int rradc_read_channel_with_continuous_mode(struct rradc_chip *chip, rc = rradc_read(chip, status, buf, 1); if (rc < 0) { pr_err("status read failed:%d\n", rc); - return rc; + ret = rc; + goto disable; } rc = rradc_check_status_ready_with_retry(chip, prop, buf, status); if (rc < 0) { pr_err("Status read failed:%d\n", rc); - return rc; + ret = rc; } +disable: rc = rradc_disable_continuous_mode(chip); if (rc < 0) { pr_err("Failed to switch to non continuous mode\n"); - return rc; + ret = rc; } - return rc; + return ret; } static int rradc_enable_batt_id_channel(struct rradc_chip *chip, bool enable) @@ -1149,6 +1180,10 @@ static int rradc_probe(struct platform_device *pdev) indio_dev->channels = chip->iio_chans; indio_dev->num_channels = chip->nchannels; + chip->usb_trig = power_supply_get_by_name("usb"); + if (!chip->usb_trig) + pr_debug("Error obtaining usb power supply\n"); + return devm_iio_device_register(dev, indio_dev); } diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c index b10f629cc44b..1dbc2143cdfc 100644 --- a/drivers/iio/adc/vf610_adc.c +++ b/drivers/iio/adc/vf610_adc.c @@ -77,7 +77,7 @@ #define VF610_ADC_ADSTS_MASK 0x300 #define VF610_ADC_ADLPC_EN 0x80 
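A rough standalone sketch of the RR ADC polling behaviour introduced above: up to FG_RR_CONV_MAX_RETRY_CNT status reads, a 50 ms sleep between attempts, and an early -ENODATA exit for the USB-gated channels when no USB supply is present. The callbacks are illustrative stand-ins for the register read and power-supply query in the patch:

#include <errno.h>
#include <stdbool.h>

#define MAX_RETRY 50    /* FG_RR_CONV_MAX_RETRY_CNT */
#define POLL_MS   50    /* FG_RR_CONV_CONTINUOUS_TIME_MIN_MS */

static int wait_channel_ready(bool usb_gated_channel,
                              bool (*read_ready)(void),
                              bool (*usb_present)(void),
                              void (*sleep_ms)(unsigned int))
{
        int retry = 0;

        while (!read_ready()) {
                /* without USB these channels will never become ready */
                if (usb_gated_channel && !usb_present())
                        return -ENODATA;
                if (++retry >= MAX_RETRY)
                        return -ETIMEDOUT;
                sleep_ms(POLL_MS);
        }
        return 0;
}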
#define VF610_ADC_ADHSC_EN 0x400 -#define VF610_ADC_REFSEL_VALT 0x100 +#define VF610_ADC_REFSEL_VALT 0x800 #define VF610_ADC_REFSEL_VBG 0x1000 #define VF610_ADC_ADTRG_HARD 0x2000 #define VF610_ADC_AVGS_8 0x4000 diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c index 0a86ef43e781..a8db38db622e 100644 --- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c +++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c @@ -36,8 +36,6 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state) s32 poll_value = 0; if (state) { - if (!atomic_read(&st->user_requested_state)) - return 0; if (sensor_hub_device_open(st->hsdev)) return -EIO; @@ -84,6 +82,9 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state) &report_val); } + pr_debug("HID_SENSOR %s set power_state %d report_state %d\n", + st->pdev->name, state_val, report_val); + sensor_hub_get_feature(st->hsdev, st->power_state.report_id, st->power_state.index, sizeof(state_val), &state_val); @@ -107,6 +108,7 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state) ret = pm_runtime_get_sync(&st->pdev->dev); else { pm_runtime_mark_last_busy(&st->pdev->dev); + pm_runtime_use_autosuspend(&st->pdev->dev); ret = pm_runtime_put_autosuspend(&st->pdev->dev); } if (ret < 0) { @@ -175,8 +177,6 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name, /* Default to 3 seconds, but can be changed from sysfs */ pm_runtime_set_autosuspend_delay(&attrb->pdev->dev, 3000); - pm_runtime_use_autosuspend(&attrb->pdev->dev); - return ret; error_unreg_trigger: iio_trigger_unregister(trig); diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c index 2485b88ee1b6..1880105cc8c4 100644 --- a/drivers/iio/imu/adis16480.c +++ b/drivers/iio/imu/adis16480.c @@ -696,7 +696,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = { .gyro_max_val = IIO_RAD_TO_DEGREE(22500), .gyro_max_scale = 450, .accel_max_val = IIO_M_S_2_TO_G(12500), - .accel_max_scale = 5, + .accel_max_scale = 10, }, [ADIS16485] = { .channels = adis16485_channels, diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c index 12731d6b89ec..ec1b2e798cc1 100644 --- a/drivers/iio/light/tsl2563.c +++ b/drivers/iio/light/tsl2563.c @@ -626,7 +626,7 @@ static irqreturn_t tsl2563_event_handler(int irq, void *private) struct tsl2563_chip *chip = iio_priv(dev_info); iio_push_event(dev_info, - IIO_UNMOD_EVENT_CODE(IIO_LIGHT, + IIO_UNMOD_EVENT_CODE(IIO_INTENSITY, 0, IIO_EV_TYPE_THRESH, IIO_EV_DIR_EITHER), diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index b0edb66a291b..0b7f5a701c60 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -1581,7 +1581,7 @@ isert_rcv_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn, u32 xfer_len) { - struct ib_device *ib_dev = isert_conn->cm_id->device; + struct ib_device *ib_dev = isert_conn->device->ib_device; struct iscsi_hdr *hdr; u64 rx_dma; int rx_buflen; diff --git a/drivers/input/misc/keychord.c b/drivers/input/misc/keychord.c index a5ea27ad0e16..fdcc14653b64 100644 --- a/drivers/input/misc/keychord.c +++ b/drivers/input/misc/keychord.c @@ -60,6 +60,10 @@ struct keychord_device { unsigned char head; unsigned char tail; __u16 buff[BUFFER_SIZE]; + /* Bit to serialize writes to this device */ +#define KEYCHORD_BUSY 0x01 + unsigned long flags; + wait_queue_head_t write_waitq; }; static int 
check_keychord(struct keychord_device *kdev,
@@ -172,7 +176,6 @@ static int keychord_connect(struct input_handler *handler,
 		goto err_input_open_device;
 
 	pr_info("keychord: using input dev %s for fevent\n", dev->name);
-
 	return 0;
 
 err_input_open_device:
@@ -225,6 +228,41 @@ static ssize_t keychord_read(struct file *file, char __user *buffer,
 }
 
 /*
+ * serializes writes on a device. can use mutex_lock_interruptible()
+ * for this particular use case as well - a matter of preference.
+ */
+static int
+keychord_write_lock(struct keychord_device *kdev)
+{
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&kdev->lock, flags);
+	while (kdev->flags & KEYCHORD_BUSY) {
+		spin_unlock_irqrestore(&kdev->lock, flags);
+		ret = wait_event_interruptible(kdev->write_waitq,
+			((kdev->flags & KEYCHORD_BUSY) == 0));
+		if (ret)
+			return ret;
+		spin_lock_irqsave(&kdev->lock, flags);
+	}
+	kdev->flags |= KEYCHORD_BUSY;
+	spin_unlock_irqrestore(&kdev->lock, flags);
+	return 0;
+}
+
+static void
+keychord_write_unlock(struct keychord_device *kdev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&kdev->lock, flags);
+	kdev->flags &= ~KEYCHORD_BUSY;
+	spin_unlock_irqrestore(&kdev->lock, flags);
+	wake_up_interruptible(&kdev->write_waitq);
+}
+
+/*
  * keychord_write is used to configure the driver
 */
 static ssize_t keychord_write(struct file *file, const char __user *buffer,
@@ -232,9 +270,11 @@
 {
 	struct keychord_device *kdev = file->private_data;
 	struct input_keychord *keychords = 0;
-	struct input_keychord *keychord, *next, *end;
+	struct input_keychord *keychord;
 	int ret, i, key;
 	unsigned long flags;
+	size_t resid = count;
+	size_t key_bytes;
 
 	if (count < sizeof(struct input_keychord))
 		return -EINVAL;
@@ -248,6 +288,22 @@ static ssize_t keychord_write(struct file *file, const char __user *buffer,
 		return -EFAULT;
 	}
 
+	/*
+	 * Serialize writes to this device to prevent various races.
+	 * 1) writers racing here could do duplicate input_unregister_handler()
+	 *    calls, resulting in attempting to unlink a node from a list that
+	 *    does not exist.
+	 * 2) writers racing here could do duplicate input_register_handler() calls
+	 *    below, resulting in a duplicate insertion of a node into the list.
+	 * 3) a double kfree of keychords can occur (in the event that
+	 *    input_register_handler() fails below).
+	 */
+	ret = keychord_write_lock(kdev);
+	if (ret) {
+		kfree(keychords);
+		return ret;
+	}
+
 	/* unregister handler before changing configuration */
 	if (kdev->registered) {
 		input_unregister_handler(&kdev->input_handler);
@@ -265,15 +321,29 @@
 	kdev->head = kdev->tail = 0;
 
 	keychord = keychords;
-	end = (struct input_keychord *)((char *)keychord + count);
 
-	while (keychord < end) {
-		next = NEXT_KEYCHORD(keychord);
-		if (keychord->count <= 0 || next > end) {
+	while (resid > 0) {
+		/* Is the entire keychord entry header present ? */
+		if (resid < sizeof(struct input_keychord)) {
+			pr_err("keychord: Insufficient bytes present for header %zu\n",
+				resid);
+			goto err_unlock_return;
+		}
+		resid -= sizeof(struct input_keychord);
+		if (keychord->count <= 0) {
 			pr_err("keychord: invalid keycode count %d\n",
 				keychord->count);
 			goto err_unlock_return;
 		}
+		key_bytes = keychord->count * sizeof(keychord->keycodes[0]);
+		/* Do we have all the expected keycodes ?
*/ + if (resid < key_bytes) { + pr_err("keychord: Insufficient bytes present for keycount %zu\n", + resid); + goto err_unlock_return; + } + resid -= key_bytes; + if (keychord->version != KEYCHORD_VERSION) { pr_err("keychord: unsupported version %d\n", keychord->version); @@ -292,7 +362,7 @@ static ssize_t keychord_write(struct file *file, const char __user *buffer, } kdev->keychord_count++; - keychord = next; + keychord = NEXT_KEYCHORD(keychord); } kdev->keychords = keychords; @@ -302,15 +372,19 @@ static ssize_t keychord_write(struct file *file, const char __user *buffer, if (ret) { kfree(keychords); kdev->keychords = 0; + keychord_write_unlock(kdev); return ret; } kdev->registered = 1; + keychord_write_unlock(kdev); + return count; err_unlock_return: spin_unlock_irqrestore(&kdev->lock, flags); kfree(keychords); + keychord_write_unlock(kdev); return -EINVAL; } @@ -336,6 +410,7 @@ static int keychord_open(struct inode *inode, struct file *file) spin_lock_init(&kdev->lock); init_waitqueue_head(&kdev->waitq); + init_waitqueue_head(&kdev->write_waitq); kdev->input_handler.event = keychord_event; kdev->input_handler.connect = keychord_connect; @@ -357,6 +432,7 @@ static int keychord_release(struct inode *inode, struct file *file) if (kdev->registered) input_unregister_handler(&kdev->input_handler); + kfree(kdev->keychords); kfree(kdev); return 0; diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index da5458dfb1e3..681dce15fbc8 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -1234,7 +1234,12 @@ static const struct acpi_device_id elan_acpi_id[] = { { "ELAN0000", 0 }, { "ELAN0100", 0 }, { "ELAN0600", 0 }, + { "ELAN0602", 0 }, { "ELAN0605", 0 }, + { "ELAN0608", 0 }, + { "ELAN0605", 0 }, + { "ELAN0609", 0 }, + { "ELAN060B", 0 }, { "ELAN1000", 0 }, { } }; diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c index 354d47ecd66a..ce6ff9b301bb 100644 --- a/drivers/input/mouse/trackpoint.c +++ b/drivers/input/mouse/trackpoint.c @@ -265,7 +265,8 @@ static int trackpoint_start_protocol(struct psmouse *psmouse, unsigned char *fir if (ps2_command(&psmouse->ps2dev, param, MAKE_PS2_CMD(0, 2, TP_READ_ID))) return -1; - if (param[0] != TP_MAGIC_IDENT) + /* add new TP ID. 
*/ + if (!(param[0] & TP_MAGIC_IDENT)) return -1; if (firmware_id) diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h index 5617ed3a7d7a..88055755f82e 100644 --- a/drivers/input/mouse/trackpoint.h +++ b/drivers/input/mouse/trackpoint.h @@ -21,8 +21,9 @@ #define TP_COMMAND 0xE2 /* Commands start with this */ #define TP_READ_ID 0xE1 /* Sent for device identification */ -#define TP_MAGIC_IDENT 0x01 /* Sent after a TP_READ_ID followed */ +#define TP_MAGIC_IDENT 0x03 /* Sent after a TP_READ_ID followed */ /* by the firmware ID */ + /* Firmware ID includes 0x1, 0x2, 0x3 */ /* diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index ce18a512b76a..6317478916ef 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -1243,6 +1243,7 @@ static void arm_smmu_secure_pool_destroy(struct arm_smmu_domain *smmu_domain) list_for_each_entry_safe(it, i, &smmu_domain->secure_pool_list, list) { arm_smmu_unprepare_pgtable(smmu_domain, it->addr, it->size); /* pages will be freed later (after being unassigned) */ + list_del(&it->list); kfree(it); } } @@ -1728,7 +1729,7 @@ static void arm_smmu_pgtbl_unlock(struct arm_smmu_domain *smmu_domain, static int arm_smmu_restore_sec_cfg(struct arm_smmu_device *smmu) { int ret; - u64 scm_ret; + u64 scm_ret = 0; if (!arm_smmu_is_static_cb(smmu)) return 0; @@ -1956,10 +1957,20 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain) cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR); - arm_smmu_tlb_inv_context(smmu_domain); - arm_smmu_disable_clocks(smmu_domain->smmu); + if (smmu_domain->pgtbl_ops) { + free_io_pgtable_ops(smmu_domain->pgtbl_ops); + /* unassign any freed page table memory */ + if (arm_smmu_is_master_side_secure(smmu_domain)) { + arm_smmu_secure_domain_lock(smmu_domain); + arm_smmu_secure_pool_destroy(smmu_domain); + arm_smmu_unassign_table(smmu_domain); + arm_smmu_secure_domain_unlock(smmu_domain); + } + smmu_domain->pgtbl_ops = NULL; + } + free_irqs: if (cfg->irptndx != INVALID_IRPTNDX) { irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx]; diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 347a3c17f73a..041c42fb511f 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -514,16 +514,6 @@ void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, __iommu_dma_unmap(iommu_get_domain_for_dev(dev), sg_dma_address(sg)); } -int iommu_dma_supported(struct device *dev, u64 mask) -{ - /* - * 'Special' IOMMUs which don't have the same addressing capability - * as the CPU will have to wait until we have some way to query that - * before they'll be able to use this framework. 
- */ - return 1; -} - int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { return dma_addr == DMA_ERROR_CODE; diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c index 37199b9b2cfa..831a195cb806 100644 --- a/drivers/irqchip/irq-atmel-aic-common.c +++ b/drivers/irqchip/irq-atmel-aic-common.c @@ -148,9 +148,9 @@ void __init aic_common_rtc_irq_fixup(struct device_node *root) struct device_node *np; void __iomem *regs; - np = of_find_compatible_node(root, NULL, "atmel,at91rm9200-rtc"); + np = of_find_compatible_node(NULL, NULL, "atmel,at91rm9200-rtc"); if (!np) - np = of_find_compatible_node(root, NULL, + np = of_find_compatible_node(NULL, NULL, "atmel,at91sam9x5-rtc"); if (!np) @@ -202,7 +202,6 @@ void __init aic_common_irq_fixup(const struct of_device_id *matches) return; match = of_match_node(matches, root); - of_node_put(root); if (match) { void (*fixup)(struct device_node *) = match->data; diff --git a/drivers/media/i2c/adv7481.c b/drivers/media/i2c/adv7481.c index b382a1d83d92..4f3887c087ce 100644 --- a/drivers/media/i2c/adv7481.c +++ b/drivers/media/i2c/adv7481.c @@ -55,8 +55,6 @@ #define LOCK_MAX_SLEEP 6000 #define LOCK_NUM_TRIES 200 -#define MAX_DEFAULT_WIDTH 1280 -#define MAX_DEFAULT_HEIGHT 720 #define MAX_DEFAULT_FRAME_RATE 60 #define MAX_DEFAULT_PIX_CLK_HZ 74240000 @@ -1576,8 +1574,7 @@ static int adv7481_get_hdmi_timings(struct adv7481_state *state, } else { pr_err("%s(%d): PLL not locked return EBUSY\n", __func__, __LINE__); - ret = -EBUSY; - goto set_default; + return -EBUSY; } /* Check Timing Lock */ @@ -1697,17 +1694,6 @@ static int adv7481_get_hdmi_timings(struct adv7481_state *state, (hdmi_params->pix_rep + 1)); } -set_default: - if (ret) { - pr_debug("%s(%d), error %d resort to default fmt\n", - __func__, __LINE__, ret); - vid_params->act_pix = MAX_DEFAULT_WIDTH; - vid_params->act_lines = MAX_DEFAULT_HEIGHT; - vid_params->fr_rate = MAX_DEFAULT_FRAME_RATE; - vid_params->pix_clk = MAX_DEFAULT_PIX_CLK_HZ; - vid_params->intrlcd = 0; - ret = 0; - } pr_debug("%s(%d), adv7481 TMDS Resolution: %d x %d @ %d fps\n", __func__, __LINE__, @@ -2051,7 +2037,7 @@ static int adv7481_set_op_stream(struct adv7481_state *state, bool on) __func__, on, state->csia_src, state->csib_src); if (on && state->csia_src != ADV7481_IP_NONE) if (ADV7481_IP_HDMI == state->csia_src) { - state->tx_lanes = ADV7481_MIPI_2LANE; + state->tx_lanes = ADV7481_MIPI_4LANE; ret = adv7481_set_audio_spdif(state, on); ret |= adv7481_csi_powerup(state, ADV7481_OP_CSIA); } else { @@ -2066,7 +2052,7 @@ static int adv7481_set_op_stream(struct adv7481_state *state, bool on) /* Turn off */ if (ADV7481_IP_NONE != state->csia_src) { if (ADV7481_IP_HDMI == state->csia_src) { - state->tx_lanes = ADV7481_MIPI_1LANE; + state->tx_lanes = ADV7481_MIPI_4LANE; ret = adv7481_set_audio_spdif(state, on); } else { state->tx_lanes = ADV7481_MIPI_1LANE; diff --git a/drivers/media/pci/saa7164/saa7164-bus.c b/drivers/media/pci/saa7164/saa7164-bus.c index a18fe5d47238..b4857cd7069e 100644 --- a/drivers/media/pci/saa7164/saa7164-bus.c +++ b/drivers/media/pci/saa7164/saa7164-bus.c @@ -393,11 +393,11 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg, msg_tmp.size = le16_to_cpu((__force __le16)msg_tmp.size); msg_tmp.command = le32_to_cpu((__force __le32)msg_tmp.command); msg_tmp.controlselector = le16_to_cpu((__force __le16)msg_tmp.controlselector); + memcpy(msg, &msg_tmp, sizeof(*msg)); /* No need to update the read positions, because this was a peek */ /* If the 
caller specifically want to peek, return */ if (peekonly) { - memcpy(msg, &msg_tmp, sizeof(*msg)); goto peekout; } @@ -442,21 +442,15 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg, space_rem = bus->m_dwSizeGetRing - curr_grp; if (space_rem < sizeof(*msg)) { - /* msg wraps around the ring */ - memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, space_rem); - memcpy_fromio((u8 *)msg + space_rem, bus->m_pdwGetRing, - sizeof(*msg) - space_rem); if (buf) memcpy_fromio(buf, bus->m_pdwGetRing + sizeof(*msg) - space_rem, buf_size); } else if (space_rem == sizeof(*msg)) { - memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg)); if (buf) memcpy_fromio(buf, bus->m_pdwGetRing, buf_size); } else { /* Additional data wraps around the ring */ - memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg)); if (buf) { memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp + sizeof(*msg), space_rem - sizeof(*msg)); @@ -469,15 +463,10 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg, } else { /* No wrapping */ - memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg)); if (buf) memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp + sizeof(*msg), buf_size); } - /* Convert from little endian to CPU */ - msg->size = le16_to_cpu((__force __le16)msg->size); - msg->command = le32_to_cpu((__force __le32)msg->command); - msg->controlselector = le16_to_cpu((__force __le16)msg->controlselector); /* Update the read positions, adjusting the ring */ saa7164_writel(bus->m_dwGetReadPos, new_grp); diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c index 7767e072d623..1f656a3a84b9 100644 --- a/drivers/media/platform/davinci/vpfe_capture.c +++ b/drivers/media/platform/davinci/vpfe_capture.c @@ -1709,27 +1709,9 @@ static long vpfe_param_handler(struct file *file, void *priv, switch (cmd) { case VPFE_CMD_S_CCDC_RAW_PARAMS: + ret = -EINVAL; v4l2_warn(&vpfe_dev->v4l2_dev, - "VPFE_CMD_S_CCDC_RAW_PARAMS: experimental ioctl\n"); - if (ccdc_dev->hw_ops.set_params) { - ret = ccdc_dev->hw_ops.set_params(param); - if (ret) { - v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, - "Error setting parameters in CCDC\n"); - goto unlock_out; - } - ret = vpfe_get_ccdc_image_format(vpfe_dev, - &vpfe_dev->fmt); - if (ret < 0) { - v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, - "Invalid image format at CCDC\n"); - goto unlock_out; - } - } else { - ret = -EINVAL; - v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, - "VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n"); - } + "VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n"); break; default: ret = -ENOTTY; diff --git a/drivers/media/platform/msm/ais/camera/camera.c b/drivers/media/platform/msm/ais/camera/camera.c index 33808d18d4c4..3f477d50ceaf 100644 --- a/drivers/media/platform/msm/ais/camera/camera.c +++ b/drivers/media/platform/msm/ais/camera/camera.c @@ -628,6 +628,7 @@ static int camera_v4l2_open(struct file *filep) if (WARN_ON(!pvdev)) return -EIO; + mutex_lock(&pvdev->video_drvdata_mutex); rc = camera_v4l2_fh_open(filep); if (rc < 0) { pr_err("%s : camera_v4l2_fh_open failed Line %d rc %d\n", @@ -698,6 +699,7 @@ static int camera_v4l2_open(struct file *filep) idx |= (1 << find_first_zero_bit((const unsigned long *)&opn_idx, MSM_CAMERA_STREAM_CNT_BITS)); atomic_cmpxchg(&pvdev->opened, opn_idx, idx); + mutex_unlock(&pvdev->video_drvdata_mutex); return rc; @@ -712,6 +714,7 @@ stream_fail: vb2_q_fail: camera_v4l2_fh_release(filep); fh_open_fail: + mutex_unlock(&pvdev->video_drvdata_mutex); return rc; } @@ -746,6 +749,7 @@ 
static int camera_v4l2_close(struct file *filep) if (WARN_ON(!session)) return -EIO; + mutex_lock(&pvdev->video_drvdata_mutex); mutex_lock(&session->close_lock); opn_idx = atomic_read(&pvdev->opened); mask = (1 << sp->stream_id); @@ -788,6 +792,7 @@ static int camera_v4l2_close(struct file *filep) } camera_v4l2_fh_release(filep); + mutex_unlock(&pvdev->video_drvdata_mutex); return 0; } @@ -936,6 +941,7 @@ int camera_init_v4l2(struct device *dev, unsigned int *session) *session = pvdev->vdev->num; atomic_set(&pvdev->opened, 0); + mutex_init(&pvdev->video_drvdata_mutex); video_set_drvdata(pvdev->vdev, pvdev); device_init_wakeup(&pvdev->vdev->dev, 1); goto init_end; diff --git a/drivers/media/platform/msm/ais/isp/msm_isp47.c b/drivers/media/platform/msm/ais/isp/msm_isp47.c index d33dc758aef9..04e879fc3bcf 100644 --- a/drivers/media/platform/msm/ais/isp/msm_isp47.c +++ b/drivers/media/platform/msm/ais/isp/msm_isp47.c @@ -699,6 +699,12 @@ void msm_vfe47_reg_update(struct vfe_device *vfe_dev, vfe_dev->reg_update_requested; if ((vfe_dev->is_split && vfe_dev->pdev->id == ISP_VFE1) && ((frame_src == VFE_PIX_0) || (frame_src == VFE_SRC_MAX))) { + if (!vfe_dev->common_data->dual_vfe_res->vfe_base[ISP_VFE0]) { + pr_err("%s vfe_base for ISP_VFE0 is NULL\n", __func__); + spin_unlock_irqrestore(&vfe_dev->reg_update_lock, + flags); + return; + } msm_camera_io_w_mb(update_mask, vfe_dev->common_data->dual_vfe_res-> vfe_base[ISP_VFE0] + 0x4AC); diff --git a/drivers/media/platform/msm/ais/ispif/msm_ispif.c b/drivers/media/platform/msm/ais/ispif/msm_ispif.c index c41f4546da5f..bb75e69ea215 100644 --- a/drivers/media/platform/msm/ais/ispif/msm_ispif.c +++ b/drivers/media/platform/msm/ais/ispif/msm_ispif.c @@ -46,7 +46,7 @@ #define ISPIF_INTF_CMD_DISABLE_IMMEDIATELY 0x02 #define ISPIF_TIMEOUT_SLEEP_US 1000 -#define ISPIF_TIMEOUT_ALL_US 1000000 +#define ISPIF_TIMEOUT_ALL_US 200000 #define ISPIF_SOF_DEBUG_COUNT 0 #undef CDBG diff --git a/drivers/media/platform/msm/ais/msm.c b/drivers/media/platform/msm/ais/msm.c index a3a742182e76..902e05b3329b 100644 --- a/drivers/media/platform/msm/ais/msm.c +++ b/drivers/media/platform/msm/ais/msm.c @@ -292,6 +292,7 @@ void msm_delete_stream(unsigned int session_id, unsigned int stream_id) return; while (1) { + unsigned long wl_flags; if (try_count > 5) { pr_err("%s : not able to delete stream %d\n", @@ -299,18 +300,20 @@ void msm_delete_stream(unsigned int session_id, unsigned int stream_id) break; } - write_lock(&session->stream_rwlock); + write_lock_irqsave(&session->stream_rwlock, wl_flags); try_count++; stream = msm_queue_find(&session->stream_q, struct msm_stream, list, __msm_queue_find_stream, &stream_id); if (!stream) { - write_unlock(&session->stream_rwlock); + write_unlock_irqrestore(&session->stream_rwlock, + wl_flags); return; } if (msm_vb2_get_stream_state(stream) != 1) { - write_unlock(&session->stream_rwlock); + write_unlock_irqrestore(&session->stream_rwlock, + wl_flags); continue; } @@ -320,7 +323,7 @@ void msm_delete_stream(unsigned int session_id, unsigned int stream_id) kfree(stream); stream = NULL; spin_unlock_irqrestore(&(session->stream_q.lock), flags); - write_unlock(&session->stream_rwlock); + write_unlock_irqrestore(&session->stream_rwlock, wl_flags); break; } @@ -731,6 +734,16 @@ static long msm_private_ioctl(struct file *file, void *fh, if (!event_data) return -EINVAL; + switch (cmd) { + case MSM_CAM_V4L2_IOCTL_NOTIFY: + case MSM_CAM_V4L2_IOCTL_CMD_ACK: + case MSM_CAM_V4L2_IOCTL_NOTIFY_DEBUG: + case MSM_CAM_V4L2_IOCTL_NOTIFY_ERROR: + break; + 
default: + return -ENOTTY; + } + memset(&event, 0, sizeof(struct v4l2_event)); session_id = event_data->session_id; stream_id = event_data->stream_id; diff --git a/drivers/media/platform/msm/ais/msm.h b/drivers/media/platform/msm/ais/msm.h index 5d456310c301..ff3008ec6872 100644 --- a/drivers/media/platform/msm/ais/msm.h +++ b/drivers/media/platform/msm/ais/msm.h @@ -46,6 +46,7 @@ extern bool is_daemon_status; struct msm_video_device { struct video_device *vdev; atomic_t opened; + struct mutex video_drvdata_mutex; }; struct msm_queue_head { diff --git a/drivers/media/platform/msm/ais/msm_vb2/msm_vb2.c b/drivers/media/platform/msm/ais/msm_vb2/msm_vb2.c index 36aa3f62fbec..1cbc49c8485c 100644 --- a/drivers/media/platform/msm/ais/msm_vb2/msm_vb2.c +++ b/drivers/media/platform/msm/ais/msm_vb2/msm_vb2.c @@ -47,22 +47,23 @@ static int msm_vb2_buf_init(struct vb2_buffer *vb) struct msm_session *session; struct msm_vb2_buffer *msm_vb2_buf; struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + unsigned long rl_flags; session = msm_get_session_from_vb2q(vb->vb2_queue); if (IS_ERR_OR_NULL(session)) return -EINVAL; - read_lock(&session->stream_rwlock); + read_lock_irqsave(&session->stream_rwlock, rl_flags); stream = msm_get_stream_from_vb2q(vb->vb2_queue); if (!stream) { pr_err("%s: Couldn't find stream\n", __func__); - read_unlock(&session->stream_rwlock); + read_unlock_irqrestore(&session->stream_rwlock, rl_flags); return -EINVAL; } msm_vb2_buf = container_of(vbuf, struct msm_vb2_buffer, vb2_v4l2_buf); msm_vb2_buf->in_freeq = 0; - read_unlock(&session->stream_rwlock); + read_unlock_irqrestore(&session->stream_rwlock, rl_flags); return 0; } @@ -71,7 +72,7 @@ static void msm_vb2_buf_queue(struct vb2_buffer *vb) struct msm_vb2_buffer *msm_vb2; struct msm_stream *stream; struct msm_session *session; - unsigned long flags; + unsigned long flags, rl_flags; struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); msm_vb2 = container_of(vbuf, struct msm_vb2_buffer, vb2_v4l2_buf); @@ -84,19 +85,19 @@ static void msm_vb2_buf_queue(struct vb2_buffer *vb) if (IS_ERR_OR_NULL(session)) return; - read_lock(&session->stream_rwlock); + read_lock_irqsave(&session->stream_rwlock, rl_flags); stream = msm_get_stream_from_vb2q(vb->vb2_queue); if (!stream) { pr_err("%s:%d] NULL stream", __func__, __LINE__); - read_unlock(&session->stream_rwlock); + read_unlock_irqrestore(&session->stream_rwlock, rl_flags); return; } spin_lock_irqsave(&stream->stream_lock, flags); list_add_tail(&msm_vb2->list, &stream->queued_list); spin_unlock_irqrestore(&stream->stream_lock, flags); - read_unlock(&session->stream_rwlock); + read_unlock_irqrestore(&session->stream_rwlock, rl_flags); } static void msm_vb2_buf_finish(struct vb2_buffer *vb) @@ -104,7 +105,7 @@ static void msm_vb2_buf_finish(struct vb2_buffer *vb) struct msm_vb2_buffer *msm_vb2; struct msm_stream *stream; struct msm_session *session; - unsigned long flags; + unsigned long flags, rl_flags; struct msm_vb2_buffer *msm_vb2_entry, *temp; struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); @@ -118,12 +119,12 @@ static void msm_vb2_buf_finish(struct vb2_buffer *vb) if (IS_ERR_OR_NULL(session)) return; - read_lock(&session->stream_rwlock); + read_lock_irqsave(&session->stream_rwlock, rl_flags); stream = msm_get_stream_from_vb2q(vb->vb2_queue); if (!stream) { pr_err("%s:%d] NULL stream", __func__, __LINE__); - read_unlock(&session->stream_rwlock); + read_unlock_irqrestore(&session->stream_rwlock, rl_flags); return; } @@ -136,7 +137,7 @@ static void msm_vb2_buf_finish(struct 
vb2_buffer *vb) } } spin_unlock_irqrestore(&stream->stream_lock, flags); - read_unlock(&session->stream_rwlock); + read_unlock_irqrestore(&session->stream_rwlock, rl_flags); } static void msm_vb2_stop_stream(struct vb2_queue *q) @@ -144,19 +145,19 @@ static void msm_vb2_stop_stream(struct vb2_queue *q) struct msm_vb2_buffer *msm_vb2, *temp; struct msm_stream *stream; struct msm_session *session; - unsigned long flags; + unsigned long flags, rl_flags; struct vb2_v4l2_buffer *vb2_v4l2_buf; session = msm_get_session_from_vb2q(q); if (IS_ERR_OR_NULL(session)) return; - read_lock(&session->stream_rwlock); + read_lock_irqsave(&session->stream_rwlock, rl_flags); stream = msm_get_stream_from_vb2q(q); if (!stream) { pr_err_ratelimited("%s:%d] NULL stream", __func__, __LINE__); - read_unlock(&session->stream_rwlock); + read_unlock_irqrestore(&session->stream_rwlock, rl_flags); return; } @@ -176,7 +177,7 @@ static void msm_vb2_stop_stream(struct vb2_queue *q) msm_vb2->in_freeq = 0; } spin_unlock_irqrestore(&stream->stream_lock, flags); - read_unlock(&session->stream_rwlock); + read_unlock_irqrestore(&session->stream_rwlock, rl_flags); } int msm_vb2_get_stream_state(struct msm_stream *stream) @@ -255,17 +256,17 @@ static struct vb2_v4l2_buffer *msm_vb2_get_buf(int session_id, struct vb2_v4l2_buffer *vb2_v4l2_buf = NULL; struct msm_session *session; struct msm_vb2_buffer *msm_vb2 = NULL; - unsigned long flags; + unsigned long flags, rl_flags; session = msm_get_session(session_id); if (IS_ERR_OR_NULL(session)) return NULL; - read_lock(&session->stream_rwlock); + read_lock_irqsave(&session->stream_rwlock, rl_flags); stream = msm_get_stream(session, stream_id); if (IS_ERR_OR_NULL(stream)) { - read_unlock(&session->stream_rwlock); + read_unlock_irqrestore(&session->stream_rwlock, rl_flags); return NULL; } @@ -291,7 +292,7 @@ static struct vb2_v4l2_buffer *msm_vb2_get_buf(int session_id, vb2_v4l2_buf = NULL; end: spin_unlock_irqrestore(&stream->stream_lock, flags); - read_unlock(&session->stream_rwlock); + read_unlock_irqrestore(&session->stream_rwlock, rl_flags); return vb2_v4l2_buf; } @@ -302,18 +303,18 @@ static struct vb2_v4l2_buffer *msm_vb2_get_buf_by_idx(int session_id, struct vb2_v4l2_buffer *vb2_v4l2_buf = NULL; struct msm_session *session; struct msm_vb2_buffer *msm_vb2 = NULL; - unsigned long flags; + unsigned long flags, rl_flags; session = msm_get_session(session_id); if (IS_ERR_OR_NULL(session)) return NULL; - read_lock(&session->stream_rwlock); + read_lock_irqsave(&session->stream_rwlock, rl_flags); stream = msm_get_stream(session, stream_id); if (IS_ERR_OR_NULL(stream)) { - read_unlock(&session->stream_rwlock); + read_unlock_irqrestore(&session->stream_rwlock, rl_flags); return NULL; } @@ -337,7 +338,7 @@ static struct vb2_v4l2_buffer *msm_vb2_get_buf_by_idx(int session_id, vb2_v4l2_buf = NULL; end: spin_unlock_irqrestore(&stream->stream_lock, flags); - read_unlock(&session->stream_rwlock); + read_unlock_irqrestore(&session->stream_rwlock, rl_flags); return vb2_v4l2_buf; } @@ -349,17 +350,17 @@ static int msm_vb2_put_buf(struct vb2_v4l2_buffer *vb, int session_id, struct msm_vb2_buffer *msm_vb2; struct vb2_v4l2_buffer *vb2_v4l2_buf = NULL; int rc = 0; - unsigned long flags; + unsigned long flags, rl_flags; session = msm_get_session(session_id); if (IS_ERR_OR_NULL(session)) return -EINVAL; - read_lock(&session->stream_rwlock); + read_lock_irqsave(&session->stream_rwlock, rl_flags); stream = msm_get_stream(session, stream_id); if (IS_ERR_OR_NULL(stream)) { - 
read_unlock(&session->stream_rwlock); + read_unlock_irqrestore(&session->stream_rwlock, rl_flags); return -EINVAL; } @@ -374,6 +375,8 @@ static int msm_vb2_put_buf(struct vb2_v4l2_buffer *vb, int session_id, pr_err("VB buffer is INVALID vb=%pK, ses_id=%d, str_id=%d\n", vb, session_id, stream_id); spin_unlock_irqrestore(&stream->stream_lock, flags); + read_unlock_irqrestore(&session->stream_rwlock, + rl_flags); return -EINVAL; } msm_vb2 = @@ -390,7 +393,7 @@ static int msm_vb2_put_buf(struct vb2_v4l2_buffer *vb, int session_id, rc = -EINVAL; } spin_unlock_irqrestore(&stream->stream_lock, flags); - read_unlock(&session->stream_rwlock); + read_unlock_irqrestore(&session->stream_rwlock, rl_flags); return rc; } @@ -398,7 +401,7 @@ static int msm_vb2_buf_done(struct vb2_v4l2_buffer *vb, int session_id, unsigned int stream_id, uint32_t sequence, struct timeval *ts, uint32_t reserved) { - unsigned long flags; + unsigned long flags, rl_flags; struct msm_vb2_buffer *msm_vb2; struct msm_stream *stream; struct vb2_v4l2_buffer *vb2_v4l2_buf = NULL; @@ -409,11 +412,11 @@ static int msm_vb2_buf_done(struct vb2_v4l2_buffer *vb, int session_id, if (IS_ERR_OR_NULL(session)) return -EINVAL; - read_lock(&session->stream_rwlock); + read_lock_irqsave(&session->stream_rwlock, rl_flags); stream = msm_get_stream(session, stream_id); if (IS_ERR_OR_NULL(stream)) { - read_unlock(&session->stream_rwlock); + read_unlock_irqrestore(&session->stream_rwlock, rl_flags); return -EINVAL; } @@ -428,6 +431,8 @@ static int msm_vb2_buf_done(struct vb2_v4l2_buffer *vb, int session_id, pr_err("VB buffer is INVALID ses_id=%d, str_id=%d, vb=%pK\n", session_id, stream_id, vb); spin_unlock_irqrestore(&stream->stream_lock, flags); + read_unlock_irqrestore(&session->stream_rwlock, + rl_flags); return -EINVAL; } msm_vb2 = container_of(vb2_v4l2_buf, @@ -448,7 +453,7 @@ static int msm_vb2_buf_done(struct vb2_v4l2_buffer *vb, int session_id, rc = -EINVAL; } spin_unlock_irqrestore(&stream->stream_lock, flags); - read_unlock(&session->stream_rwlock); + read_unlock_irqrestore(&session->stream_rwlock, rl_flags); return rc; } @@ -459,18 +464,18 @@ long msm_vb2_return_buf_by_idx(int session_id, unsigned int stream_id, struct vb2_v4l2_buffer *vb2_v4l2_buf = NULL; struct msm_session *session; struct msm_vb2_buffer *msm_vb2 = NULL; - unsigned long flags; + unsigned long flags, rl_flags; long rc = -EINVAL; session = msm_get_session(session_id); if (IS_ERR_OR_NULL(session)) return rc; - read_lock(&session->stream_rwlock); + read_lock_irqsave(&session->stream_rwlock, rl_flags); stream = msm_get_stream(session, stream_id); if (IS_ERR_OR_NULL(stream)) { - read_unlock(&session->stream_rwlock); + read_unlock_irqrestore(&session->stream_rwlock, rl_flags); return -EINVAL; } @@ -499,14 +504,14 @@ long msm_vb2_return_buf_by_idx(int session_id, unsigned int stream_id, end: spin_unlock_irqrestore(&stream->stream_lock, flags); - read_unlock(&session->stream_rwlock); + read_unlock_irqrestore(&session->stream_rwlock, rl_flags); return rc; } EXPORT_SYMBOL(msm_vb2_return_buf_by_idx); static int msm_vb2_flush_buf(int session_id, unsigned int stream_id) { - unsigned long flags; + unsigned long flags, rl_flags; struct msm_vb2_buffer *msm_vb2; struct msm_stream *stream; struct vb2_v4l2_buffer *vb2_v4l2_buf = NULL; @@ -516,11 +521,11 @@ static int msm_vb2_flush_buf(int session_id, unsigned int stream_id) if (IS_ERR_OR_NULL(session)) return -EINVAL; - read_lock(&session->stream_rwlock); + read_lock_irqsave(&session->stream_rwlock, rl_flags); stream = 
msm_get_stream(session, stream_id); if (IS_ERR_OR_NULL(stream)) { - read_unlock(&session->stream_rwlock); + read_unlock_irqrestore(&session->stream_rwlock, rl_flags); return -EINVAL; } @@ -532,7 +537,7 @@ static int msm_vb2_flush_buf(int session_id, unsigned int stream_id) msm_vb2->in_freeq = 0; } spin_unlock_irqrestore(&stream->stream_lock, flags); - read_unlock(&session->stream_rwlock); + read_unlock_irqrestore(&session->stream_rwlock, rl_flags); return 0; } diff --git a/drivers/media/platform/msm/ais/sensor/flash/msm_flash.c b/drivers/media/platform/msm/ais/sensor/flash/msm_flash.c index 024677e1b755..e4f534859bf2 100644 --- a/drivers/media/platform/msm/ais/sensor/flash/msm_flash.c +++ b/drivers/media/platform/msm/ais/sensor/flash/msm_flash.c @@ -1092,6 +1092,9 @@ static long msm_flash_subdev_do_ioctl( break; } break; + case VIDIOC_MSM_FLASH_CFG: + pr_err("invalid cmd 0x%x received\n", cmd); + return -EINVAL; default: return msm_flash_subdev_ioctl(sd, cmd, arg); } diff --git a/drivers/media/platform/msm/ais/sensor/msm_sensor.c b/drivers/media/platform/msm/ais/sensor/msm_sensor.c index a276b03e5294..9655fad5b62b 100644 --- a/drivers/media/platform/msm/ais/sensor/msm_sensor.c +++ b/drivers/media/platform/msm/ais/sensor/msm_sensor.c @@ -10,6 +10,12 @@ * GNU General Public License for more details. */ +#include <media/v4l2-subdev.h> +#include <media/v4l2-dev.h> +#include <media/v4l2-ioctl.h> +#include <media/v4l2-device.h> +#include <media/v4l2-fh.h> +#include <media/v4l2-event.h> #include "msm_sensor.h" #include "msm_sd.h" #include "msm_cci.h" @@ -21,6 +27,7 @@ #undef CDBG #define CDBG(fmt, args...) pr_debug(fmt, ##args) +#define MAX_SENSOR_V4l2_EVENTS 100 static struct msm_camera_i2c_fn_t msm_sensor_cci_func_tbl; static struct msm_camera_i2c_fn_t msm_sensor_secure_func_tbl; @@ -405,12 +412,26 @@ static long msm_sensor_subdev_do_ioctl( { struct video_device *vdev = video_devdata(file); struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev); - + struct v4l2_fh *vfh = file->private_data; switch (cmd) { case VIDIOC_MSM_SENSOR_CFG32: cmd = VIDIOC_MSM_SENSOR_CFG; + case VIDIOC_DQEVENT: { + if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS)) + return -ENOIOCTLCMD; + return v4l2_event_dequeue(vfh, arg, + file->f_flags & O_NONBLOCK); + } + break; + case VIDIOC_SUBSCRIBE_EVENT: + pr_debug("msm_sensor_subdev_do_ioctl:VIDIOC_SUBSCRIBE_EVENT"); + return v4l2_subdev_call(sd, core, subscribe_event, vfh, arg); + + case VIDIOC_UNSUBSCRIBE_EVENT: + return v4l2_subdev_call(sd, core, unsubscribe_event, vfh, arg); default: - return msm_sensor_subdev_ioctl(sd, cmd, arg); + pr_debug("msm_sensor.c msm_sensor_subdev_do_ioctl"); + return v4l2_subdev_call(sd, core, ioctl, cmd, arg); } } @@ -1459,8 +1480,108 @@ static int msm_sensor_power(struct v4l2_subdev *sd, int on) return rc; } + +static u32 msm_sensor_evt_mask_to_sensor_event(u32 evt_mask) +{ + u32 evt_id = SENSOR_EVENT_SUBS_MASK_NONE; + + switch (evt_mask) { + case SENSOR_EVENT_MASK_INDEX_SIGNAL_STATUS: + evt_id = SENSOR_EVENT_SIGNAL_STATUS; + break; + default: + evt_id = SENSOR_EVENT_SUBS_MASK_NONE; + break; + } + + return evt_id; +} + +static int msm_sensor_subscribe_event_mask(struct v4l2_fh *fh, + struct v4l2_event_subscription *sub, int evt_mask_index, + u32 evt_id, bool subscribe_flag) +{ + int rc = 0; + + sub->type = evt_id; + + if (subscribe_flag) + rc = v4l2_event_subscribe(fh, sub, + MAX_SENSOR_V4l2_EVENTS, NULL); + else + rc = v4l2_event_unsubscribe(fh, sub); + if (rc != 0) { + pr_err("%s: Subs event_type =0x%x failed\n", + __func__, sub->type); + return rc; 
+ } + return rc; +} + +static int msm_sensor_process_event_subscription(struct v4l2_fh *fh, + struct v4l2_event_subscription *sub, bool subscribe_flag) +{ + int rc = 0, evt_mask_index = 0; + u32 evt_mask = sub->type; + u32 evt_id = 0; + + if (SENSOR_EVENT_SUBS_MASK_NONE == evt_mask) { + pr_err("%s: Subs event_type is None=0x%x\n", + __func__, evt_mask); + return 0; + } + + evt_mask_index = SENSOR_EVENT_MASK_INDEX_SIGNAL_STATUS; + if (evt_mask & (1<<evt_mask_index)) { + evt_id = + msm_sensor_evt_mask_to_sensor_event( + evt_mask_index); + rc = msm_sensor_subscribe_event_mask(fh, sub, + evt_mask_index, evt_id, subscribe_flag); + if (rc != 0) { + pr_err("%s: Subs event index:%d failed\n", + __func__, evt_mask_index); + return rc; + } + } + + return rc; +} + +int msm_sensor_send_event(struct msm_sensor_ctrl_t *s_ctrl, + uint32_t event_type, + struct msm_sensor_event_data *event_data) +{ + struct v4l2_event sensor_event; + + memset(&sensor_event, 0, sizeof(struct v4l2_event)); + sensor_event.id = 0; + sensor_event.type = event_type; + + memcpy(&sensor_event.u.data[0], event_data, + sizeof(struct msm_sensor_event_data)); + v4l2_event_queue(s_ctrl->msm_sd.sd.devnode, &sensor_event); + return 0; +} + +static int msm_sensor_subscribe_event(struct v4l2_subdev *sd, + struct v4l2_fh *fh, + struct v4l2_event_subscription *sub) +{ + return msm_sensor_process_event_subscription(fh, sub, true); +} + +static int msm_sensor_unsubscribe_event(struct v4l2_subdev *sd, + struct v4l2_fh *fh, + struct v4l2_event_subscription *sub) +{ + return msm_sensor_process_event_subscription(fh, sub, false); +} + static struct v4l2_subdev_core_ops msm_sensor_subdev_core_ops = { .ioctl = msm_sensor_subdev_ioctl, + .subscribe_event = msm_sensor_subscribe_event, + .unsubscribe_event = msm_sensor_unsubscribe_event, .s_power = msm_sensor_power, }; diff --git a/drivers/media/platform/msm/ais/sensor/msm_sensor.h b/drivers/media/platform/msm/ais/sensor/msm_sensor.h index eacd3b05420c..b742d06d3baa 100644 --- a/drivers/media/platform/msm/ais/sensor/msm_sensor.h +++ b/drivers/media/platform/msm/ais/sensor/msm_sensor.h @@ -90,10 +90,18 @@ struct msm_sensor_ctrl_t { uint32_t set_mclk_23880000; uint8_t is_csid_tg_mode; uint32_t is_secure; - + /* Interrupt GPIOs */ + struct gpio gpio_array[1]; + /* device status and Flags */ + int irq; struct msm_sensor_init_t s_init; + /* worker to handle interrupts */ + struct delayed_work irq_delayed_work; }; +int msm_sensor_send_event(struct msm_sensor_ctrl_t *s_ctrl, + uint32_t event_type, struct msm_sensor_event_data *event_data); + int msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void *argp); int msm_sensor_power_up(struct msm_sensor_ctrl_t *s_ctrl); diff --git a/drivers/media/platform/msm/ais/sensor/msm_sensor_driver.c b/drivers/media/platform/msm/ais/sensor/msm_sensor_driver.c index 58bddb1a3fba..5e34016d199c 100644 --- a/drivers/media/platform/msm/ais/sensor/msm_sensor_driver.c +++ b/drivers/media/platform/msm/ais/sensor/msm_sensor_driver.c @@ -132,6 +132,7 @@ static int32_t msm_sensor_driver_create_v4l_subdev s_ctrl->sensordata->sensor_name); v4l2_set_subdevdata(&s_ctrl->msm_sd.sd, s_ctrl->pdev); s_ctrl->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; + s_ctrl->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_EVENTS; media_entity_init(&s_ctrl->msm_sd.sd.entity, 0, NULL, 0); s_ctrl->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV; s_ctrl->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_SENSOR; @@ -143,6 +144,8 @@ static int32_t msm_sensor_driver_create_v4l_subdev return rc; } 
msm_cam_copy_v4l2_subdev_fops(&msm_sensor_v4l2_subdev_fops); + msm_sensor_v4l2_subdev_fops.unlocked_ioctl = + msm_sensor_subdev_fops_ioctl; #ifdef CONFIG_COMPAT msm_sensor_v4l2_subdev_fops.compat_ioctl32 = msm_sensor_subdev_fops_ioctl; @@ -632,6 +635,56 @@ static void msm_sensor_fill_sensor_info(struct msm_sensor_ctrl_t *s_ctrl, strlcpy(entity_name, s_ctrl->msm_sd.sd.entity.name, MAX_SENSOR_NAME); } +static irqreturn_t bridge_irq(int irq, void *dev) +{ + struct msm_sensor_ctrl_t *s_ctrl = dev; + + pr_err("msm_sensor_driver: received bridge interrupt:0x%x", + s_ctrl->sensordata->slave_info->sensor_slave_addr); + schedule_delayed_work(&s_ctrl->irq_delayed_work, + msecs_to_jiffies(0)); + return IRQ_HANDLED; +} + +static void bridge_irq_delay_work(struct work_struct *work) +{ + struct msm_sensor_ctrl_t *s_ctrl; + struct msm_camera_i2c_client *sensor_i2c_client; + struct msm_camera_slave_info *slave_info; + const char *sensor_name; + + struct msm_sensor_event_data sensor_event; + + s_ctrl = container_of(work, struct msm_sensor_ctrl_t, + irq_delayed_work.work); + if (!s_ctrl) { + pr_err("%s:%d failed: %pK\n", + __func__, __LINE__, s_ctrl); + goto exit_queue; + } + sensor_i2c_client = s_ctrl->sensor_i2c_client; + slave_info = s_ctrl->sensordata->slave_info; + sensor_name = s_ctrl->sensordata->sensor_name; + + if (!sensor_i2c_client || !slave_info || !sensor_name) { + pr_err("%s:%d failed: %pK %pK %pK\n", + __func__, __LINE__, sensor_i2c_client, slave_info, + sensor_name); + goto exit_queue; + } + + mutex_lock(s_ctrl->msm_sensor_mutex); + /* Fill the sensor event */ + sensor_event.sensor_slave_addr = + slave_info->sensor_slave_addr; + /* Queue the event */ + msm_sensor_send_event(s_ctrl, SENSOR_EVENT_SIGNAL_STATUS, + &sensor_event); + mutex_unlock(s_ctrl->msm_sensor_mutex); +exit_queue: + pr_err("Work IRQ exit"); +} + /* static function definition */ int32_t msm_sensor_driver_probe(void *setting, struct msm_sensor_info_t *probed_info, char *entity_name) @@ -934,12 +987,66 @@ CSID_TG: msm_sensor_fill_sensor_info(s_ctrl, probed_info, entity_name); - /* Set probe succeeded flag to 1 so that no other camera shall - * probed on this slot - */ + if (slave_info->gpio_intr_config.gpio_num != -1) { + /* Configure INTB interrupt */ + s_ctrl->gpio_array[0].gpio = + slave_info->gpio_intr_config.gpio_num; + s_ctrl->gpio_array[0].flags = 0; + /* Only setup IRQ1 for now... 
*/ + INIT_DELAYED_WORK(&s_ctrl->irq_delayed_work, + bridge_irq_delay_work); + rc = gpio_request_array(&s_ctrl->gpio_array[0], 1); + if (rc < 0) { + pr_err("%s: Failed to request irq_gpio %d", + __func__, rc); + goto cancel_work; + } + + if (gpio_is_valid(s_ctrl->gpio_array[0].gpio)) { + rc |= gpio_direction_input( + s_ctrl->gpio_array[0].gpio); + if (rc) { + pr_err("%s: Failed gpio_direction irq %d", + __func__, rc); + goto cancel_work; + } else { + pr_err("sensor probe IRQ direction succeeded"); + } + } + + s_ctrl->irq = gpio_to_irq(s_ctrl->gpio_array[0].gpio); + if (s_ctrl->irq) { + rc = request_irq(s_ctrl->irq, bridge_irq, + IRQF_ONESHOT | + (slave_info-> + gpio_intr_config.gpio_trigger), + "qcom,camera", s_ctrl); + if (rc) { + pr_err("%s: Failed request_irq %d", + __func__, rc); + goto cancel_work; + } + + } else { + pr_err("%s: Failed gpio_to_irq %d", + __func__, rc); + rc = -EINVAL; + goto cancel_work; + } + + /* Keep irq enabled */ + pr_err("msm_sensor_driver.c irq number = %d", s_ctrl->irq); + } + + /* + Set probe succeeded flag to 1 so that no other camera shall + * probed on this slot + */ s_ctrl->is_probe_succeed = 1; return rc; +cancel_work: + cancel_delayed_work(&s_ctrl->irq_delayed_work); free_camera_info: kfree(camera_info); free_slave_info: @@ -1125,7 +1232,6 @@ static int32_t msm_sensor_driver_parse(struct msm_sensor_ctrl_t *s_ctrl) /* Store sensor control structure in static database */ g_sctrl[s_ctrl->id] = s_ctrl; CDBG("g_sctrl[%d] %pK", s_ctrl->id, g_sctrl[s_ctrl->id]); - return rc; FREE_DT_DATA: @@ -1178,7 +1284,6 @@ static int32_t msm_sensor_driver_platform_probe(struct platform_device *pdev) /* Fill platform device id*/ pdev->id = s_ctrl->id; - /* Fill device in power info */ s_ctrl->sensordata->power_info.dev = &pdev->dev; diff --git a/drivers/media/platform/msm/camera_v2/common/msm_camera_io_util.c b/drivers/media/platform/msm/camera_v2/common/msm_camera_io_util.c index f6d7f5fb8d32..8a49c7cf9f4a 100644 --- a/drivers/media/platform/msm/camera_v2/common/msm_camera_io_util.c +++ b/drivers/media/platform/msm/camera_v2/common/msm_camera_io_util.c @@ -424,7 +424,7 @@ int msm_camera_config_vreg(struct device *dev, struct camera_vreg_t *cam_vreg, curr_vreg = &cam_vreg[j]; reg_ptr[j] = regulator_get(dev, curr_vreg->reg_name); - if (IS_ERR(reg_ptr[j])) { + if (IS_ERR_OR_NULL(reg_ptr[j])) { pr_err("%s: %s get failed\n", __func__, curr_vreg->reg_name); @@ -531,7 +531,7 @@ int msm_camera_enable_vreg(struct device *dev, struct camera_vreg_t *cam_vreg, continue; } else j = i; - if (IS_ERR(reg_ptr[j])) { + if (IS_ERR_OR_NULL(reg_ptr[j])) { pr_err("%s: %s null regulator\n", __func__, cam_vreg[j].reg_name); goto disable_vreg; diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c index 8f3cffb4c3da..eab56b70e646 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c @@ -86,7 +86,7 @@ struct msm_isp_bufq *msm_isp_get_bufq( /* bufq_handle cannot be 0 */ if ((bufq_handle == 0) || bufq_index >= BUF_MGR_NUM_BUF_Q || - (bufq_index > buf_mgr->num_buf_q)) + (bufq_index >= buf_mgr->num_buf_q)) return NULL; bufq = &buf_mgr->bufq[bufq_index]; diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp.c index 23f936258660..22eb86f4f875 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.c @@ -53,7 +53,6 @@ MODULE_DEVICE_TABLE(of, 
msm_vfe_dt_match); #define MAX_OVERFLOW_COUNTERS 29 #define OVERFLOW_LENGTH 1024 #define OVERFLOW_BUFFER_LENGTH 64 -static char stat_line[OVERFLOW_LENGTH]; struct msm_isp_statistics stats; struct msm_isp_ub_info ub_info; @@ -113,19 +112,30 @@ static int vfe_debugfs_statistics_open(struct inode *inode, struct file *file) return 0; } -static ssize_t vfe_debugfs_statistics_read(struct file *t_file, char *t_char, - size_t t_size_t, loff_t *t_loff_t) +static ssize_t vfe_debugfs_statistics_read(struct file *t_file, + char __user *t_char, size_t t_size_t, loff_t *t_loff_t) { int i; + size_t rc; uint64_t *ptr; char buffer[OVERFLOW_BUFFER_LENGTH] = {0}; + char *stat_line; struct vfe_device *vfe_dev = (struct vfe_device *) t_file->private_data; - struct msm_isp_statistics *stats = vfe_dev->stats; + struct msm_isp_statistics *stats; - memset(stat_line, 0, sizeof(stat_line)); + stat_line = kzalloc(OVERFLOW_LENGTH, GFP_KERNEL); + if (!stat_line) + return -ENOMEM; + spin_lock(&vfe_dev->common_data->common_dev_data_lock); + stats = vfe_dev->stats; msm_isp_util_get_bandwidth_stats(vfe_dev, stats); + spin_unlock(&vfe_dev->common_data->common_dev_data_lock); ptr = (uint64_t *)(stats); + if (MAX_OVERFLOW_COUNTERS > OVERFLOW_LENGTH) { + kfree(stat_line); + return -EINVAL; + } for (i = 0; i < MAX_OVERFLOW_COUNTERS; i++) { strlcat(stat_line, stats_str[i], sizeof(stat_line)); strlcat(stat_line, " ", sizeof(stat_line)); @@ -133,8 +143,10 @@ static ssize_t vfe_debugfs_statistics_read(struct file *t_file, char *t_char, strlcat(stat_line, buffer, sizeof(stat_line)); strlcat(stat_line, "\r\n", sizeof(stat_line)); } - return simple_read_from_buffer(t_char, t_size_t, + rc = simple_read_from_buffer(t_char, t_size_t, t_loff_t, stat_line, strlen(stat_line)); + kfree(stat_line); + return rc; } static ssize_t vfe_debugfs_statistics_write(struct file *t_file, @@ -142,8 +154,12 @@ static ssize_t vfe_debugfs_statistics_write(struct file *t_file, { struct vfe_device *vfe_dev = (struct vfe_device *) t_file->private_data; - struct msm_isp_statistics *stats = vfe_dev->stats; + struct msm_isp_statistics *stats; + + spin_lock(&vfe_dev->common_data->common_dev_data_lock); + stats = vfe_dev->stats; memset(stats, 0, sizeof(struct msm_isp_statistics)); + spin_unlock(&vfe_dev->common_data->common_dev_data_lock); return sizeof(struct msm_isp_statistics); } diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c index f5533fd9062e..981832b5a586 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c @@ -702,6 +702,12 @@ static void msm_vfe40_reg_update(struct vfe_device *vfe_dev, vfe_dev->reg_update_requested; if ((vfe_dev->is_split && vfe_dev->pdev->id == ISP_VFE1) && ((frame_src == VFE_PIX_0) || (frame_src == VFE_SRC_MAX))) { + if (!vfe_dev->common_data->dual_vfe_res->vfe_base[ISP_VFE0]) { + pr_err("%s vfe_base for ISP_VFE0 is NULL\n", __func__); + spin_unlock_irqrestore(&vfe_dev->reg_update_lock, + flags); + return; + } msm_camera_io_w_mb(update_mask, vfe_dev->common_data->dual_vfe_res->vfe_base[ISP_VFE0] + 0x378); diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c index c85bf1655b8c..cc4dd5eaf93e 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c @@ -560,6 +560,12 @@ static void msm_vfe44_reg_update(struct vfe_device *vfe_dev, vfe_dev->reg_update_requested; if 
((vfe_dev->is_split && vfe_dev->pdev->id == ISP_VFE1) && ((frame_src == VFE_PIX_0) || (frame_src == VFE_SRC_MAX))) { + if (!vfe_dev->common_data->dual_vfe_res->vfe_base[ISP_VFE0]) { + pr_err("%s vfe_base for ISP_VFE0 is NULL\n", __func__); + spin_unlock_irqrestore(&vfe_dev->reg_update_lock, + flags); + return; + } msm_camera_io_w_mb(update_mask, vfe_dev->common_data->dual_vfe_res->vfe_base[ISP_VFE0] + 0x378); diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c index 72ce32940c29..632624034a04 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c @@ -499,6 +499,12 @@ static void msm_vfe46_reg_update(struct vfe_device *vfe_dev, vfe_dev->reg_update_requested; if ((vfe_dev->is_split && vfe_dev->pdev->id == ISP_VFE1) && ((frame_src == VFE_PIX_0) || (frame_src == VFE_SRC_MAX))) { + if (!vfe_dev->common_data->dual_vfe_res->vfe_base[ISP_VFE0]) { + pr_err("%s vfe_base for ISP_VFE0 is NULL\n", __func__); + spin_unlock_irqrestore(&vfe_dev->reg_update_lock, + flags); + return; + } msm_camera_io_w_mb(update_mask, vfe_dev->common_data->dual_vfe_res->vfe_base[ISP_VFE0] + 0x3D8); diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c index 24d1c6cba84d..6716bb6caad6 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c @@ -735,6 +735,12 @@ void msm_vfe47_reg_update(struct vfe_device *vfe_dev, vfe_dev->reg_update_requested; if ((vfe_dev->is_split && vfe_dev->pdev->id == ISP_VFE1) && ((frame_src == VFE_PIX_0) || (frame_src == VFE_SRC_MAX))) { + if (!vfe_dev->common_data->dual_vfe_res->vfe_base[ISP_VFE0]) { + pr_err("%s vfe_base for ISP_VFE0 is NULL\n", __func__); + spin_unlock_irqrestore(&vfe_dev->reg_update_lock, + flags); + return; + } msm_camera_io_w_mb(update_mask, vfe_dev->common_data->dual_vfe_res->vfe_base[ISP_VFE0] + 0x4AC); diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c index 63f5497e63b8..66c5ce11ea3d 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c @@ -674,6 +674,7 @@ void msm_isp_process_reg_upd_epoch_irq(struct vfe_device *vfe_dev, void msm_isp_reset_framedrop(struct vfe_device *vfe_dev, struct msm_vfe_axi_stream *stream_info) { + uint32_t framedrop_period = 0; stream_info->runtime_num_burst_capture = stream_info->num_burst_capture; /** @@ -682,9 +683,15 @@ void msm_isp_reset_framedrop(struct vfe_device *vfe_dev, * by the request frame api */ if (!stream_info->controllable_output) { - stream_info->current_framedrop_period = + framedrop_period = msm_isp_get_framedrop_period( stream_info->frame_skip_pattern); + if (stream_info->frame_skip_pattern == SKIP_ALL) + stream_info->current_framedrop_period = + MSM_VFE_STREAM_STOP_PERIOD; + else + stream_info->current_framedrop_period = + framedrop_period; } msm_isp_cfg_framedrop_reg(stream_info); diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c index e87f2414a879..92b1f2ea871b 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c @@ -196,7 +196,7 @@ uint32_t msm_isp_get_framedrop_period( return 32; break; case SKIP_ALL: - return 1; + return SKIP_ALL; 
default: return 1; } diff --git a/drivers/media/platform/msm/camera_v2/msm.c b/drivers/media/platform/msm/camera_v2/msm.c index 7cfeffc92d48..194a6583103e 100644 --- a/drivers/media/platform/msm/camera_v2/msm.c +++ b/drivers/media/platform/msm/camera_v2/msm.c @@ -746,6 +746,16 @@ static long msm_private_ioctl(struct file *file, void *fh, if (!event_data) return -EINVAL; + switch (cmd) { + case MSM_CAM_V4L2_IOCTL_NOTIFY: + case MSM_CAM_V4L2_IOCTL_CMD_ACK: + case MSM_CAM_V4L2_IOCTL_NOTIFY_DEBUG: + case MSM_CAM_V4L2_IOCTL_NOTIFY_ERROR: + break; + default: + return -ENOTTY; + } + memset(&event, 0, sizeof(struct v4l2_event)); session_id = event_data->session_id; stream_id = event_data->stream_id; diff --git a/drivers/media/platform/msm/camera_v2/sensor/Makefile b/drivers/media/platform/msm/camera_v2/sensor/Makefile index 872dc59d218e..b04560fe42bc 100644 --- a/drivers/media/platform/msm/camera_v2/sensor/Makefile +++ b/drivers/media/platform/msm/camera_v2/sensor/Makefile @@ -5,4 +5,5 @@ ccflags-y += -Idrivers/media/platform/msm/camera_v2/camera ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/cci obj-$(CONFIG_MSMB_CAMERA) += cci/ io/ csiphy/ csid/ actuator/ eeprom/ ois/ flash/ ir_led/ ir_cut/ +obj-$(CONFIG_MSMB_CAMERA) += laser_led/ obj-$(CONFIG_MSM_CAMERA_SENSOR) += msm_sensor_init.o msm_sensor_driver.o msm_sensor.o diff --git a/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c b/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c index 3cb6b55ccc8c..f2c765a4649f 100644 --- a/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c +++ b/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c @@ -331,6 +331,9 @@ static int32_t msm_cci_addr_to_num_bytes( case MSM_CAMERA_I2C_3B_ADDR: retVal = 3; break; + case MSM_CAMERA_I2C_DWORD_ADDR: + retVal = 4; + break; default: pr_err("%s: %d failed: %d\n", __func__, __LINE__, addr_type); retVal = 1; diff --git a/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c b/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c index 223ddf39dce8..c77367ed1603 100644 --- a/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c +++ b/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c @@ -1213,6 +1213,9 @@ static long msm_flash_subdev_do_ioctl( break; } break; + case VIDIOC_MSM_FLASH_CFG: + pr_err("invalid cmd 0x%x received\n", cmd); + return -EINVAL; default: return msm_flash_subdev_ioctl(sd, cmd, arg); } diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_cci_i2c.c b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_cci_i2c.c index 6d9b0e987d0d..fc6ceb1b590f 100644 --- a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_cci_i2c.c +++ b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_cci_i2c.c @@ -67,7 +67,8 @@ int32_t msm_camera_cci_i2c_read_seq(struct msm_camera_i2c_client *client, if ((client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR && client->addr_type != MSM_CAMERA_I2C_WORD_ADDR - && client->addr_type != MSM_CAMERA_I2C_3B_ADDR) + && client->addr_type != MSM_CAMERA_I2C_3B_ADDR + && client->addr_type != MSM_CAMERA_I2C_DWORD_ADDR) || num_byte == 0) return rc; diff --git a/drivers/media/platform/msm/camera_v2/sensor/laser_led/Makefile b/drivers/media/platform/msm/camera_v2/sensor/laser_led/Makefile new file mode 100644 index 000000000000..e981fc2e1f9c --- /dev/null +++ b/drivers/media/platform/msm/camera_v2/sensor/laser_led/Makefile @@ -0,0 +1,5 @@ +ccflags-y += -Idrivers/media/platform/msm/camera_v2 
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/common +ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io +ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/cci +obj-$(CONFIG_MSMB_CAMERA) += msm_laser_led.o diff --git a/drivers/media/platform/msm/camera_v2/sensor/laser_led/msm_laser_led.c b/drivers/media/platform/msm/camera_v2/sensor/laser_led/msm_laser_led.c new file mode 100644 index 000000000000..c368f081f97b --- /dev/null +++ b/drivers/media/platform/msm/camera_v2/sensor/laser_led/msm_laser_led.c @@ -0,0 +1,573 @@ +/* Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/module.h> +#include "msm_laser_led.h" +#include "msm_camera_dt_util.h" +#include "msm_sd.h" +#include "msm_cci.h" + +#undef CDBG +#define CDBG(fmt, args...) pr_debug(fmt, ##args) + +DEFINE_MSM_MUTEX(msm_laser_led_mutex); + +static struct v4l2_file_operations msm_laser_led_v4l2_subdev_fops; + +static const struct of_device_id msm_laser_led_dt_match[] = { + {.compatible = "qcom,laser-led", .data = NULL}, + {} +}; + +static long msm_laser_led_subdev_ioctl(struct v4l2_subdev *sd, + unsigned int cmd, void *arg); + +static int32_t msm_laser_led_get_subdev_id( + struct msm_laser_led_ctrl_t *laser_led_ctrl, void __user *arg) +{ + int32_t __user *subdev_id = (int32_t __user *)arg; + + CDBG("Enter\n"); + if (!subdev_id) { + pr_err("subdevice ID is not valid\n"); + return -EINVAL; + } + + if (laser_led_ctrl->laser_led_device_type != + MSM_CAMERA_PLATFORM_DEVICE) { + pr_err("device type is not matching\n"); + return -EINVAL; + } + + if (copy_to_user(arg, &laser_led_ctrl->pdev->id, + sizeof(int32_t))) { + pr_err("%s:%d failed\n", __func__, __LINE__); + return -EFAULT; + } + + CDBG("Exit: subdev_id %d\n", laser_led_ctrl->pdev->id); + return 0; +} + +static struct msm_camera_i2c_fn_t msm_sensor_cci_func_tbl = { + .i2c_read = msm_camera_cci_i2c_read, + .i2c_read_seq = msm_camera_cci_i2c_read_seq, + .i2c_write = msm_camera_cci_i2c_write, + .i2c_write_table = msm_camera_cci_i2c_write_table, + .i2c_write_seq_table = msm_camera_cci_i2c_write_seq_table, + .i2c_write_table_w_microdelay = + msm_camera_cci_i2c_write_table_w_microdelay, + .i2c_util = msm_sensor_cci_i2c_util, + .i2c_poll = msm_camera_cci_i2c_poll, +}; +#ifdef CONFIG_COMPAT +static int32_t msm_laser_led_init( + struct msm_laser_led_ctrl_t *laser_led_ctrl, + struct msm_laser_led_cfg_data_t32 __user *laser_led_data) +#else +static int32_t msm_laser_led_init( + struct msm_laser_led_ctrl_t *laser_led_ctrl, + struct msm_laser_led_cfg_data_t __user *laser_led_data) +#endif +{ + int32_t rc = -EFAULT; + struct msm_camera_cci_client *cci_client = NULL; + + CDBG("Enter\n"); + + if (laser_led_ctrl->laser_led_state == MSM_CAMERA_LASER_LED_INIT) { + pr_err("Invalid laser_led state = %d\n", + laser_led_ctrl->laser_led_state); + return 0; + } + + rc = laser_led_ctrl->i2c_client.i2c_func_tbl->i2c_util( + &laser_led_ctrl->i2c_client, MSM_CCI_INIT); + if (rc < 0) + pr_err("cci_init failed\n"); + + cci_client = laser_led_ctrl->i2c_client.cci_client; + + if 
(copy_from_user(&(cci_client->sid), + &(laser_led_data->i2c_addr), + sizeof(uint16_t))) { + pr_err("%s:%d failed\n", __func__, __LINE__); + return -EFAULT; + } + cci_client->sid = cci_client->sid >> 1; + cci_client->retries = 3; + cci_client->id_map = 0; + + if (copy_from_user(&(cci_client->i2c_freq_mode), + &(laser_led_data->i2c_freq_mode), + sizeof(enum i2c_freq_mode_t))) { + pr_err("%s:%d failed\n", __func__, __LINE__); + return -EFAULT; + } + + laser_led_ctrl->laser_led_state = MSM_CAMERA_LASER_LED_INIT; + + CDBG("Exit\n"); + return 0; +} + +static int msm_laser_led_close(struct v4l2_subdev *sd, + struct v4l2_subdev_fh *fh) { + int rc = 0; + struct msm_laser_led_ctrl_t *l_ctrl = v4l2_get_subdevdata(sd); + + CDBG("Enter\n"); + if (!l_ctrl) { + pr_err("failed: subdev data is null\n"); + return -EINVAL; + } + mutex_lock(l_ctrl->laser_led_mutex); + if (l_ctrl->laser_led_device_type == MSM_CAMERA_PLATFORM_DEVICE && + l_ctrl->laser_led_state != MSM_CAMERA_LASER_LED_RELEASE) { + rc = l_ctrl->i2c_client.i2c_func_tbl->i2c_util( + &l_ctrl->i2c_client, MSM_CCI_RELEASE); + if (rc < 0) + pr_err("cci_init failed: %d\n", rc); + } + l_ctrl->laser_led_state = MSM_CAMERA_LASER_LED_RELEASE; + mutex_unlock(l_ctrl->laser_led_mutex); + CDBG("Exit\n"); + return rc; +} + +#ifdef CONFIG_COMPAT +static long msm_laser_led_subdev_do_ioctl( + struct file *file, unsigned int cmd, void *arg) +{ + int32_t rc = 0; + struct video_device *vdev = video_devdata(file); + struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev); + + CDBG("Enter\n"); + switch (cmd) { + case VIDIOC_MSM_LASER_LED_CFG32: + cmd = VIDIOC_MSM_LASER_LED_CFG; + default: + rc = msm_laser_led_subdev_ioctl(sd, cmd, arg); + } + + CDBG("Exit\n"); + return rc; +} + +static long msm_laser_led_subdev_fops_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + return msm_laser_led_subdev_do_ioctl(file, cmd, (void *)arg); +} + +static int32_t msm_laser_led_control32( + struct msm_laser_led_ctrl_t *laser_led_ctrl, + void __user *argp) +{ + struct msm_camera_i2c_reg_setting32 conf_array32; + struct msm_camera_i2c_reg_setting conf_array; + int32_t rc = 0; + struct msm_laser_led_cfg_data_t32 laser_led_data; + uint32_t *debug_reg; + int i; + uint16_t local_data; + + if (laser_led_ctrl->laser_led_state != MSM_CAMERA_LASER_LED_INIT) { + pr_err("%s:%d failed: invalid state %d\n", __func__, + __LINE__, laser_led_ctrl->laser_led_state); + return -EFAULT; + } + + if (copy_from_user(&laser_led_data, + argp, + sizeof(struct msm_laser_led_cfg_data_t32))) { + pr_err("%s:%d failed\n", __func__, __LINE__); + return -EFAULT; + } + + if (copy_from_user(&conf_array32, + (compat_ptr)(laser_led_data.setting), + sizeof(struct msm_camera_i2c_reg_setting32))) { + pr_err("%s:%d failed\n", __func__, __LINE__); + return -EFAULT; + } + + conf_array.addr_type = conf_array32.addr_type; + conf_array.data_type = conf_array32.data_type; + conf_array.delay = conf_array32.delay; + conf_array.size = conf_array32.size; + + if (!conf_array.size || + conf_array.size > I2C_REG_DATA_MAX) { + pr_err("%s:%d failed\n", __func__, __LINE__); + return -EFAULT; + } + + conf_array.reg_setting = kzalloc(conf_array.size * + (sizeof(struct msm_camera_i2c_reg_array)), GFP_KERNEL); + if (!conf_array.reg_setting) + return -ENOMEM; + + if (copy_from_user(conf_array.reg_setting, + (compat_ptr)(conf_array32.reg_setting), + conf_array.size * + sizeof(struct msm_camera_i2c_reg_array))) { + pr_err("%s:%d failed\n", __func__, __LINE__); + kfree(conf_array.reg_setting); + return -EFAULT; + } + + debug_reg = 
kzalloc(laser_led_data.debug_reg_size * + (sizeof(uint32_t)), GFP_KERNEL); + if (!debug_reg) { + kfree(conf_array.reg_setting); + return -ENOMEM; + } + + if (copy_from_user(debug_reg, + (void __user *)compat_ptr(laser_led_data.debug_reg), + laser_led_data.debug_reg_size * + sizeof(uint32_t))) { + pr_err("%s:%d failed\n", __func__, __LINE__); + kfree(conf_array.reg_setting); + kfree(debug_reg); + return -EFAULT; + } + + laser_led_ctrl->i2c_client.addr_type = conf_array.addr_type; + + rc = laser_led_ctrl->i2c_client.i2c_func_tbl-> + i2c_write_table(&(laser_led_ctrl->i2c_client), + &conf_array); + + for (i = 0; i < laser_led_data.debug_reg_size; i++) { + rc = laser_led_ctrl->i2c_client.i2c_func_tbl->i2c_read( + &(laser_led_ctrl->i2c_client), + debug_reg[i], + &local_data, conf_array.data_type); + } + + kfree(conf_array.reg_setting); + kfree(debug_reg); + + return rc; +} +#endif + +static int32_t msm_laser_led_control( + struct msm_laser_led_ctrl_t *laser_led_ctrl, + void __user *argp) +{ + struct msm_camera_i2c_reg_setting conf_array; + struct msm_laser_led_cfg_data_t laser_led_data; + + uint32_t *debug_reg; + int i; + uint16_t local_data; + int32_t rc = 0; + + if (laser_led_ctrl->laser_led_state != MSM_CAMERA_LASER_LED_INIT) { + pr_err("%s:%d failed: invalid state %d\n", __func__, + __LINE__, laser_led_ctrl->laser_led_state); + return -EFAULT; + } + + if (copy_from_user(&laser_led_data, + argp, + sizeof(struct msm_laser_led_cfg_data_t))) { + pr_err("%s:%d failed\n", __func__, __LINE__); + return -EFAULT; + } + + if (copy_from_user(&conf_array, + (laser_led_data.setting), + sizeof(struct msm_camera_i2c_reg_setting))) { + pr_err("%s:%d failed\n", __func__, __LINE__); + return -EFAULT; + } + + if (!conf_array.size || + conf_array.size > I2C_REG_DATA_MAX) { + pr_err("%s:%d failed\n", __func__, __LINE__); + return -EFAULT; + } + + conf_array.reg_setting = kzalloc(conf_array.size * + (sizeof(struct msm_camera_i2c_reg_array)), GFP_KERNEL); + if (!conf_array.reg_setting) + return -ENOMEM; + + if (copy_from_user(conf_array.reg_setting, (void __user *)( + conf_array.reg_setting), + conf_array.size * + sizeof(struct msm_camera_i2c_reg_array))) { + pr_err("%s:%d failed\n", __func__, __LINE__); + kfree(conf_array.reg_setting); + return -EFAULT; + } + + debug_reg = kzalloc(laser_led_data.debug_reg_size * + (sizeof(uint32_t)), GFP_KERNEL); + if (!debug_reg) { + kfree(conf_array.reg_setting); + return -ENOMEM; + } + + if (copy_from_user(debug_reg, + (laser_led_data.debug_reg), + laser_led_data.debug_reg_size * + sizeof(uint32_t))) { + pr_err("%s:%d failed\n", __func__, __LINE__); + kfree(debug_reg); + kfree(conf_array.reg_setting); + return -EFAULT; + } + + laser_led_ctrl->i2c_client.addr_type = conf_array.addr_type; + + rc = laser_led_ctrl->i2c_client.i2c_func_tbl-> + i2c_write_table(&(laser_led_ctrl->i2c_client), + &conf_array); + + for (i = 0; i < laser_led_data.debug_reg_size; i++) { + rc = laser_led_ctrl->i2c_client.i2c_func_tbl->i2c_read( + &(laser_led_ctrl->i2c_client), + debug_reg[i], + &local_data, conf_array.data_type); + } + + kfree(conf_array.reg_setting); + kfree(debug_reg); + + return rc; +} + +static int32_t msm_laser_led_config(struct msm_laser_led_ctrl_t *laser_led_ctrl, + void __user *argp) +{ + int32_t rc = -EINVAL; + enum msm_laser_led_cfg_type_t cfg_type; + +#ifdef CONFIG_COMPAT + struct msm_laser_led_cfg_data_t32 __user *laser_led_data = + (struct msm_laser_led_cfg_data_t32 __user *) argp; +#else + struct msm_laser_led_cfg_data_t __user *laser_led_data = + (struct 
msm_laser_led_cfg_data_t __user *) argp; +#endif + + mutex_lock(laser_led_ctrl->laser_led_mutex); + + if (copy_from_user(&(cfg_type), + &(laser_led_data->cfg_type), + sizeof(enum msm_laser_led_cfg_type_t))) { + pr_err("%s:%d failed\n", __func__, __LINE__); + mutex_unlock(laser_led_ctrl->laser_led_mutex); + return -EFAULT; + } + + CDBG("type %d\n", cfg_type); + + switch (cfg_type) { + case CFG_LASER_LED_INIT: + rc = msm_laser_led_init(laser_led_ctrl, laser_led_data); + break; + case CFG_LASER_LED_CONTROL: +#ifdef CONFIG_COMPAT + if (is_compat_task()) + rc = msm_laser_led_control32(laser_led_ctrl, argp); + else +#endif + rc = msm_laser_led_control(laser_led_ctrl, argp); + break; + default: + rc = -EFAULT; + break; + } + + mutex_unlock(laser_led_ctrl->laser_led_mutex); + + CDBG("Exit: type %d\n", cfg_type); + + return rc; +} + +static long msm_laser_led_subdev_ioctl(struct v4l2_subdev *sd, + unsigned int cmd, void *arg) +{ + struct msm_laser_led_ctrl_t *lctrl = NULL; + void __user *argp = (void __user *)arg; + + CDBG("Enter\n"); + + if (!sd) { + pr_err(" v4l2 ir led subdevice is NULL\n"); + return -EINVAL; + } + lctrl = v4l2_get_subdevdata(sd); + if (!lctrl) { + pr_err("lctrl NULL\n"); + return -EINVAL; + } + switch (cmd) { + case VIDIOC_MSM_SENSOR_GET_SUBDEV_ID: + return msm_laser_led_get_subdev_id(lctrl, argp); + case VIDIOC_MSM_LASER_LED_CFG: + return msm_laser_led_config(lctrl, argp); + case MSM_SD_NOTIFY_FREEZE: + return 0; + case MSM_SD_SHUTDOWN: + if (!lctrl->i2c_client.i2c_func_tbl) { + pr_err("a_ctrl->i2c_client.i2c_func_tbl NULL\n"); + return -EINVAL; + } + return msm_laser_led_close(sd, NULL); + + default: + pr_err("invalid cmd %d\n", cmd); + return -ENOIOCTLCMD; + } + CDBG("Exit\n"); +} + +static struct v4l2_subdev_core_ops msm_laser_led_subdev_core_ops = { + .ioctl = msm_laser_led_subdev_ioctl, +}; + +static struct v4l2_subdev_ops msm_laser_led_subdev_ops = { + .core = &msm_laser_led_subdev_core_ops, +}; + +static const struct v4l2_subdev_internal_ops msm_laser_led_internal_ops = { + .close = msm_laser_led_close, +}; + +static int32_t msm_laser_led_platform_probe(struct platform_device *pdev) +{ + int32_t rc = 0; + struct msm_laser_led_ctrl_t *laser_led_ctrl = NULL; + struct msm_camera_cci_client *cci_client = NULL; + + CDBG("Enter\n"); + if (!pdev->dev.of_node) { + pr_err("IR LED device node is not present in device tree\n"); + return -EINVAL; + } + + laser_led_ctrl = devm_kzalloc(&pdev->dev, + sizeof(struct msm_laser_led_ctrl_t), GFP_KERNEL); + if (!laser_led_ctrl) + return -ENOMEM; + + laser_led_ctrl->pdev = pdev; + + rc = of_property_read_u32((&pdev->dev)->of_node, "cell-index", + &pdev->id); + CDBG("cell-index %d, rc %d\n", pdev->id, rc); + if (rc < 0) { + kfree(laser_led_ctrl); + pr_err("reading cell index failed: rc %d\n", rc); + return rc; + } + + rc = of_property_read_u32((&pdev->dev)->of_node, "qcom,cci-master", + &laser_led_ctrl->cci_master); + CDBG("qcom,cci-master %d, rc %d\n", laser_led_ctrl->cci_master, rc); + if (rc < 0 || laser_led_ctrl->cci_master >= MASTER_MAX) { + kfree(laser_led_ctrl); + pr_err("invalid cci master info: rc %d\n", rc); + return rc; + } + + laser_led_ctrl->laser_led_state = MSM_CAMERA_LASER_LED_RELEASE; + laser_led_ctrl->power_info.dev = &laser_led_ctrl->pdev->dev; + laser_led_ctrl->laser_led_device_type = MSM_CAMERA_PLATFORM_DEVICE; + laser_led_ctrl->i2c_client.i2c_func_tbl = &msm_sensor_cci_func_tbl; + laser_led_ctrl->laser_led_mutex = &msm_laser_led_mutex; + + laser_led_ctrl->i2c_client.cci_client = kzalloc(sizeof( + struct 
msm_camera_cci_client), GFP_KERNEL); + if (!laser_led_ctrl->i2c_client.cci_client) + return -ENOMEM; + + cci_client = laser_led_ctrl->i2c_client.cci_client; + cci_client->cci_subdev = msm_cci_get_subdev(); + cci_client->cci_i2c_master = laser_led_ctrl->cci_master; + + /* Initialize sub device */ + v4l2_subdev_init(&laser_led_ctrl->msm_sd.sd, &msm_laser_led_subdev_ops); + v4l2_set_subdevdata(&laser_led_ctrl->msm_sd.sd, laser_led_ctrl); + + laser_led_ctrl->msm_sd.sd.internal_ops = &msm_laser_led_internal_ops; + laser_led_ctrl->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; + snprintf(laser_led_ctrl->msm_sd.sd.name, + ARRAY_SIZE(laser_led_ctrl->msm_sd.sd.name), + "msm_camera_laser_led"); + media_entity_init(&laser_led_ctrl->msm_sd.sd.entity, 0, NULL, 0); + laser_led_ctrl->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV; + laser_led_ctrl->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_LASER_LED; + laser_led_ctrl->msm_sd.close_seq = MSM_SD_CLOSE_2ND_CATEGORY | 0x1; + msm_sd_register(&laser_led_ctrl->msm_sd); + + laser_led_ctrl->laser_led_state = MSM_CAMERA_LASER_LED_RELEASE; + + CDBG("laser_led sd name = %s\n", + laser_led_ctrl->msm_sd.sd.entity.name); + msm_laser_led_v4l2_subdev_fops = v4l2_subdev_fops; +#ifdef CONFIG_COMPAT + msm_laser_led_v4l2_subdev_fops.compat_ioctl32 = + msm_laser_led_subdev_fops_ioctl; +#endif + laser_led_ctrl->msm_sd.sd.devnode->fops = + &msm_laser_led_v4l2_subdev_fops; + + CDBG("probe success\n"); + return rc; +} + +MODULE_DEVICE_TABLE(of, msm_laser_led_dt_match); + +static struct platform_driver msm_laser_led_platform_driver = { + .probe = msm_laser_led_platform_probe, + .driver = { + .name = "qcom,laser-led", + .owner = THIS_MODULE, + .of_match_table = msm_laser_led_dt_match, + }, +}; + +static int __init msm_laser_led_init_module(void) +{ + int32_t rc; + + CDBG("Enter\n"); + rc = platform_driver_register(&msm_laser_led_platform_driver); + if (!rc) { + CDBG("Exit\n"); + return rc; + } + pr_err("laser-led driver register failed: %d\n", rc); + + return rc; +} + +static void __exit msm_laser_led_exit_module(void) +{ + platform_driver_unregister(&msm_laser_led_platform_driver); +} + +module_init(msm_laser_led_init_module); +module_exit(msm_laser_led_exit_module); +MODULE_DESCRIPTION("MSM IR LED"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/msm/camera_v2/sensor/laser_led/msm_laser_led.h b/drivers/media/platform/msm/camera_v2/sensor/laser_led/msm_laser_led.h new file mode 100644 index 000000000000..d5cb8b435d12 --- /dev/null +++ b/drivers/media/platform/msm/camera_v2/sensor/laser_led/msm_laser_led.h @@ -0,0 +1,57 @@ +/* Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef MSM_LASER_LED_H +#define MSM_LASER_LED_H + +#include <linux/i2c.h> +#include <linux/gpio.h> +#include <soc/qcom/camera2.h> +#include <media/v4l2-subdev.h> +#include <media/msmb_camera.h> +#include <linux/platform_device.h> +#include <media/v4l2-ioctl.h> +#include <media/msm_cam_sensor.h> +#include "msm_camera_i2c.h" +#include "msm_camera_dt_util.h" +#include "msm_camera_io_util.h" +#include "msm_sd.h" + + +#define DEFINE_MSM_MUTEX(mutexname) \ + static struct mutex mutexname = __MUTEX_INITIALIZER(mutexname) + +enum msm_camera_laser_led_state_t { + MSM_CAMERA_LASER_LED_INIT, + MSM_CAMERA_LASER_LED_RELEASE, +}; + +struct msm_laser_led_ctrl_t; + +struct msm_laser_led_ctrl_t { + struct msm_sd_subdev msm_sd; + struct platform_device *pdev; + struct msm_laser_led_func_t *func_tbl; + struct msm_camera_power_ctrl_t power_info; + struct i2c_driver *i2c_driver; + struct platform_driver *pdriver; + struct msm_camera_i2c_client i2c_client; + enum msm_camera_device_type_t laser_led_device_type; + struct v4l2_subdev sdev; + struct v4l2_subdev_ops *laser_led_v4l2_subdev_ops; + struct mutex *laser_led_mutex; + enum msm_camera_laser_led_state_t laser_led_state; + enum cci_i2c_master_t cci_master; +}; + +#endif diff --git a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c index 57bc392f54fd..167ed5492088 100644 --- a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c +++ b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c @@ -297,6 +297,45 @@ static int32_t msm_sensor_fill_actuator_subdevid_by_name( return rc; } +static int32_t msm_sensor_fill_laser_led_subdevid_by_name( + struct msm_sensor_ctrl_t *s_ctrl) +{ + int32_t rc = 0; + struct device_node *src_node = NULL; + uint32_t val = 0; + int32_t *laser_led_subdev_id; + struct msm_sensor_info_t *sensor_info; + struct device_node *of_node = s_ctrl->of_node; + + if (!of_node) + return -EINVAL; + + sensor_info = s_ctrl->sensordata->sensor_info; + laser_led_subdev_id = &sensor_info->subdev_id[SUB_MODULE_LASER_LED]; + /* set sudev id to -1 and try to found new id */ + *laser_led_subdev_id = -1; + + + src_node = of_parse_phandle(of_node, "qcom,laserled-src", 0); + if (!src_node) { + CDBG("%s:%d src_node NULL\n", __func__, __LINE__); + } else { + rc = of_property_read_u32(src_node, "cell-index", &val); + CDBG("%s qcom,laser led cell index %d, rc %d\n", __func__, + val, rc); + of_node_put(src_node); + src_node = NULL; + if (rc < 0) { + pr_err("%s cell index not found %d\n", + __func__, __LINE__); + return -EINVAL; + } + *laser_led_subdev_id = val; + } + + return rc; +} + static int32_t msm_sensor_fill_flash_subdevid_by_name( struct msm_sensor_ctrl_t *s_ctrl) { @@ -981,6 +1020,11 @@ CSID_TG: pr_err("%s failed %d\n", __func__, __LINE__); goto free_camera_info; } + rc = msm_sensor_fill_laser_led_subdevid_by_name(s_ctrl); + if (rc < 0) { + pr_err("%s failed %d\n", __func__, __LINE__); + goto free_camera_info; + } rc = msm_sensor_fill_ois_subdevid_by_name(s_ctrl); if (rc < 0) { diff --git a/drivers/media/platform/msm/camera_v2/sensor/ois/msm_ois.c b/drivers/media/platform/msm/camera_v2/sensor/ois/msm_ois.c index 302a7b16bc26..d3d48b0bbe4c 100644 --- a/drivers/media/platform/msm/camera_v2/sensor/ois/msm_ois.c +++ b/drivers/media/platform/msm/camera_v2/sensor/ois/msm_ois.c @@ -33,6 +33,30 @@ static int32_t msm_ois_power_down(struct msm_ois_ctrl_t *o_ctrl); static struct i2c_driver msm_ois_i2c_driver; +static int32_t data_type_to_num_bytes( + enum 
msm_camera_i2c_data_type data_type) +{ + int32_t ret_val; + + switch (data_type) { + case MSM_CAMERA_I2C_BYTE_DATA: + ret_val = 1; + break; + case MSM_CAMERA_I2C_WORD_DATA: + ret_val = 2; + break; + case MSM_CAMERA_I2C_DWORD_DATA: + ret_val = 4; + break; + default: + pr_err("unsupported data type: %d\n", + data_type); + ret_val = 1; + break; + } + return ret_val; +} + static int32_t msm_ois_download(struct msm_ois_ctrl_t *o_ctrl) { uint16_t bytes_in_tx = 0; @@ -155,7 +179,9 @@ static int32_t msm_ois_write_settings(struct msm_ois_ctrl_t *o_ctrl, uint16_t size, struct reg_settings_ois_t *settings) { int32_t rc = -EFAULT; - int32_t i = 0; + int32_t i = 0, num_byte_seq = 0; + uint8_t *reg_data_seq; + struct msm_camera_i2c_seq_reg_array *reg_setting; CDBG("Enter\n"); @@ -233,13 +259,51 @@ static int32_t msm_ois_write_settings(struct msm_ois_ctrl_t *o_ctrl, settings[i].data_type); break; } + break; } + case MSM_OIS_READ: { + switch (settings[i].data_type) { + case MSM_CAMERA_I2C_BYTE_DATA: + case MSM_CAMERA_I2C_WORD_DATA: + case MSM_CAMERA_I2C_DWORD_DATA: + + num_byte_seq = + data_type_to_num_bytes + (settings[i].data_type); + reg_data_seq = kzalloc(sizeof(uint32_t), + GFP_KERNEL); + if (!reg_data_seq) + return -ENOMEM; + + rc = msm_camera_cci_i2c_read_seq + (&o_ctrl->i2c_client, + settings[i].reg_addr, + reg_data_seq, + num_byte_seq); + + memcpy(&settings[i].reg_data, + reg_data_seq, sizeof(uint32_t)); + + CDBG("ois data read 0x%x from address 0x%x", + settings[i].reg_addr, + settings[i].reg_data); + + kfree(reg_data_seq); + reg_data_seq = NULL; + + break; + default: + pr_err("Unsupport data type for MSM_OIS_READ: %d\n", + settings[i].data_type); + break; + } + break; } if (rc < 0) break; + } } - CDBG("Exit\n"); return rc; } @@ -348,7 +412,7 @@ static int32_t msm_ois_control(struct msm_ois_ctrl_t *o_ctrl, struct msm_ois_set_info_t *set_info) { struct reg_settings_ois_t *settings = NULL; - int32_t rc = 0; + int32_t rc = 0, i = 0; struct msm_camera_cci_client *cci_client = NULL; CDBG("Enter\n"); @@ -390,6 +454,18 @@ static int32_t msm_ois_control(struct msm_ois_ctrl_t *o_ctrl, rc = msm_ois_write_settings(o_ctrl, set_info->ois_params.setting_size, settings); + + for (i = 0; i < set_info->ois_params.setting_size; i++) { + if (set_info->ois_params.settings[i].i2c_operation + == MSM_OIS_READ) { + set_info->ois_params.settings[i].reg_data = + settings[i].reg_data; + CDBG("ois_data at addr 0x%x is 0x%x", + set_info->ois_params.settings[i].reg_addr, + set_info->ois_params.settings[i].reg_data); + } + } + kfree(settings); if (rc < 0) { pr_err("Error\n"); @@ -402,7 +478,6 @@ static int32_t msm_ois_control(struct msm_ois_ctrl_t *o_ctrl, return rc; } - static int32_t msm_ois_config(struct msm_ois_ctrl_t *o_ctrl, void __user *argp) { diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c index 2d2296893140..9f3e2cc3a72f 100644 --- a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c +++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c @@ -523,13 +523,17 @@ static ssize_t mpq_sdmx_log_level_write(struct file *fp, int level; struct mpq_demux *mpq_demux = fp->private_data; - if (count >= 16) + if (count == 0 || count >= 16) return -EINVAL; - ret_count = simple_write_to_buffer(user_str, 16, position, user_buffer, + memset(user_str, '\0', sizeof(user_str)); + + ret_count = simple_write_to_buffer(user_str, 15, position, user_buffer, count); if (ret_count < 0) return ret_count; + else if (ret_count == 0) + 
return -EINVAL; ret = kstrtoint(user_str, 0, &level); if (ret) diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c index abf20aef1256..422c7a590a45 100644 --- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c +++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c @@ -2003,8 +2003,10 @@ static void sde_rotator_cancel_request(struct sde_rot_mgr *mgr, sde_rot_mgr_unlock(mgr); for (i = req->count - 1; i >= 0; i--) { entry = req->entries + i; - flush_kthread_worker(&entry->commitq->rot_kw); - flush_kthread_worker(&entry->doneq->rot_kw); + if (entry->commitq) + flush_kthread_worker(&entry->commitq->rot_kw); + if (entry->doneq) + flush_kthread_worker(&entry->doneq->rot_kw); } sde_rot_mgr_lock(mgr); SDEROT_DBG("cancel work done\n"); diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c index 037c6f3b12ab..53de11d7abf1 100644 --- a/drivers/media/platform/msm/vidc/hfi_packetization.c +++ b/drivers/media/platform/msm/vidc/hfi_packetization.c @@ -2180,6 +2180,15 @@ int create_pkt_cmd_session_set_property( pkt->size += sizeof(u32) + sizeof(struct hfi_iframe_size); break; } + case HAL_PARAM_VENC_SEND_OUTPUT_FOR_SKIPPED_FRAME: + { + create_pkt_enable(pkt->rg_property_data, + HFI_PROPERTY_PARAM_VENC_SEND_OUTPUT_FOR_SKIPPED_FRAMES, + ((struct hal_enable *)pdata)->enable); + pkt->size += sizeof(u32) + sizeof(struct hfi_enable); + break; + } + /* FOLLOWING PROPERTIES ARE NOT IMPLEMENTED IN CORE YET */ case HAL_CONFIG_BUFFER_REQUIREMENTS: case HAL_CONFIG_PRIORITY: diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c index 0f6389370643..c80f535d95e1 100644 --- a/drivers/media/platform/msm/vidc/msm_vdec.c +++ b/drivers/media/platform/msm/vidc/msm_vdec.c @@ -847,6 +847,14 @@ int msm_vdec_streamoff(struct msm_vidc_inst *inst, enum v4l2_buf_type i) return -EINVAL; } dprintk(VIDC_DBG, "Calling streamoff\n"); + + if (!inst->in_reconfig) { + rc = msm_comm_try_state(inst, MSM_VIDC_RELEASE_RESOURCES_DONE); + if (rc) + dprintk(VIDC_ERR, + "Failed to move inst: %pK to res done state\n", inst); + } + mutex_lock(&q->lock); rc = vb2_streamoff(&q->vb2_bufq, i); mutex_unlock(&q->lock); @@ -1618,6 +1626,8 @@ static int set_max_internal_buffers_size(struct msm_vidc_inst *inst) get_buff_req_buffer(inst, internal_buffers[i].type); internal_buffers[i].size = internal_buffers[i].req ? 
internal_buffers[i].req->buffer_size : 0; + if (internal_buffers[i].req == NULL) + continue; rc = allocate_and_set_internal_bufs(inst, internal_buffers[i].req, diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c index e4698e0cdcd8..ec6695a670b0 100644 --- a/drivers/media/platform/msm/vidc/msm_venc.c +++ b/drivers/media/platform/msm/vidc/msm_venc.c @@ -1423,7 +1423,16 @@ static struct msm_vidc_ctrl msm_venc_ctrls[] = { (1 << V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_SIZE_UNLIMITED)), .qmenu = iframe_sizes, }, - + { + .id = V4L2_CID_MPEG_VIDC_VIDEO_SEND_SKIPPED_FRAME, + .name = "Send encoder output buffer for skipped frames", + .type = V4L2_CTRL_TYPE_BOOLEAN, + .minimum = V4L2_MPEG_VIDC_VIDEO_SEND_SKIPPED_FRAME_DISABLE, + .maximum = V4L2_MPEG_VIDC_VIDEO_SEND_SKIPPED_FRAME_ENABLE, + .default_value = + V4L2_MPEG_VIDC_VIDEO_SEND_SKIPPED_FRAME_DISABLE, + .step = 1, + } }; #define NUM_CTRLS ARRAY_SIZE(msm_venc_ctrls) @@ -3712,6 +3721,25 @@ static int try_set_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl) ctrl->val); pdata = &iframesize_type; break; + case V4L2_CID_MPEG_VIDC_VIDEO_SEND_SKIPPED_FRAME: + property_id = HAL_PARAM_VENC_SEND_OUTPUT_FOR_SKIPPED_FRAME; + switch (ctrl->val) { + case V4L2_MPEG_VIDC_VIDEO_SEND_SKIPPED_FRAME_ENABLE: + enable.enable = 1; + break; + case V4L2_MPEG_VIDC_VIDEO_SEND_SKIPPED_FRAME_DISABLE: + enable.enable = 0; + break; + default: + dprintk(VIDC_ERR, + "Invalid send skipped frames control value %d\n", + ctrl->val); + rc = -ENOTSUPP; + break; + } + pdata = &enable; + break; + default: dprintk(VIDC_ERR, "Unsupported index: %x\n", ctrl->id); rc = -ENOTSUPP; @@ -4620,6 +4648,12 @@ int msm_venc_streamoff(struct msm_vidc_inst *inst, enum v4l2_buf_type i) return -EINVAL; } dprintk(VIDC_DBG, "Calling streamoff on port: %d\n", i); + + rc = msm_comm_try_state(inst, MSM_VIDC_RELEASE_RESOURCES_DONE); + if (rc) + dprintk(VIDC_ERR, + "Failed to move inst: %pK to res done state\n", inst); + mutex_lock(&q->lock); rc = vb2_streamoff(&q->vb2_bufq, i); mutex_unlock(&q->lock); diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c index 3677bb6e32e6..2eaae18bc2e9 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc.c +++ b/drivers/media/platform/msm/vidc/msm_vidc.c @@ -1425,10 +1425,6 @@ void *msm_vidc_open(int core_id, int session_type) setup_event_queue(inst, &core->vdev[session_type].vdev); - mutex_lock(&core->lock); - list_add_tail(&inst->list, &core->instances); - mutex_unlock(&core->lock); - rc = msm_comm_try_state(inst, MSM_VIDC_CORE_INIT_DONE); if (rc) { dprintk(VIDC_ERR, @@ -1442,15 +1438,15 @@ void *msm_vidc_open(int core_id, int session_type) goto fail_init; } + mutex_lock(&core->lock); + list_add_tail(&inst->list, &core->instances); + mutex_unlock(&core->lock); + inst->debugfs_root = msm_vidc_debugfs_init_inst(inst, core->debugfs_root); return inst; fail_init: - mutex_lock(&core->lock); - list_del(&inst->list); - mutex_unlock(&core->lock); - v4l2_fh_del(&inst->event_handler); v4l2_fh_exit(&inst->event_handler); vb2_queue_release(&inst->bufq[OUTPUT_PORT].vb2_bufq); diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c index 1d910f4b235c..4cb8f92c4e38 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_common.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c @@ -1179,7 +1179,7 @@ static void handle_event_change(enum hal_command_response cmd, void *data) __func__, inst, &event_notify->packet_buffer, 
&event_notify->extra_data_buffer); - if (inst->state == MSM_VIDC_CORE_INVALID || + if (inst->state >= MSM_VIDC_STOP || inst->core->state == VIDC_CORE_INVALID) { dprintk(VIDC_DBG, "Event release buf ref received in invalid state - discard\n"); diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.c b/drivers/media/platform/msm/vidc/msm_vidc_debug.c index 5c13b6fef3ec..2be52b10c84b 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_debug.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.c @@ -445,7 +445,7 @@ struct dentry *msm_vidc_debugfs_init_inst(struct msm_vidc_inst *inst, dprintk(VIDC_ERR, "Invalid params, inst: %pK\n", inst); goto exit; } - snprintf(debugfs_name, MAX_DEBUGFS_NAME, "inst_%p", inst); + snprintf(debugfs_name, MAX_DEBUGFS_NAME, "inst_%pK", inst); idata = kzalloc(sizeof(struct core_inst_pair), GFP_KERNEL); if (!idata) { diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h index 6cc5f9f50ba1..d946b035b284 100644 --- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h +++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h @@ -243,6 +243,7 @@ enum hal_property { HAL_PARAM_VENC_H264_TRANSFORM_8x8, HAL_PARAM_VENC_VIDEO_SIGNAL_INFO, HAL_PARAM_VENC_IFRAMESIZE_TYPE, + HAL_PARAM_VENC_SEND_OUTPUT_FOR_SKIPPED_FRAME }; enum hal_domain { diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h index 31af06cd88ef..1218f0a86bc4 100644 --- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h +++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h @@ -388,6 +388,8 @@ struct hfi_buffer_info { (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x033) #define HFI_PROPERTY_PARAM_VENC_IFRAMESIZE \ (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x034) +#define HFI_PROPERTY_PARAM_VENC_SEND_OUTPUT_FOR_SKIPPED_FRAMES \ + (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x035) #define HFI_PROPERTY_CONFIG_VENC_COMMON_START \ (HFI_DOMAIN_BASE_VENC + HFI_ARCH_COMMON_OFFSET + 0x6000) diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c index a32659fcd266..efc21b1da211 100644 --- a/drivers/media/rc/ir-lirc-codec.c +++ b/drivers/media/rc/ir-lirc-codec.c @@ -254,7 +254,7 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd, return 0; case LIRC_GET_REC_RESOLUTION: - val = dev->rx_resolution; + val = dev->rx_resolution / 1000; break; case LIRC_SET_WIDEBAND_RECEIVER: diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index d86795bf9453..52f75b1faec0 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -565,6 +565,14 @@ config QPNP_MISC peripheral. The MISC peripheral holds the USB ID interrupt and the driver provides an API to check if this interrupt is available on the current PMIC chip. 
+ +config UID_SYS_STATS_DEBUG + bool "Per-TASK statistics" + depends on UID_SYS_STATS + default n + help + Per TASK based io statistics exported to /proc/uid_io + config MEMORY_STATE_TIME tristate "Memory freq/bandwidth time statistics" depends on PROFILING diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index a2661381ddfc..d2774197fe58 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -125,6 +125,11 @@ #define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */ #define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */ +#define MEI_DEV_ID_LBG 0xA1BA /* Lewisburg (SPT) */ + +#define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */ +#define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */ + /* * MEI HW Section */ diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 01e20384ac44..adab5bbb642a 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -86,10 +86,14 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, mei_me_pch8_cfg)}, {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_sps_cfg)}, {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_sps_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, mei_me_pch8_cfg)}, {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, mei_me_pch8_cfg)}, {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, mei_me_pch8_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, mei_me_pch8_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, mei_me_pch8_cfg)}, + /* required last entry */ {0, } }; diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c index cf897947fff2..8c48a5c05bbe 100644 --- a/drivers/misc/qseecom.c +++ b/drivers/misc/qseecom.c @@ -3122,6 +3122,7 @@ static int __qseecom_send_cmd(struct qseecom_dev_handle *data, struct qseecom_send_cmd_req *req) { int ret = 0; + int ret2 = 0; u32 reqd_len_sb_in = 0; struct qseecom_client_send_data_ireq send_data_req = {0}; struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0}; @@ -3220,32 +3221,38 @@ static int __qseecom_send_cmd(struct qseecom_dev_handle *data, if (ret) { pr_err("scm_call() failed with err: %d (app_id = %d)\n", ret, data->client.app_id); - return ret; + goto exit; } if (qseecom.qsee_reentrancy_support) { ret = __qseecom_process_reentrancy(&resp, ptr_app, data); + if (ret) + goto exit; } else { if (resp.result == QSEOS_RESULT_INCOMPLETE) { ret = __qseecom_process_incomplete_cmd(data, &resp); if (ret) { pr_err("process_incomplete_cmd failed err: %d\n", ret); - return ret; + goto exit; } } else { if (resp.result != QSEOS_RESULT_SUCCESS) { pr_err("Response result %d not supported\n", resp.result); ret = -EINVAL; + goto exit; } } } - ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle, +exit: + ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle, data->client.sb_virt, data->client.sb_length, ION_IOC_INV_CACHES); - if (ret) - pr_err("cache operation failed %d\n", ret); + if (ret2) { + pr_err("cache operation failed %d\n", ret2); + return ret2; + } return ret; } @@ -6566,6 +6573,7 @@ static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data, bool found_app = false; unsigned long flags; int ret = 0; + int ret2 = 0; uint32_t reqd_len_sb_in = 0; void *cmd_buf = NULL; size_t cmd_len; @@ -6675,43 +6683,47 @@ static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data, if (ret) { pr_err("scm_call() failed with err: %d (app_id = %d)\n", ret, data->client.app_id); - return ret; + goto exit; } if (qseecom.qsee_reentrancy_support) { ret = __qseecom_process_reentrancy(&resp, ptr_app, data); + if (ret) + goto exit; } else { if 
(resp.result == QSEOS_RESULT_INCOMPLETE) { ret = __qseecom_process_incomplete_cmd(data, &resp); if (ret) { pr_err("process_incomplete_cmd failed err: %d\n", ret); - return ret; + goto exit; } } else { if (resp.result != QSEOS_RESULT_SUCCESS) { pr_err("Response result %d not supported\n", resp.result); ret = -EINVAL; + goto exit; } } } - ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle, +exit: + ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle, data->client.sb_virt, data->client.sb_length, ION_IOC_INV_CACHES); - if (ret) { + if (ret2) { pr_err("cache operation failed %d\n", ret); - return ret; + return ret2; } if ((cmd_id == QSEOS_TEE_OPEN_SESSION) || (cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) { - ret = __qseecom_update_qteec_req_buf( + ret2 = __qseecom_update_qteec_req_buf( (struct qseecom_qteec_modfd_req *)req, data, true); - if (ret) - return ret; + if (ret2) + return ret2; } - return 0; + return ret; } static int qseecom_qteec_open_session(struct qseecom_dev_handle *data, diff --git a/drivers/misc/uid_sys_stats.c b/drivers/misc/uid_sys_stats.c index 3c9d311106cd..031320e51522 100644 --- a/drivers/misc/uid_sys_stats.c +++ b/drivers/misc/uid_sys_stats.c @@ -19,6 +19,7 @@ #include <linux/init.h> #include <linux/kernel.h> #include <linux/list.h> +#include <linux/mm.h> #include <linux/proc_fs.h> #include <linux/profile.h> #include <linux/rtmutex.h> @@ -52,6 +53,15 @@ struct io_stats { #define UID_STATE_DEAD_TASKS 4 #define UID_STATE_SIZE 5 +#define MAX_TASK_COMM_LEN 256 + +struct task_entry { + char comm[MAX_TASK_COMM_LEN]; + pid_t pid; + struct io_stats io[UID_STATE_SIZE]; + struct hlist_node hash; +}; + struct uid_entry { uid_t uid; cputime_t utime; @@ -61,8 +71,231 @@ struct uid_entry { int state; struct io_stats io[UID_STATE_SIZE]; struct hlist_node hash; +#ifdef CONFIG_UID_SYS_STATS_DEBUG + DECLARE_HASHTABLE(task_entries, UID_HASH_BITS); +#endif }; +static u64 compute_write_bytes(struct task_struct *task) +{ + if (task->ioac.write_bytes <= task->ioac.cancelled_write_bytes) + return 0; + + return task->ioac.write_bytes - task->ioac.cancelled_write_bytes; +} + +static void compute_io_bucket_stats(struct io_stats *io_bucket, + struct io_stats *io_curr, + struct io_stats *io_last, + struct io_stats *io_dead) +{ + /* tasks could switch to another uid group, but its io_last in the + * previous uid group could still be positive. + * therefore before each update, do an overflow check first + */ + int64_t delta; + + delta = io_curr->read_bytes + io_dead->read_bytes - + io_last->read_bytes; + io_bucket->read_bytes += delta > 0 ? delta : 0; + delta = io_curr->write_bytes + io_dead->write_bytes - + io_last->write_bytes; + io_bucket->write_bytes += delta > 0 ? delta : 0; + delta = io_curr->rchar + io_dead->rchar - io_last->rchar; + io_bucket->rchar += delta > 0 ? delta : 0; + delta = io_curr->wchar + io_dead->wchar - io_last->wchar; + io_bucket->wchar += delta > 0 ? delta : 0; + delta = io_curr->fsync + io_dead->fsync - io_last->fsync; + io_bucket->fsync += delta > 0 ? 
delta : 0; + + io_last->read_bytes = io_curr->read_bytes; + io_last->write_bytes = io_curr->write_bytes; + io_last->rchar = io_curr->rchar; + io_last->wchar = io_curr->wchar; + io_last->fsync = io_curr->fsync; + + memset(io_dead, 0, sizeof(struct io_stats)); +} + +#ifdef CONFIG_UID_SYS_STATS_DEBUG +static void get_full_task_comm(struct task_entry *task_entry, + struct task_struct *task) +{ + int i = 0, offset = 0, len = 0; + /* save one byte for terminating null character */ + int unused_len = MAX_TASK_COMM_LEN - TASK_COMM_LEN - 1; + char buf[unused_len]; + struct mm_struct *mm = task->mm; + + /* fill the first TASK_COMM_LEN bytes with thread name */ + get_task_comm(task_entry->comm, task); + i = strlen(task_entry->comm); + while (i < TASK_COMM_LEN) + task_entry->comm[i++] = ' '; + + /* next the executable file name */ + if (mm) { + down_read(&mm->mmap_sem); + if (mm->exe_file) { + char *pathname = d_path(&mm->exe_file->f_path, buf, + unused_len); + + if (!IS_ERR(pathname)) { + len = strlcpy(task_entry->comm + i, pathname, + unused_len); + i += len; + task_entry->comm[i++] = ' '; + unused_len--; + } + } + up_read(&mm->mmap_sem); + } + unused_len -= len; + + /* fill the rest with command line argument + * replace each null or new line character + * between args in argv with whitespace */ + len = get_cmdline(task, buf, unused_len); + while (offset < len) { + if (buf[offset] != '\0' && buf[offset] != '\n') + task_entry->comm[i++] = buf[offset]; + else + task_entry->comm[i++] = ' '; + offset++; + } + + /* get rid of trailing whitespaces in case when arg is memset to + * zero before being reset in userspace + */ + while (task_entry->comm[i-1] == ' ') + i--; + task_entry->comm[i] = '\0'; +} + +static struct task_entry *find_task_entry(struct uid_entry *uid_entry, + struct task_struct *task) +{ + struct task_entry *task_entry; + + hash_for_each_possible(uid_entry->task_entries, task_entry, hash, + task->pid) { + if (task->pid == task_entry->pid) { + /* if thread name changed, update the entire command */ + int len = strnchr(task_entry->comm, ' ', TASK_COMM_LEN) + - task_entry->comm; + + if (strncmp(task_entry->comm, task->comm, len)) + get_full_task_comm(task_entry, task); + return task_entry; + } + } + return NULL; +} + +static struct task_entry *find_or_register_task(struct uid_entry *uid_entry, + struct task_struct *task) +{ + struct task_entry *task_entry; + pid_t pid = task->pid; + + task_entry = find_task_entry(uid_entry, task); + if (task_entry) + return task_entry; + + task_entry = kzalloc(sizeof(struct task_entry), GFP_ATOMIC); + if (!task_entry) + return NULL; + + get_full_task_comm(task_entry, task); + + task_entry->pid = pid; + hash_add(uid_entry->task_entries, &task_entry->hash, (unsigned int)pid); + + return task_entry; +} + +static void remove_uid_tasks(struct uid_entry *uid_entry) +{ + struct task_entry *task_entry; + unsigned long bkt_task; + struct hlist_node *tmp_task; + + hash_for_each_safe(uid_entry->task_entries, bkt_task, + tmp_task, task_entry, hash) { + hash_del(&task_entry->hash); + kfree(task_entry); + } +} + +static void set_io_uid_tasks_zero(struct uid_entry *uid_entry) +{ + struct task_entry *task_entry; + unsigned long bkt_task; + + hash_for_each(uid_entry->task_entries, bkt_task, task_entry, hash) { + memset(&task_entry->io[UID_STATE_TOTAL_CURR], 0, + sizeof(struct io_stats)); + } +} + +static void add_uid_tasks_io_stats(struct uid_entry *uid_entry, + struct task_struct *task, int slot) +{ + struct task_entry *task_entry = find_or_register_task(uid_entry, task); + 
struct io_stats *task_io_slot = &task_entry->io[slot]; + + task_io_slot->read_bytes += task->ioac.read_bytes; + task_io_slot->write_bytes += compute_write_bytes(task); + task_io_slot->rchar += task->ioac.rchar; + task_io_slot->wchar += task->ioac.wchar; + task_io_slot->fsync += task->ioac.syscfs; +} + +static void compute_io_uid_tasks(struct uid_entry *uid_entry) +{ + struct task_entry *task_entry; + unsigned long bkt_task; + + hash_for_each(uid_entry->task_entries, bkt_task, task_entry, hash) { + compute_io_bucket_stats(&task_entry->io[uid_entry->state], + &task_entry->io[UID_STATE_TOTAL_CURR], + &task_entry->io[UID_STATE_TOTAL_LAST], + &task_entry->io[UID_STATE_DEAD_TASKS]); + } +} + +static void show_io_uid_tasks(struct seq_file *m, struct uid_entry *uid_entry) +{ + struct task_entry *task_entry; + unsigned long bkt_task; + + hash_for_each(uid_entry->task_entries, bkt_task, task_entry, hash) { + /* Separated by comma because space exists in task comm */ + seq_printf(m, "task,%s,%lu,%llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu\n", + task_entry->comm, + (unsigned long)task_entry->pid, + task_entry->io[UID_STATE_FOREGROUND].rchar, + task_entry->io[UID_STATE_FOREGROUND].wchar, + task_entry->io[UID_STATE_FOREGROUND].read_bytes, + task_entry->io[UID_STATE_FOREGROUND].write_bytes, + task_entry->io[UID_STATE_BACKGROUND].rchar, + task_entry->io[UID_STATE_BACKGROUND].wchar, + task_entry->io[UID_STATE_BACKGROUND].read_bytes, + task_entry->io[UID_STATE_BACKGROUND].write_bytes, + task_entry->io[UID_STATE_FOREGROUND].fsync, + task_entry->io[UID_STATE_BACKGROUND].fsync); + } +} +#else +static void remove_uid_tasks(struct uid_entry *uid_entry) {}; +static void set_io_uid_tasks_zero(struct uid_entry *uid_entry) {}; +static void add_uid_tasks_io_stats(struct uid_entry *uid_entry, + struct task_struct *task, int slot) {}; +static void compute_io_uid_tasks(struct uid_entry *uid_entry) {}; +static void show_io_uid_tasks(struct seq_file *m, + struct uid_entry *uid_entry) {} +#endif + static struct uid_entry *find_uid_entry(uid_t uid) { struct uid_entry *uid_entry; @@ -86,7 +319,9 @@ static struct uid_entry *find_or_register_uid(uid_t uid) return NULL; uid_entry->uid = uid; - +#ifdef CONFIG_UID_SYS_STATS_DEBUG + hash_init(uid_entry->task_entries); +#endif hash_add(hash_table, &uid_entry->hash, uid); return uid_entry; @@ -192,6 +427,7 @@ static ssize_t uid_remove_write(struct file *file, hash_for_each_possible_safe(hash_table, uid_entry, tmp, hash, (uid_t)uid_start) { if (uid_start == uid_entry->uid) { + remove_uid_tasks(uid_entry); hash_del(&uid_entry->hash); kfree(uid_entry); } @@ -208,13 +444,6 @@ static const struct file_operations uid_remove_fops = { .write = uid_remove_write, }; -static u64 compute_write_bytes(struct task_struct *task) -{ - if (task->ioac.write_bytes <= task->ioac.cancelled_write_bytes) - return 0; - - return task->ioac.write_bytes - task->ioac.cancelled_write_bytes; -} static void add_uid_io_stats(struct uid_entry *uid_entry, struct task_struct *task, int slot) @@ -226,28 +455,8 @@ static void add_uid_io_stats(struct uid_entry *uid_entry, io_slot->rchar += task->ioac.rchar; io_slot->wchar += task->ioac.wchar; io_slot->fsync += task->ioac.syscfs; -} -static void compute_uid_io_bucket_stats(struct io_stats *io_bucket, - struct io_stats *io_curr, - struct io_stats *io_last, - struct io_stats *io_dead) -{ - io_bucket->read_bytes += io_curr->read_bytes + io_dead->read_bytes - - io_last->read_bytes; - io_bucket->write_bytes += io_curr->write_bytes + io_dead->write_bytes - - 
io_last->write_bytes; - io_bucket->rchar += io_curr->rchar + io_dead->rchar - io_last->rchar; - io_bucket->wchar += io_curr->wchar + io_dead->wchar - io_last->wchar; - io_bucket->fsync += io_curr->fsync + io_dead->fsync - io_last->fsync; - - io_last->read_bytes = io_curr->read_bytes; - io_last->write_bytes = io_curr->write_bytes; - io_last->rchar = io_curr->rchar; - io_last->wchar = io_curr->wchar; - io_last->fsync = io_curr->fsync; - - memset(io_dead, 0, sizeof(struct io_stats)); + add_uid_tasks_io_stats(uid_entry, task, slot); } static void update_io_stats_all_locked(void) @@ -258,9 +467,11 @@ static void update_io_stats_all_locked(void) unsigned long bkt; uid_t uid; - hash_for_each(hash_table, bkt, uid_entry, hash) + hash_for_each(hash_table, bkt, uid_entry, hash) { memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0, sizeof(struct io_stats)); + set_io_uid_tasks_zero(uid_entry); + } rcu_read_lock(); do_each_thread(temp, task) { @@ -274,10 +485,11 @@ static void update_io_stats_all_locked(void) rcu_read_unlock(); hash_for_each(hash_table, bkt, uid_entry, hash) { - compute_uid_io_bucket_stats(&uid_entry->io[uid_entry->state], + compute_io_bucket_stats(&uid_entry->io[uid_entry->state], &uid_entry->io[UID_STATE_TOTAL_CURR], &uid_entry->io[UID_STATE_TOTAL_LAST], &uid_entry->io[UID_STATE_DEAD_TASKS]); + compute_io_uid_tasks(uid_entry); } } @@ -288,6 +500,7 @@ static void update_io_stats_uid_locked(struct uid_entry *uid_entry) memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0, sizeof(struct io_stats)); + set_io_uid_tasks_zero(uid_entry); rcu_read_lock(); do_each_thread(temp, task) { @@ -297,12 +510,14 @@ static void update_io_stats_uid_locked(struct uid_entry *uid_entry) } while_each_thread(temp, task); rcu_read_unlock(); - compute_uid_io_bucket_stats(&uid_entry->io[uid_entry->state], + compute_io_bucket_stats(&uid_entry->io[uid_entry->state], &uid_entry->io[UID_STATE_TOTAL_CURR], &uid_entry->io[UID_STATE_TOTAL_LAST], &uid_entry->io[UID_STATE_DEAD_TASKS]); + compute_io_uid_tasks(uid_entry); } + static int uid_io_show(struct seq_file *m, void *v) { struct uid_entry *uid_entry; @@ -314,21 +529,22 @@ static int uid_io_show(struct seq_file *m, void *v) hash_for_each(hash_table, bkt, uid_entry, hash) { seq_printf(m, "%d %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu\n", - uid_entry->uid, - uid_entry->io[UID_STATE_FOREGROUND].rchar, - uid_entry->io[UID_STATE_FOREGROUND].wchar, - uid_entry->io[UID_STATE_FOREGROUND].read_bytes, - uid_entry->io[UID_STATE_FOREGROUND].write_bytes, - uid_entry->io[UID_STATE_BACKGROUND].rchar, - uid_entry->io[UID_STATE_BACKGROUND].wchar, - uid_entry->io[UID_STATE_BACKGROUND].read_bytes, - uid_entry->io[UID_STATE_BACKGROUND].write_bytes, - uid_entry->io[UID_STATE_FOREGROUND].fsync, - uid_entry->io[UID_STATE_BACKGROUND].fsync); + uid_entry->uid, + uid_entry->io[UID_STATE_FOREGROUND].rchar, + uid_entry->io[UID_STATE_FOREGROUND].wchar, + uid_entry->io[UID_STATE_FOREGROUND].read_bytes, + uid_entry->io[UID_STATE_FOREGROUND].write_bytes, + uid_entry->io[UID_STATE_BACKGROUND].rchar, + uid_entry->io[UID_STATE_BACKGROUND].wchar, + uid_entry->io[UID_STATE_BACKGROUND].read_bytes, + uid_entry->io[UID_STATE_BACKGROUND].write_bytes, + uid_entry->io[UID_STATE_FOREGROUND].fsync, + uid_entry->io[UID_STATE_BACKGROUND].fsync); + + show_io_uid_tasks(m, uid_entry); } rt_mutex_unlock(&uid_lock); - return 0; } diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index d39b4056c169..063e00517660 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -1224,16 +1224,16 @@ 
static int mmc_blk_ioctl_cmd(struct block_device *bdev, ioc_err = __mmc_blk_ioctl_cmd(card, md, idata); - mmc_put_card(card); - - err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata); - if (mmc_card_cmdq(card)) { if (mmc_cmdq_halt(card->host, false)) pr_err("%s: %s: cmdq unhalt failed\n", mmc_hostname(card->host), __func__); } + mmc_put_card(card); + + err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata); + cmd_done: mmc_blk_put(md); cmd_err: @@ -3894,6 +3894,7 @@ static inline int mmc_blk_cmdq_part_switch(struct mmc_card *card, struct mmc_host *host = card->host; struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx; u8 part_config = card->ext_csd.part_config; + int ret = 0, err = 0; if ((main_md->part_curr == md->part_type) && (card->part_curr == md->part_type)) @@ -3903,40 +3904,70 @@ static inline int mmc_blk_cmdq_part_switch(struct mmc_card *card, card->ext_csd.cmdq_support && (md->flags & MMC_BLK_CMD_QUEUE))); - if (!test_bit(CMDQ_STATE_HALT, &ctx->curr_state)) - WARN_ON(mmc_cmdq_halt(host, true)); + if (!test_bit(CMDQ_STATE_HALT, &ctx->curr_state)) { + ret = mmc_cmdq_halt(host, true); + if (ret) { + pr_err("%s: %s: halt: failed: %d\n", + mmc_hostname(host), __func__, ret); + goto out; + } + } /* disable CQ mode in card */ if (mmc_card_cmdq(card)) { - WARN_ON(mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, + ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ, 0, - card->ext_csd.generic_cmd6_time)); + card->ext_csd.generic_cmd6_time); + if (ret) { + pr_err("%s: %s: cmdq mode disable failed %d\n", + mmc_hostname(host), __func__, ret); + goto cmdq_unhalt; + } mmc_card_clr_cmdq(card); } part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK; part_config |= md->part_type; - WARN_ON(mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, + ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG, part_config, - card->ext_csd.part_time)); + card->ext_csd.part_time); + if (ret) { + pr_err("%s: %s: mmc_switch failure, %d -> %d , err = %d\n", + mmc_hostname(host), __func__, main_md->part_curr, + md->part_type, ret); + goto cmdq_switch; + } card->ext_csd.part_config = part_config; card->part_curr = md->part_type; main_md->part_curr = md->part_type; - WARN_ON(mmc_blk_cmdq_switch(card, md, true)); - WARN_ON(mmc_cmdq_halt(host, false)); - - return 0; +cmdq_switch: + err = mmc_blk_cmdq_switch(card, md, true); + if (err) { + pr_err("%s: %s: mmc_blk_cmdq_switch failed: %d\n", + mmc_hostname(host), __func__, err); + ret = err; + } +cmdq_unhalt: + err = mmc_cmdq_halt(host, false); + if (err) { + pr_err("%s: %s: unhalt: failed: %d\n", + mmc_hostname(host), __func__, err); + ret = err; + } +out: + return ret; } static int mmc_blk_cmdq_issue_rq(struct mmc_queue *mq, struct request *req) { - int ret; + int ret, err = 0; struct mmc_blk_data *md = mq->data; struct mmc_card *card = md->queue.card; + struct mmc_host *host = card->host; unsigned int cmd_flags = req ? 
req->cmd_flags : 0; mmc_get_card(card); @@ -3958,9 +3989,20 @@ static int mmc_blk_cmdq_issue_rq(struct mmc_queue *mq, struct request *req) ret = mmc_blk_cmdq_part_switch(card, md); if (ret) { - pr_err("%s: %s: partition switch failed %d\n", + pr_err("%s: %s: partition switch failed %d, resetting cmdq\n", md->disk->disk_name, __func__, ret); - goto out; + + mmc_blk_cmdq_reset(host, false); + err = mmc_blk_cmdq_part_switch(card, md); + if (!err) { + pr_err("%s: %s: partition switch success err = %d\n", + md->disk->disk_name, __func__, err); + } else { + pr_err("%s: %s: partition switch failed err = %d\n", + md->disk->disk_name, __func__, err); + ret = err; + goto out; + } } if (req) { diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c index 311f6d639d06..548a9e8b72ae 100644 --- a/drivers/mmc/core/bus.c +++ b/drivers/mmc/core/bus.c @@ -401,6 +401,7 @@ int mmc_add_card(struct mmc_card *card) return ret; mmc_card_set_present(card); + device_enable_async_suspend(&card->dev); return 0; } diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 0da9c5caea13..372f1fbbde4c 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -3301,6 +3301,13 @@ static void _mmc_detect_change(struct mmc_host *host, unsigned long delay, pm_wakeup_event(mmc_dev(host), 5000); host->detect_change = 1; + /* + * Change in cd_gpio state, so make sure detection part is + * not overided because of manual resume. + */ + if (cd_irq && mmc_bus_manual_resume(host)) + host->ignore_bus_resume_flags = true; + mmc_schedule_delayed_work(&host->detect, delay); } @@ -4165,6 +4172,18 @@ int mmc_detect_card_removed(struct mmc_host *host) } EXPORT_SYMBOL(mmc_detect_card_removed); +/* + * This should be called to make sure that detect work(mmc_rescan) + * is completed.Drivers may use this function from async schedule/probe + * contexts to make sure that the bootdevice detection is completed on + * completion of async_schedule. 
+ */ +void mmc_flush_detect_work(struct mmc_host *host) +{ + flush_delayed_work(&host->detect); +} +EXPORT_SYMBOL(mmc_flush_detect_work); + void mmc_rescan(struct work_struct *work) { unsigned long flags; @@ -4199,6 +4218,8 @@ void mmc_rescan(struct work_struct *work) host->bus_ops->detect(host); host->detect_change = 0; + if (host->ignore_bus_resume_flags) + host->ignore_bus_resume_flags = false; /* * Let mmc_bus_put() free the bus/bus_ops if we've found that @@ -4456,7 +4477,8 @@ int mmc_pm_notify(struct notifier_block *notify_block, spin_lock_irqsave(&host->lock, flags); host->rescan_disable = 0; - if (mmc_bus_manual_resume(host)) { + if (mmc_bus_manual_resume(host) && + !host->ignore_bus_resume_flags) { spin_unlock_irqrestore(&host->lock, flags); break; } diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index a28d6b98a042..6f4f81a370d8 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -2056,11 +2056,11 @@ reinit: } card->clk_scaling_lowest = host->f_min; - if ((card->mmc_avail_type | EXT_CSD_CARD_TYPE_HS400) || - (card->mmc_avail_type | EXT_CSD_CARD_TYPE_HS200)) + if ((card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400) || + (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200)) card->clk_scaling_highest = card->ext_csd.hs200_max_dtr; - else if ((card->mmc_avail_type | EXT_CSD_CARD_TYPE_HS) || - (card->mmc_avail_type | EXT_CSD_CARD_TYPE_DDR_52)) + else if ((card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS) || + (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52)) card->clk_scaling_highest = card->ext_csd.hs_max_dtr; else card->clk_scaling_highest = card->csd.max_dtr; diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index 5033107f6e26..21836eac001e 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -1237,7 +1237,10 @@ static int mmc_sd_suspend(struct mmc_host *host) if (!err) { pm_runtime_disable(&host->card->dev); pm_runtime_set_suspended(&host->card->dev); - } + /* if suspend fails, force mmc_detect_change during resume */ + } else if (mmc_bus_manual_resume(host)) + host->ignore_bus_resume_flags = true; + MMC_TRACE(host, "%s: Exit err: %d\n", __func__, err); return err; @@ -1286,6 +1289,7 @@ static int _mmc_sd_resume(struct mmc_host *host) if (err) { pr_err("%s: %s: mmc_sd_init_card_failed (%d)\n", mmc_hostname(host), __func__, err); + mmc_power_off(host); goto out; } mmc_card_clr_suspended(host->card); diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index ab4837128cb2..45d2f69f5f1a 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -142,6 +142,7 @@ #define CORE_START_CDC_TRAFFIC (1 << 6) #define CORE_PWRSAVE_DLL (1 << 3) +#define CORE_FIFO_ALT_EN (1 << 10) #define CORE_CMDEN_HS400_INPUT_MASK_CNT (1 << 13) #define CORE_DDR_CAL_EN (1 << 0) @@ -4154,7 +4155,7 @@ static void sdhci_set_default_hw_caps(struct sdhci_msm_host *msm_host, * starts coming. 
*/ if ((major == 1) && ((minor == 0x42) || (minor == 0x46) || - (minor == 0x49))) + (minor == 0x49) || (minor >= 0x6b))) msm_host->use_14lpp_dll = true; /* Fake 3.0V support for SDIO devices which requires such voltage */ @@ -4484,6 +4485,14 @@ static int sdhci_msm_probe(struct platform_device *pdev) writel_relaxed(CORE_VENDOR_SPEC_POR_VAL, host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC); + /* + * Ensure SDHCI FIFO is enabled by disabling alternative FIFO + */ + writel_relaxed((readl_relaxed(host->ioaddr + + msm_host_offset->CORE_VENDOR_SPEC3) & + ~CORE_FIFO_ALT_EN), host->ioaddr + + msm_host_offset->CORE_VENDOR_SPEC3); + if (!msm_host->mci_removed) { /* Set HC_MODE_EN bit in HC_MODE register */ writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE)); @@ -4738,6 +4747,9 @@ static int sdhci_msm_probe(struct platform_device *pdev) mmc_hostname(host->mmc), __func__, ret); device_remove_file(&pdev->dev, &msm_host->auto_cmd21_attr); } + if (sdhci_msm_is_bootdevice(&pdev->dev)) + mmc_flush_detect_work(host->mmc); + /* Successful initialization */ goto out; @@ -5015,7 +5027,7 @@ static int sdhci_msm_suspend_noirq(struct device *dev) } static const struct dev_pm_ops sdhci_msm_pmops = { - SET_SYSTEM_SLEEP_PM_OPS(sdhci_msm_suspend, sdhci_msm_resume) + SET_LATE_SYSTEM_SLEEP_PM_OPS(sdhci_msm_suspend, sdhci_msm_resume) SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend, sdhci_msm_runtime_resume, NULL) .suspend_noirq = sdhci_msm_suspend_noirq, diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 0033fea0a800..5906bba0aeff 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -3041,11 +3041,6 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) * above in sdhci_cmd_irq(). */ if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) { - if (intmask & SDHCI_INT_DATA_TIMEOUT) { - host->cmd->error = -ETIMEDOUT; - tasklet_schedule(&host->finish_tasklet); - return; - } if (intmask & SDHCI_INT_DATA_END) { /* * Some cards handle busy-end interrupt @@ -3059,8 +3054,20 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) return; } if (host->quirks2 & - SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD) + SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD) { + pr_err_ratelimited("%s: %s: ignoring interrupt: 0x%08x due to DATATOUT_FOR_R1B quirk\n", + mmc_hostname(host->mmc), + __func__, intmask); + MMC_TRACE(host->mmc, + "%s: Quirk ignoring intr: 0x%08x\n", + __func__, intmask); return; + } + if (intmask & SDHCI_INT_DATA_TIMEOUT) { + host->cmd->error = -ETIMEDOUT; + tasklet_schedule(&host->finish_tasklet); + return; + } } pr_err("%s: Got data interrupt 0x%08x even " diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 300be7fd0f24..81aecb90ac8d 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -516,7 +516,7 @@ struct sdhci_host { * Some controllers may use PIO mode to workaround HW issues in ADMA for * eMMC tuning commands. */ -#define SDHCI_QUIRK2_USE_PIO_FOR_EMMC_TUNING (1 << 23) +#define SDHCI_QUIRK2_USE_PIO_FOR_EMMC_TUNING (1 << 29) int irq; /* Device IRQ */ diff --git a/drivers/net/can/spi/Kconfig b/drivers/net/can/spi/Kconfig index a8001b41c81c..5b315573387e 100644 --- a/drivers/net/can/spi/Kconfig +++ b/drivers/net/can/spi/Kconfig @@ -12,4 +12,10 @@ config CAN_RH850 depends on HAS_DMA ---help--- Driver for the Renesas RH850 SPI CAN controller. + +config CAN_K61 + tristate "Freescale K61 SPI CAN controllers" + depends on SPI + ---help--- + Driver for the Freescale K61 SPI CAN controllers. 
endmenu diff --git a/drivers/net/can/spi/Makefile b/drivers/net/can/spi/Makefile index e84da9b8d5ab..375a6cbfbb67 100644 --- a/drivers/net/can/spi/Makefile +++ b/drivers/net/can/spi/Makefile @@ -5,3 +5,4 @@ obj-$(CONFIG_CAN_MCP251X) += mcp251x.o obj-$(CONFIG_CAN_RH850) += rh850.o +obj-${CONFIG_CAN_K61} += k61.o diff --git a/drivers/net/can/spi/k61.c b/drivers/net/can/spi/k61.c new file mode 100644 index 000000000000..9ce0ad854caa --- /dev/null +++ b/drivers/net/can/spi/k61.c @@ -0,0 +1,936 @@ +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/netdevice.h> +#include <linux/workqueue.h> +#include <linux/spi/spi.h> +#include <linux/can.h> +#include <linux/can/dev.h> +#include <linux/can/error.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_gpio.h> +#include <linux/uaccess.h> + +#define DEBUG_K61 0 +#if DEBUG_K61 == 1 +#define LOGDI(...) dev_info(&priv_data->spidev->dev, __VA_ARGS__) +#define LOGNI(...) netdev_info(netdev, __VA_ARGS__) +#else +#define LOGDI(...) +#define LOGNI(...) +#endif +#define LOGDE(...) dev_err(&priv_data->spidev->dev, __VA_ARGS__) +#define LOGNE(...) netdev_err(netdev, __VA_ARGS__) + +#define MAX_TX_BUFFERS 1 +#define XFER_BUFFER_SIZE 64 +#define K61_CLOCK 120000000 +#define K61_MAX_CHANNELS 1 +#define K61_FW_QUERY_RETRY_COUNT 3 + +struct k61_can { + struct net_device *netdev; + struct spi_device *spidev; + + struct mutex spi_lock; /* SPI device lock */ + + struct workqueue_struct *tx_wq; + char *tx_buf, *rx_buf; + int xfer_length; + atomic_t msg_seq; + + atomic_t netif_queue_stop; + struct completion response_completion; + int reset; + int wait_cmd; + int cmd_result; + int bits_per_word; + int reset_delay_msec; +}; + +struct k61_netdev_privdata { + struct can_priv can; + struct k61_can *k61_can; +}; + +struct k61_tx_work { + struct work_struct work; + struct sk_buff *skb; + struct net_device *netdev; +}; + +/* Message definitions */ +struct spi_mosi { /* TLV for MOSI line */ + u8 cmd; + u8 len; + u16 seq; + u8 data[]; +} __packed; + +struct spi_miso { /* TLV for MISO line */ + u8 cmd; + u8 len; + u16 seq; /* should match seq field from request, or 0 for unsols */ + u8 data[]; +} __packed; + +#define CMD_GET_FW_VERSION 0x81 +#define CMD_CAN_SEND_FRAME 0x82 +#define CMD_CAN_ADD_FILTER 0x83 +#define CMD_CAN_REMOVE_FILTER 0x84 +#define CMD_CAN_RECEIVE_FRAME 0x85 +#define CMD_CAN_DATA_BUFF_ADD 0x87 +#define CMD_CAN_DATA_BUFF_REMOVE 0x88 +#define CMD_CAN_RELEASE_BUFFER 0x89 +#define CMD_CAN_DATA_BUFF_REMOVE_ALL 0x8A + +#define IOCTL_RELEASE_CAN_BUFFER (SIOCDEVPRIVATE + 0) +#define IOCTL_ENABLE_BUFFERING (SIOCDEVPRIVATE + 1) +#define IOCTL_ADD_FRAME_FILTER (SIOCDEVPRIVATE + 2) +#define IOCTL_REMOVE_FRAME_FILTER (SIOCDEVPRIVATE + 3) +#define IOCTL_DISABLE_BUFFERING (SIOCDEVPRIVATE + 5) +#define IOCTL_DISABLE_ALL_BUFFERING (SIOCDEVPRIVATE + 6) + +struct can_fw_resp { + u8 maj; + u8 min; + u8 ver; +} __packed; + +struct can_write_req { + u32 ts; + u32 mid; + u8 dlc; + 
u8 data[]; +} __packed; + +struct can_write_resp { + u8 err; +} __packed; + +struct can_receive_frame { + u32 ts; + u32 mid; + u8 dlc; + u8 data[]; +} __packed; + +struct can_add_filter_req { + u8 can_if; + u32 mid; + u32 mask; + u8 type; +} __packed; + +static struct can_bittiming_const k61_bittiming_const = { + .name = "k61", + .tseg1_min = 4, + .tseg1_max = 16, + .tseg2_min = 2, + .tseg2_max = 8, + .sjw_max = 4, + .brp_min = 4, + .brp_max = 1023, + .brp_inc = 1, +}; + +struct k61_add_can_buffer { + u8 can_if; + u32 mid; + u32 mask; +} __packed; + +struct k61_delete_can_buffer { + u8 can_if; + u32 mid; + u32 mask; +} __packed; + +static int k61_rx_message(struct k61_can *priv_data); + +static irqreturn_t k61_irq(int irq, void *priv) +{ + struct k61_can *priv_data = priv; + + LOGDI("k61_irq\n"); + k61_rx_message(priv_data); + return IRQ_HANDLED; +} + +static void k61_frame_error(struct k61_can *priv_data, + struct can_receive_frame *frame) +{ + struct can_frame *cf; + struct sk_buff *skb; + struct net_device *netdev; + + netdev = priv_data->netdev; + skb = alloc_can_err_skb(netdev, &cf); + if (!skb) { + LOGDE("skb alloc failed\n"); + return; + } + + cf->can_id |= CAN_ERR_BUSERROR; + cf->data[2] |= CAN_ERR_PROT_FORM; + netdev->stats.rx_errors++; + netif_rx(skb); + netdev->stats.rx_packets++; + netdev->stats.rx_bytes += cf->can_dlc; +} + +static void k61_receive_frame(struct k61_can *priv_data, + struct can_receive_frame *frame) +{ + struct can_frame *cf; + struct sk_buff *skb; + struct skb_shared_hwtstamps *skt; + struct timeval tv; + static int msec; + struct net_device *netdev; + int i; + + if (frame->dlc > 8) { + LOGDE("can rx frame error\n"); + k61_frame_error(priv_data, frame); + return; + } + + netdev = priv_data->netdev; + skb = alloc_can_skb(netdev, &cf); + if (!skb) { + LOGDE("skb alloc failed\n"); + return; + } + + LOGDI("rcv frame %d %x %d %x %x %x %x %x %x %x %x\n", + frame->ts, frame->mid, frame->dlc, frame->data[0], + frame->data[1], frame->data[2], frame->data[3], frame->data[4], + frame->data[5], frame->data[6], frame->data[7]); + cf->can_id = le32_to_cpu(frame->mid); + cf->can_dlc = get_can_dlc(frame->dlc); + + for (i = 0; i < cf->can_dlc; i++) + cf->data[i] = frame->data[i]; + + msec = le32_to_cpu(frame->ts); + tv.tv_sec = msec / 1000; + tv.tv_usec = (msec - tv.tv_sec * 1000) * 1000; + skt = skb_hwtstamps(skb); + skt->hwtstamp = timeval_to_ktime(tv); + LOGDI(" hwtstamp %lld\n", ktime_to_ms(skt->hwtstamp)); + skb->tstamp = timeval_to_ktime(tv); + netif_rx(skb); + netdev->stats.rx_packets++; + netdev->stats.rx_bytes += cf->can_dlc; +} + +static void k61_process_response(struct k61_can *priv_data, + struct spi_miso *resp) +{ + int ret = 0; + + LOGDI("<%x %2d [%d]\n", resp->cmd, resp->len, resp->seq); + if (resp->cmd == CMD_CAN_RECEIVE_FRAME) { + struct can_receive_frame *frame = + (struct can_receive_frame *)&resp->data; + k61_receive_frame(priv_data, frame); + } else if (resp->cmd == CMD_GET_FW_VERSION) { + struct can_fw_resp *fw_resp = (struct can_fw_resp *)resp->data; + + dev_info(&priv_data->spidev->dev, "fw %d.%d.%d", + fw_resp->maj, fw_resp->min, fw_resp->ver); + } + + if (resp->cmd == priv_data->wait_cmd) { + priv_data->cmd_result = ret; + complete(&priv_data->response_completion); + } +} + +static void k61_process_rx(struct k61_can *priv_data, char *rx_buf) +{ + struct spi_miso *resp; + int length_processed = 0, actual_length = priv_data->xfer_length; + + while (length_processed < actual_length) { + int length_left = actual_length - length_processed; + int length = 
0; /* length of consumed chunk */ + void *data; + + data = rx_buf + length_processed; + resp = (struct spi_miso *)data; + + if (resp->cmd == 0) { + /* special case. ignore cmd==0 */ + length_processed += 1; + continue; + } + + LOGDI("processing. p %d -> l %d (t %d)\n", + length_processed, length_left, priv_data->xfer_length); + length = resp->len + sizeof(*resp); + + if (length <= length_left) { + k61_process_response(priv_data, resp); + length_processed += length; + } else { + /* Incomplete command */ + break; + } + } +} + +static int k61_do_spi_transaction(struct k61_can *priv_data) +{ + struct spi_device *spi; + struct spi_transfer *xfer; + struct spi_message *msg; + int ret; + + spi = priv_data->spidev; + msg = devm_kzalloc(&spi->dev, sizeof(*msg), GFP_KERNEL); + xfer = devm_kzalloc(&spi->dev, sizeof(*xfer), GFP_KERNEL); + if (xfer == 0 || msg == 0) + return -ENOMEM; + spi_message_init(msg); + + spi_message_add_tail(xfer, msg); + xfer->tx_buf = priv_data->tx_buf; + xfer->rx_buf = priv_data->rx_buf; + xfer->len = XFER_BUFFER_SIZE; + xfer->bits_per_word = priv_data->bits_per_word; + + ret = spi_sync(spi, msg); + LOGDI("spi_sync ret %d\n", ret); + + if (ret == 0) { + devm_kfree(&spi->dev, msg); + devm_kfree(&spi->dev, xfer); + k61_process_rx(priv_data, priv_data->rx_buf); + } + return ret; +} + +static int k61_rx_message(struct k61_can *priv_data) +{ + char *tx_buf, *rx_buf; + int ret; + + mutex_lock(&priv_data->spi_lock); + tx_buf = priv_data->tx_buf; + rx_buf = priv_data->rx_buf; + memset(tx_buf, 0, XFER_BUFFER_SIZE); + memset(rx_buf, 0, XFER_BUFFER_SIZE); + priv_data->xfer_length = XFER_BUFFER_SIZE; + + ret = k61_do_spi_transaction(priv_data); + mutex_unlock(&priv_data->spi_lock); + + return ret; +} + +static int k61_query_firmware_version(struct k61_can *priv_data) +{ + char *tx_buf, *rx_buf; + int ret; + struct spi_mosi *req; + + mutex_lock(&priv_data->spi_lock); + tx_buf = priv_data->tx_buf; + rx_buf = priv_data->rx_buf; + memset(tx_buf, 0, XFER_BUFFER_SIZE); + memset(rx_buf, 0, XFER_BUFFER_SIZE); + priv_data->xfer_length = XFER_BUFFER_SIZE; + + req = (struct spi_mosi *)tx_buf; + req->cmd = CMD_GET_FW_VERSION; + req->len = 0; + req->seq = atomic_inc_return(&priv_data->msg_seq); + + priv_data->wait_cmd = CMD_GET_FW_VERSION; + priv_data->cmd_result = -1; + reinit_completion(&priv_data->response_completion); + + ret = k61_do_spi_transaction(priv_data); + mutex_unlock(&priv_data->spi_lock); + + if (ret == 0) { + wait_for_completion_interruptible_timeout( + &priv_data->response_completion, 0.001 * HZ); + ret = priv_data->cmd_result; + } + + return ret; +} + +static int k61_can_write(struct k61_can *priv_data, struct can_frame *cf) +{ + char *tx_buf, *rx_buf; + int ret, i; + struct spi_mosi *req; + struct can_write_req *req_d; + struct net_device *netdev; + + mutex_lock(&priv_data->spi_lock); + tx_buf = priv_data->tx_buf; + rx_buf = priv_data->rx_buf; + memset(tx_buf, 0, XFER_BUFFER_SIZE); + memset(rx_buf, 0, XFER_BUFFER_SIZE); + priv_data->xfer_length = XFER_BUFFER_SIZE; + + req = (struct spi_mosi *)tx_buf; + req->cmd = CMD_CAN_SEND_FRAME; + req->len = sizeof(struct can_write_req) + 8; + req->seq = atomic_inc_return(&priv_data->msg_seq); + + req_d = (struct can_write_req *)req->data; + req_d->mid = cf->can_id; + req_d->dlc = cf->can_dlc; + for (i = 0; i < cf->can_dlc; i++) + req_d->data[i] = cf->data[i]; + + ret = k61_do_spi_transaction(priv_data); + netdev = priv_data->netdev; + netdev->stats.tx_packets++; + netdev->stats.tx_bytes += cf->can_dlc; + mutex_unlock(&priv_data->spi_lock); + 
+ return ret; +} + +static int k61_netdev_open(struct net_device *netdev) +{ + int err; + + LOGNI("Open"); + err = open_candev(netdev); + if (err) + return err; + + netif_start_queue(netdev); + + return 0; +} + +static int k61_netdev_close(struct net_device *netdev) +{ + LOGNI("Close"); + + netif_stop_queue(netdev); + close_candev(netdev); + return 0; +} + +static void k61_send_can_frame(struct work_struct *ws) +{ + struct k61_tx_work *tx_work; + struct can_frame *cf; + struct k61_can *priv_data; + struct net_device *netdev; + struct k61_netdev_privdata *netdev_priv_data; + + tx_work = container_of(ws, struct k61_tx_work, work); + netdev = tx_work->netdev; + netdev_priv_data = netdev_priv(netdev); + priv_data = netdev_priv_data->k61_can; + LOGDI("send_can_frame ws %p\n", ws); + LOGDI("send_can_frame tx %p\n", tx_work); + + cf = (struct can_frame *)tx_work->skb->data; + k61_can_write(priv_data, cf); + + dev_kfree_skb(tx_work->skb); + kfree(tx_work); +} + +static int k61_frame_filter(struct net_device *netdev, + struct ifreq *ifr, int cmd) +{ + char *tx_buf, *rx_buf; + int ret; + struct spi_mosi *req; + struct can_add_filter_req *add_filter; + struct can_add_filter_req *filter_request; + struct k61_can *priv_data; + struct k61_netdev_privdata *netdev_priv_data; + struct spi_device *spi; + + netdev_priv_data = netdev_priv(netdev); + priv_data = netdev_priv_data->k61_can; + spi = priv_data->spidev; + + mutex_lock(&priv_data->spi_lock); + tx_buf = priv_data->tx_buf; + rx_buf = priv_data->rx_buf; + memset(tx_buf, 0, XFER_BUFFER_SIZE); + memset(rx_buf, 0, XFER_BUFFER_SIZE); + priv_data->xfer_length = XFER_BUFFER_SIZE; + + if (!ifr) + return -EINVAL; + + filter_request = + devm_kzalloc(&spi->dev, sizeof(struct can_add_filter_req), + GFP_KERNEL); + if (!filter_request) + return -ENOMEM; + + if (copy_from_user(filter_request, ifr->ifr_data, + sizeof(struct can_add_filter_req))) { + devm_kfree(&spi->dev, filter_request); + return -EFAULT; + } + + req = (struct spi_mosi *)tx_buf; + if (cmd == IOCTL_ADD_FRAME_FILTER) + req->cmd = CMD_CAN_ADD_FILTER; + else + req->cmd = CMD_CAN_REMOVE_FILTER; + + req->len = sizeof(struct can_add_filter_req); + req->seq = atomic_inc_return(&priv_data->msg_seq); + + add_filter = (struct can_add_filter_req *)req->data; + add_filter->can_if = filter_request->can_if; + add_filter->mid = filter_request->mid; + add_filter->mask = filter_request->mask; + + ret = k61_do_spi_transaction(priv_data); + devm_kfree(&spi->dev, filter_request); + mutex_unlock(&priv_data->spi_lock); + return ret; +} + +static netdev_tx_t k61_netdev_start_xmit( + struct sk_buff *skb, struct net_device *netdev) +{ + struct k61_netdev_privdata *netdev_priv_data = netdev_priv(netdev); + struct k61_can *priv_data = netdev_priv_data->k61_can; + struct k61_tx_work *tx_work; + + LOGNI("netdev_start_xmit"); + if (can_dropped_invalid_skb(netdev, skb)) { + LOGNE("Dropping invalid can frame\n"); + return NETDEV_TX_OK; + } + tx_work = kzalloc(sizeof(*tx_work), GFP_ATOMIC); + if (tx_work == 0) + return NETDEV_TX_OK; + INIT_WORK(&tx_work->work, k61_send_can_frame); + tx_work->netdev = netdev; + tx_work->skb = skb; + queue_work(priv_data->tx_wq, &tx_work->work); + + return NETDEV_TX_OK; +} + +static int k61_send_release_can_buffer_cmd(struct net_device *netdev) +{ + struct k61_can *priv_data; + struct k61_netdev_privdata *netdev_priv_data; + struct spi_device *spi; + char *tx_buf, *rx_buf; + int ret; + struct spi_mosi *req; + + netdev_priv_data = netdev_priv(netdev); + priv_data = netdev_priv_data->k61_can; + spi = 
priv_data->spidev; + + mutex_lock(&priv_data->spi_lock); + tx_buf = priv_data->tx_buf; + rx_buf = priv_data->rx_buf; + memset(tx_buf, 0, XFER_BUFFER_SIZE); + memset(rx_buf, 0, XFER_BUFFER_SIZE); + priv_data->xfer_length = XFER_BUFFER_SIZE; + + req = (struct spi_mosi *)tx_buf; + req->cmd = CMD_CAN_RELEASE_BUFFER; + req->len = 0; + req->seq = atomic_inc_return(&priv_data->msg_seq); + + ret = k61_do_spi_transaction(priv_data); + mutex_unlock(&priv_data->spi_lock); + return ret; +} + +static int k61_remove_all_buffering(struct net_device *netdev) +{ + char *tx_buf, *rx_buf; + int ret; + struct spi_mosi *req; + struct k61_can *priv_data; + struct k61_netdev_privdata *netdev_priv_data; + + netdev_priv_data = netdev_priv(netdev); + priv_data = netdev_priv_data->k61_can; + + mutex_lock(&priv_data->spi_lock); + tx_buf = priv_data->tx_buf; + rx_buf = priv_data->rx_buf; + memset(tx_buf, 0, XFER_BUFFER_SIZE); + memset(rx_buf, 0, XFER_BUFFER_SIZE); + priv_data->xfer_length = XFER_BUFFER_SIZE; + + req = (struct spi_mosi *)tx_buf; + req->cmd = CMD_CAN_DATA_BUFF_REMOVE_ALL; + req->len = 0; + req->seq = atomic_inc_return(&priv_data->msg_seq); + + priv_data->wait_cmd = req->cmd; + priv_data->cmd_result = -1; + reinit_completion(&priv_data->response_completion); + + ret = k61_do_spi_transaction(priv_data); + mutex_unlock(&priv_data->spi_lock); + + if (ret == 0) { + LOGDI("k61_do_blocking_ioctl ready to wait for response\n"); + /* Flash write may take some time. Hence give 2s as + * wait duration in the worst case. This wait time should + * increase if more number of frame IDs are stored in flash. + */ + ret = wait_for_completion_interruptible_timeout( + &priv_data->response_completion, 2 * HZ); + ret = priv_data->cmd_result; + } + + return ret; +} + +static int k61_convert_ioctl_cmd_to_spi_cmd(int ioctl_cmd) +{ + switch (ioctl_cmd) { + case IOCTL_ENABLE_BUFFERING: + return CMD_CAN_DATA_BUFF_ADD; + case IOCTL_DISABLE_BUFFERING: + return CMD_CAN_DATA_BUFF_REMOVE; + } + return -EINVAL; +} + +static int k61_data_buffering(struct net_device *netdev, + struct ifreq *ifr, int cmd) +{ + int spi_cmd, ret; + char *tx_buf, *rx_buf; + struct k61_can *priv_data; + struct spi_mosi *req; + struct k61_netdev_privdata *netdev_priv_data; + struct k61_add_can_buffer *enable_buffering; + struct k61_add_can_buffer *add_request; + struct spi_device *spi; + + netdev_priv_data = netdev_priv(netdev); + priv_data = netdev_priv_data->k61_can; + spi = priv_data->spidev; + + mutex_lock(&priv_data->spi_lock); + spi_cmd = k61_convert_ioctl_cmd_to_spi_cmd(cmd); + if (spi_cmd < 0) { + LOGDE("k61_do_blocking_ioctl wrong command %d\n", cmd); + return spi_cmd; + } + + if (!ifr) + return -EINVAL; + + add_request = devm_kzalloc(&spi->dev, sizeof(struct k61_add_can_buffer), + GFP_KERNEL); + if (!add_request) + return -ENOMEM; + + if (copy_from_user(add_request, ifr->ifr_data, + sizeof(struct k61_add_can_buffer))) { + devm_kfree(&spi->dev, add_request); + return -EFAULT; + } + + tx_buf = priv_data->tx_buf; + rx_buf = priv_data->rx_buf; + memset(tx_buf, 0, XFER_BUFFER_SIZE); + memset(rx_buf, 0, XFER_BUFFER_SIZE); + priv_data->xfer_length = XFER_BUFFER_SIZE; + + req = (struct spi_mosi *)tx_buf; + req->cmd = spi_cmd; + req->len = sizeof(struct k61_add_can_buffer); + req->seq = atomic_inc_return(&priv_data->msg_seq); + + enable_buffering = (struct k61_add_can_buffer *)req->data; + enable_buffering->can_if = add_request->can_if; + enable_buffering->mid = add_request->mid; + enable_buffering->mask = add_request->mask; + + priv_data->wait_cmd = spi_cmd; 
+ priv_data->cmd_result = -1; + reinit_completion(&priv_data->response_completion); + + ret = k61_do_spi_transaction(priv_data); + devm_kfree(&spi->dev, add_request); + mutex_unlock(&priv_data->spi_lock); + + if (ret == 0) { + LOGDI("k61_do_blocking_ioctl ready to wait for response\n"); + /* Flash write may take some time. Hence give 400ms as + * wait duration in the worst case. + */ + ret = wait_for_completion_interruptible_timeout( + &priv_data->response_completion, 0.4 * HZ); + ret = priv_data->cmd_result; + } + return ret; +} + +static int k61_netdev_do_ioctl(struct net_device *netdev, + struct ifreq *ifr, int cmd) +{ + struct k61_can *priv_data; + struct k61_netdev_privdata *netdev_priv_data; + int ret = -EINVAL; + + netdev_priv_data = netdev_priv(netdev); + priv_data = netdev_priv_data->k61_can; + LOGDI("k61_netdev_do_ioctl %x\n", cmd); + + switch (cmd) { + case IOCTL_ADD_FRAME_FILTER: + case IOCTL_REMOVE_FRAME_FILTER: + ret = k61_frame_filter(netdev, ifr, cmd); + break; + case IOCTL_ENABLE_BUFFERING: + case IOCTL_DISABLE_BUFFERING: + ret = k61_data_buffering(netdev, ifr, cmd); + break; + case IOCTL_DISABLE_ALL_BUFFERING: + ret = k61_remove_all_buffering(netdev); + break; + case IOCTL_RELEASE_CAN_BUFFER: + ret = k61_send_release_can_buffer_cmd(netdev); + break; + } + return ret; +} + +static const struct net_device_ops k61_netdev_ops = { + .ndo_open = k61_netdev_open, + .ndo_stop = k61_netdev_close, + .ndo_start_xmit = k61_netdev_start_xmit, + .ndo_do_ioctl = k61_netdev_do_ioctl, +}; + +static int k61_create_netdev(struct spi_device *spi, + struct k61_can *priv_data) +{ + struct net_device *netdev; + struct k61_netdev_privdata *netdev_priv_data; + + LOGDI("k61_create_netdev\n"); + netdev = alloc_candev(sizeof(*netdev_priv_data), MAX_TX_BUFFERS); + if (!netdev) { + LOGDE("Couldn't alloc candev\n"); + return -ENOMEM; + } + + netdev_priv_data = netdev_priv(netdev); + netdev_priv_data->k61_can = priv_data; + + priv_data->netdev = netdev; + + netdev->netdev_ops = &k61_netdev_ops; + SET_NETDEV_DEV(netdev, &spi->dev); + netdev_priv_data->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | + CAN_CTRLMODE_LISTENONLY; + netdev_priv_data->can.bittiming_const = &k61_bittiming_const; + netdev_priv_data->can.clock.freq = K61_CLOCK; + + return 0; +} + +static struct k61_can *k61_create_priv_data(struct spi_device *spi) +{ + struct k61_can *priv_data; + int err; + struct device *dev; + + dev = &spi->dev; + priv_data = devm_kzalloc(dev, sizeof(*priv_data), GFP_KERNEL); + if (!priv_data) { + dev_err(dev, "Couldn't alloc k61_can\n"); + return 0; + } + spi_set_drvdata(spi, priv_data); + atomic_set(&priv_data->netif_queue_stop, 0); + priv_data->spidev = spi; + + priv_data->tx_wq = alloc_workqueue("k61_tx_wq", 0, 0); + if (!priv_data->tx_wq) { + dev_err(dev, "Couldn't alloc workqueue\n"); + err = -ENOMEM; + goto cleanup_privdata; + } + + priv_data->tx_buf = devm_kzalloc(dev, XFER_BUFFER_SIZE, + GFP_KERNEL); + priv_data->rx_buf = devm_kzalloc(dev, XFER_BUFFER_SIZE, + GFP_KERNEL); + if (!priv_data->tx_buf || !priv_data->rx_buf) { + dev_err(dev, "Couldn't alloc tx or rx buffers\n"); + err = -ENOMEM; + goto cleanup_privdata; + } + priv_data->xfer_length = 0; + + mutex_init(&priv_data->spi_lock); + atomic_set(&priv_data->msg_seq, 0); + init_completion(&priv_data->response_completion); + return priv_data; + +cleanup_privdata: + if (priv_data) { + if (priv_data->tx_wq) + destroy_workqueue(priv_data->tx_wq); + } + return 0; +} + +static int k61_probe(struct spi_device *spi) +{ + int err, retry = 0, query_err = -1; 
+ struct k61_can *priv_data; + struct device *dev; + + dev = &spi->dev; + dev_dbg(dev, "k61_probe"); + + err = spi_setup(spi); + if (err) { + dev_err(dev, "spi_setup failed: %d", err); + return err; + } + + priv_data = k61_create_priv_data(spi); + if (!priv_data) { + dev_err(dev, "Failed to create k61_can priv_data\n"); + err = -ENOMEM; + return err; + } + dev_dbg(dev, "k61_probe created priv_data"); + + err = of_property_read_u32(spi->dev.of_node, "bits-per-word", + &priv_data->bits_per_word); + if (err) + priv_data->bits_per_word = 16; + + err = of_property_read_u32(spi->dev.of_node, "reset-delay-msec", + &priv_data->reset_delay_msec); + if (err) + priv_data->reset_delay_msec = 1; + + priv_data->reset = of_get_named_gpio(spi->dev.of_node, "reset-gpio", 0); + if (gpio_is_valid(priv_data->reset)) { + err = gpio_request(priv_data->reset, "k61-reset"); + if (err < 0) { + dev_err(&spi->dev, + "failed to request gpio %d: %d\n", + priv_data->reset, err); + goto cleanup_candev; + } + + gpio_direction_output(priv_data->reset, 0); + udelay(1); + gpio_direction_output(priv_data->reset, 1); + msleep(priv_data->reset_delay_msec); + } + + err = k61_create_netdev(spi, priv_data); + if (err) { + dev_err(dev, "Failed to create CAN device: %d", err); + goto cleanup_candev; + } + + err = register_candev(priv_data->netdev); + if (err) { + dev_err(dev, "Failed to register CAN device: %d", err); + goto unregister_candev; + } + + err = request_threaded_irq(spi->irq, NULL, k61_irq, + IRQF_TRIGGER_FALLING | IRQF_ONESHOT, + "k61", priv_data); + if (err) { + dev_err(dev, "Failed to request irq: %d", err); + goto unregister_candev; + } + dev_dbg(dev, "Request irq %d ret %d\n", spi->irq, err); + + while ((query_err != 0) && (retry < K61_FW_QUERY_RETRY_COUNT)) { + query_err = k61_query_firmware_version(priv_data); + retry++; + } + + if (query_err) { + dev_info(dev, "K61 probe failed\n"); + err = -ENODEV; + goto free_irq; + } + return 0; + +free_irq: + free_irq(spi->irq, priv_data); +unregister_candev: + unregister_candev(priv_data->netdev); +cleanup_candev: + if (priv_data) { + if (priv_data->netdev) + free_candev(priv_data->netdev); + if (priv_data->tx_wq) + destroy_workqueue(priv_data->tx_wq); + } + return err; +} + +static int k61_remove(struct spi_device *spi) +{ + struct k61_can *priv_data = spi_get_drvdata(spi); + + LOGDI("k61_remove\n"); + unregister_candev(priv_data->netdev); + free_candev(priv_data->netdev); + destroy_workqueue(priv_data->tx_wq); + return 0; +} + +static const struct of_device_id k61_match_table[] = { + { .compatible = "fsl,k61" }, + { .compatible = "nxp,mpc5746c" }, + { } +}; + +static struct spi_driver k61_driver = { + .driver = { + .name = "k61", + .of_match_table = k61_match_table, + .owner = THIS_MODULE, + }, + .probe = k61_probe, + .remove = k61_remove, +}; +module_spi_driver(k61_driver); + +MODULE_DESCRIPTION("Freescale K61 SPI-CAN module"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c index ecc4a334c507..0a54e7dac0ab 100644 --- a/drivers/net/ethernet/aurora/nb8800.c +++ b/drivers/net/ethernet/aurora/nb8800.c @@ -608,7 +608,7 @@ static void nb8800_mac_config(struct net_device *dev) mac_mode |= HALF_DUPLEX; if (gigabit) { - if (priv->phy_mode == PHY_INTERFACE_MODE_RGMII) + if (phy_interface_is_rgmii(dev->phydev)) mac_mode |= RGMII_MODE; mac_mode |= GMAC_MODE; @@ -1295,11 +1295,10 @@ static int nb8800_tangox_init(struct net_device *dev) break; case PHY_INTERFACE_MODE_RGMII: - pad_mode = PAD_MODE_RGMII; - break; - + 
case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: case PHY_INTERFACE_MODE_RGMII_TXID: - pad_mode = PAD_MODE_RGMII | PAD_MODE_GTX_CLK_DELAY; + pad_mode = PAD_MODE_RGMII; break; default: diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 21e5b9ed1ead..3613469dc5c6 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -8722,11 +8722,14 @@ static void tg3_free_consistent(struct tg3 *tp) tg3_mem_rx_release(tp); tg3_mem_tx_release(tp); + /* Protect tg3_get_stats64() from reading freed tp->hw_stats. */ + tg3_full_lock(tp, 0); if (tp->hw_stats) { dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats), tp->hw_stats, tp->stats_mapping); tp->hw_stats = NULL; } + tg3_full_unlock(tp); } /* diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index cc199063612a..6c66d2979795 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -630,6 +630,10 @@ static void dump_command(struct mlx5_core_dev *dev, pr_debug("\n"); } +static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg); +static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev, + struct mlx5_cmd_msg *msg); + static void cmd_work_handler(struct work_struct *work) { struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work); @@ -638,16 +642,27 @@ static void cmd_work_handler(struct work_struct *work) struct mlx5_cmd_layout *lay; struct semaphore *sem; unsigned long flags; + int alloc_ret; sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem; down(sem); if (!ent->page_queue) { - ent->idx = alloc_ent(cmd); - if (ent->idx < 0) { + alloc_ret = alloc_ent(cmd); + if (alloc_ret < 0) { + if (ent->callback) { + ent->callback(-EAGAIN, ent->context); + mlx5_free_cmd_msg(dev, ent->out); + free_msg(dev, ent->in); + free_cmd(ent); + } else { + ent->ret = -EAGAIN; + complete(&ent->done); + } mlx5_core_err(dev, "failed to allocate command entry\n"); up(sem); return; } + ent->idx = alloc_ret; } else { ent->idx = cmd->max_reg_cmds; spin_lock_irqsave(&cmd->alloc_lock, flags); diff --git a/drivers/net/ethernet/msm/msm_rmnet_mhi.c b/drivers/net/ethernet/msm/msm_rmnet_mhi.c index de14dcc6f4ed..a342e39b9f43 100644 --- a/drivers/net/ethernet/msm/msm_rmnet_mhi.c +++ b/drivers/net/ethernet/msm/msm_rmnet_mhi.c @@ -958,6 +958,7 @@ static void rmnet_mhi_cb(struct mhi_cb_info *cb_info) { struct rmnet_mhi_private *rmnet_mhi_ptr; struct mhi_result *result; + char ifalias[IFALIASZ]; int r = 0; if (!cb_info || !cb_info->result) { @@ -979,9 +980,16 @@ static void rmnet_mhi_cb(struct mhi_cb_info *cb_info) * as we set mhi_enabled = 0, we gurantee rest of * driver will not touch any critical data. 
*/ + snprintf(ifalias, sizeof(ifalias), "%s", "unidentified_netdev"); write_lock_irq(&rmnet_mhi_ptr->pm_lock); rmnet_mhi_ptr->mhi_enabled = 0; write_unlock_irq(&rmnet_mhi_ptr->pm_lock); + /* Set unidentified_net_dev string to ifalias + * on error notification + */ + rtnl_lock(); + dev_set_alias(rmnet_mhi_ptr->dev, ifalias, strlen(ifalias)); + rtnl_unlock(); if (cb_info->chan == rmnet_mhi_ptr->rx_channel) { rmnet_log(rmnet_mhi_ptr, MSG_INFO, diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 4296066a7ad3..479af106aaeb 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -819,6 +819,7 @@ static struct sh_eth_cpu_data r8a7740_data = { .rpadir_value = 2 << 16, .no_trimd = 1, .no_ade = 1, + .hw_crc = 1, .tsu = 1, .select_mii = 1, .shift_rd0 = 1, diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c index bca6a1e72d1d..e1bb802d4a4d 100644 --- a/drivers/net/irda/mcs7780.c +++ b/drivers/net/irda/mcs7780.c @@ -141,9 +141,19 @@ static int mcs_set_reg(struct mcs_cb *mcs, __u16 reg, __u16 val) static int mcs_get_reg(struct mcs_cb *mcs, __u16 reg, __u16 * val) { struct usb_device *dev = mcs->usbdev; - int ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ, - MCS_RD_RTYPE, 0, reg, val, 2, - msecs_to_jiffies(MCS_CTRL_TIMEOUT)); + void *dmabuf; + int ret; + + dmabuf = kmalloc(sizeof(__u16), GFP_KERNEL); + if (!dmabuf) + return -ENOMEM; + + ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ, + MCS_RD_RTYPE, 0, reg, dmabuf, 2, + msecs_to_jiffies(MCS_CTRL_TIMEOUT)); + + memcpy(val, dmabuf, sizeof(__u16)); + kfree(dmabuf); return ret; } diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index 32f10662f4ac..7242dd4b3238 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -29,6 +29,7 @@ #define MII_DP83867_MICR 0x12 #define MII_DP83867_ISR 0x13 #define DP83867_CTRL 0x1f +#define DP83867_CFG3 0x1e /* Extended Registers */ #define DP83867_RGMIICTL 0x0032 @@ -89,6 +90,8 @@ static int dp83867_config_intr(struct phy_device *phydev) micr_status |= (MII_DP83867_MICR_AN_ERR_INT_EN | MII_DP83867_MICR_SPEED_CHNG_INT_EN | + MII_DP83867_MICR_AUTONEG_COMP_INT_EN | + MII_DP83867_MICR_LINK_STS_CHNG_INT_EN | MII_DP83867_MICR_DUP_MODE_CHNG_INT_EN | MII_DP83867_MICR_SLEEP_MODE_CHNG_INT_EN); @@ -184,6 +187,13 @@ static int dp83867_config_init(struct phy_device *phydev) DP83867_DEVADDR, phydev->addr, delay); } + /* Enable Interrupt output INT_OE in CFG3 register */ + if (phy_interrupt_is_valid(phydev)) { + val = phy_read(phydev, DP83867_CFG3); + val |= BIT(7); + phy_write(phydev, DP83867_CFG3, val); + } + return 0; } diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 851c0e121807..49d9f0a789fe 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -541,6 +541,9 @@ void phy_stop_machine(struct phy_device *phydev) if (phydev->state > PHY_UP && phydev->state != PHY_HALTED) phydev->state = PHY_UP; mutex_unlock(&phydev->lock); + + /* Now we can run the state machine synchronously */ + phy_state_machine(&phydev->state_queue.work); } /** @@ -918,6 +921,15 @@ void phy_state_machine(struct work_struct *work) if (old_link != phydev->link) phydev->state = PHY_CHANGELINK; } + /* + * Failsafe: check that nobody set phydev->link=0 between two + * poll cycles, otherwise we won't leave RUNNING state as long + * as link remains down. 
+ */ + if (!phydev->link && phydev->state == PHY_RUNNING) { + phydev->state = PHY_CHANGELINK; + dev_err(&phydev->dev, "no link in PHY_RUNNING\n"); + } break; case PHY_CHANGELINK: err = phy_read_status(phydev); diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 1d1e5f7723ab..8179727d3423 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -1368,6 +1368,8 @@ static int phy_remove(struct device *dev) { struct phy_device *phydev = to_phy_device(dev); + cancel_delayed_work_sync(&phydev->state_queue); + mutex_lock(&phydev->lock); phydev->state = PHY_DOWN; mutex_unlock(&phydev->lock); diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 582d8f0c6266..958af3b1af7f 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -707,6 +707,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */ {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */ + {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */ {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c index 9cda1303c9e1..769f89e8d14c 100644 --- a/drivers/net/wireless/ath/ath10k/ce.c +++ b/drivers/net/wireless/ath/ath10k/ce.c @@ -1009,11 +1009,17 @@ void ath10k_ce_enable_interrupts(struct ath10k *ar) struct bus_opaque *ar_opaque = ath10k_bus_priv(ar); int ce_id; struct ath10k_ce_pipe *ce_state; + u8 ce_count; + if (QCA_REV_WCN3990(ar)) + ce_count = CE_COUNT; + else /* Skip the last copy engine, CE7 the diagnostic window, as that * uses polling and isn't initialized for interrupts. 
*/ - for (ce_id = 0; ce_id < CE_COUNT - 1; ce_id++) { + ce_count = CE_COUNT - 1; + + for (ce_id = 0; ce_id < ce_count; ce_id++) { ce_state = &ar_opaque->ce_states[ce_id]; ath10k_ce_per_engine_handler_adjust(ce_state); } diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index ec86c837e60a..01175d94adca 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -4507,6 +4507,13 @@ static int ath10k_start(struct ieee80211_hw *hw) goto err_core_stop; } + param = ar->wmi.pdev_param->idle_ps_config; + ret = ath10k_wmi_pdev_set_param(ar, param, 1); + if (ret) { + ath10k_warn(ar, "failed to enable idle_ps_config: %d\n", ret); + goto err_core_stop; + } + if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) { ret = ath10k_wmi_adaptive_qcs(ar, true); if (ret) { @@ -5472,7 +5479,8 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw, struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); struct cfg80211_scan_request *req = &hw_req->req; struct wmi_start_scan_arg arg; - int ret = 0; + const u8 *ptr; + int ret = 0, ie_skip_len = 0; int i; mutex_lock(&ar->conf_mutex); @@ -5504,8 +5512,16 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw, arg.scan_id = ATH10K_SCAN_ID; if (req->ie_len) { - arg.ie_len = req->ie_len; - memcpy(arg.ie, req->ie, arg.ie_len); + if (QCA_REV_WCN3990(ar)) { + ptr = req->ie; + while (ptr[0] == WLAN_EID_SUPP_RATES || + ptr[0] == WLAN_EID_EXT_SUPP_RATES) { + ie_skip_len = ptr[1] + 2; + ptr += ie_skip_len; + } + } + arg.ie_len = req->ie_len - ie_skip_len; + memcpy(arg.ie, req->ie + ie_skip_len, arg.ie_len); } if (req->n_ssids) { @@ -5514,6 +5530,11 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw, arg.ssids[i].len = req->ssids[i].ssid_len; arg.ssids[i].ssid = req->ssids[i].ssid; } + if (QCA_REV_WCN3990(ar)) { + arg.scan_ctrl_flags &= + ~(WMI_SCAN_ADD_BCAST_PROBE_REQ | + WMI_SCAN_CHAN_STAT_EVENT); + } } else { arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE; } @@ -6412,7 +6433,13 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw, arg.dwell_time_passive = scan_time_msec; arg.max_scan_time = scan_time_msec; arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE; - arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ; + if (QCA_REV_WCN3990(ar)) { + arg.scan_ctrl_flags &= ~(WMI_SCAN_FILTER_PROBE_REQ | + WMI_SCAN_CHAN_STAT_EVENT | + WMI_SCAN_ADD_BCAST_PROBE_REQ); + } else { + arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ; + } arg.burst_duration_ms = duration; ret = ath10k_start_scan(ar, &arg); diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c index 10223605b027..dd310d5a7028 100644 --- a/drivers/net/wireless/ath/ath10k/qmi.c +++ b/drivers/net/wireless/ath/ath10k/qmi.c @@ -719,6 +719,7 @@ static int ath10k_snoc_driver_event_server_exit(struct ath10k *ar) atomic_set(&qmi_cfg->fw_ready, 0); qmi_cfg->msa_ready = false; atomic_set(&qmi_cfg->server_connected, 0); + qmi_handle_destroy(qmi_cfg->wlfw_clnt); return 0; } @@ -896,5 +897,6 @@ void ath10k_snoc_stop_qmi_service(struct ath10k *ar) WLFW_SERVICE_INS_ID_V01, &qmi_cfg->wlfw_clnt_nb); destroy_workqueue(qmi_cfg->event_wq); + qmi_handle_destroy(qmi_cfg->wlfw_clnt); qmi_cfg = NULL; } diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c index 13736750e463..c42d7eebf465 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.c +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -17,6 +17,7 @@ #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/bitops.h> +#include 
<linux/suspend.h> #include "core.h" #include "debug.h" #include "hif.h" @@ -26,6 +27,8 @@ #include "qmi.h" #include <linux/of.h> #include <linux/platform_device.h> +#include <linux/regulator/consumer.h> +#include <linux/clk.h> #define WCN3990_MAX_IRQ 12 @@ -47,6 +50,7 @@ const char *ce_name[WCN3990_MAX_IRQ] = { #define ATH10K_SNOC_TARGET_WAIT 3000 #define ATH10K_SNOC_NUM_WARM_RESET_ATTEMPTS 3 #define SNOC_HIF_POWER_DOWN_DELAY 30 +#define ATH10K_MAX_PROP_SIZE 32 static void ath10k_snoc_buffer_cleanup(struct ath10k *ar); static int ath10k_snoc_request_irq(struct ath10k *ar); @@ -955,9 +959,13 @@ static int ath10k_snoc_init_pipes(struct ath10k *ar) static void ath10k_snoc_hif_power_down(struct ath10k *ar) { + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n"); msleep(SNOC_HIF_POWER_DOWN_DELAY); - ath10k_snoc_qmi_wlan_disable(ar); + + if (!atomic_read(&ar_snoc->pm_ops_inprogress)) + ath10k_snoc_qmi_wlan_disable(ar); } int ath10k_snoc_get_ce_id(struct ath10k *ar, int irq) @@ -1141,10 +1149,18 @@ static int ath10k_snoc_claim(struct ath10k *ar) static int ath10k_snoc_hif_power_up(struct ath10k *ar) { int ret; + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s:WCN3990 driver state = %d\n", __func__, ar->state); + if (atomic_read(&ar_snoc->pm_ops_inprogress)) { + ath10k_dbg(ar, ATH10K_DBG_SNOC, + "%s: WLAN OFF CMD Reset on PM Resume\n", __func__); + ath10k_snoc_qmi_wlan_disable(ar); + atomic_set(&ar_snoc->pm_ops_inprogress, 0); + } + if (ar->state == ATH10K_STATE_ON || test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) { ret = ath10k_snoc_bus_configure(ar); @@ -1208,6 +1224,353 @@ out: return ret; } +static +int ath10k_snoc_pm_notifier(struct notifier_block *nb, + unsigned long pm_event, void *data) +{ + struct ath10k_snoc *ar_snoc = + container_of(nb, struct ath10k_snoc, pm_notifier); + struct ath10k *ar = ar_snoc->ar; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, + "%s: PM Event: %lu\n", __func__, pm_event); + + switch (pm_event) { + case PM_HIBERNATION_PREPARE: + case PM_SUSPEND_PREPARE: + case PM_POST_HIBERNATION: + case PM_POST_SUSPEND: + case PM_RESTORE_PREPARE: + case PM_POST_RESTORE: + atomic_set(&ar_snoc->pm_ops_inprogress, 1); + break; + default: + break; + } + + return NOTIFY_DONE; +} + +static int ath10k_get_vreg_info(struct ath10k *ar, struct device *dev, + struct ath10k_wcn3990_vreg_info *vreg_info) +{ + int ret = 0; + char prop_name[ATH10K_MAX_PROP_SIZE]; + struct regulator *reg; + const __be32 *prop; + int len = 0; + int i; + + reg = devm_regulator_get_optional(dev, vreg_info->name); + if (PTR_ERR(reg) == -EPROBE_DEFER) { + ath10k_err(ar, "EPROBE_DEFER for regulator: %s\n", + vreg_info->name); + ret = PTR_ERR(reg); + goto out; + } + + if (IS_ERR(reg)) { + ret = PTR_ERR(reg); + + if (vreg_info->required) { + ath10k_err(ar, "Regulator %s doesn't exist: %d\n", + vreg_info->name, ret); + goto out; + } else { + ath10k_dbg(ar, ATH10K_DBG_SNOC, + "Optional regulator %s doesn't exist: %d\n", + vreg_info->name, ret); + goto done; + } + } + + vreg_info->reg = reg; + + snprintf(prop_name, ATH10K_MAX_PROP_SIZE, + "qcom,%s-config", vreg_info->name); + + prop = of_get_property(dev->of_node, prop_name, &len); + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "Got regulator cfg,prop: %s, len: %d\n", + prop_name, len); + + if (!prop || len < (2 * sizeof(__be32))) { + ath10k_dbg(ar, ATH10K_DBG_SNOC, "Property %s %s\n", prop_name, + prop ? 
"invalid format" : "doesn't exist"); + goto done; + } + + for (i = 0; (i * sizeof(__be32)) < len; i++) { + switch (i) { + case 0: + vreg_info->min_v = be32_to_cpup(&prop[0]); + break; + case 1: + vreg_info->max_v = be32_to_cpup(&prop[1]); + break; + case 2: + vreg_info->load_ua = be32_to_cpup(&prop[2]); + break; + case 3: + vreg_info->settle_delay = be32_to_cpup(&prop[3]); + break; + default: + ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s, ignoring val %d\n", + prop_name, i); + break; + } + } + +done: + ath10k_dbg(ar, ATH10K_DBG_SNOC, + "vreg: %s, min_v: %u, max_v: %u, load: %u, delay: %lu\n", + vreg_info->name, vreg_info->min_v, vreg_info->max_v, + vreg_info->load_ua, vreg_info->settle_delay); + + return 0; + +out: + return ret; +} + +static int ath10k_get_clk_info(struct ath10k *ar, struct device *dev, + struct ath10k_wcn3990_clk_info *clk_info) +{ + struct clk *handle; + int ret = 0; + + handle = devm_clk_get(dev, clk_info->name); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + if (clk_info->required) { + ath10k_err(ar, "Clock %s isn't available: %d\n", + clk_info->name, ret); + goto out; + } else { + ath10k_dbg(ar, ATH10K_DBG_SNOC, "Ignoring clk %s: %d\n", + clk_info->name, + ret); + ret = 0; + goto out; + } + } + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "Clock: %s, freq: %u\n", + clk_info->name, clk_info->freq); + + clk_info->handle = handle; +out: + return ret; +} + +static int ath10k_wcn3990_vreg_on(struct ath10k *ar) +{ + int ret = 0; + struct ath10k_wcn3990_vreg_info *vreg_info; + int i; + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + + for (i = 0; i < ATH10K_WCN3990_VREG_INFO_SIZE; i++) { + vreg_info = &ar_snoc->vreg[i]; + + if (!vreg_info->reg) + continue; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "Regulator %s being enabled\n", + vreg_info->name); + + ret = regulator_set_voltage(vreg_info->reg, vreg_info->min_v, + vreg_info->max_v); + if (ret) { + ath10k_err(ar, + "vreg %s, set failed:min:%u,max:%u,ret: %d\n", + vreg_info->name, vreg_info->min_v, + vreg_info->max_v, ret); + break; + } + + if (vreg_info->load_ua) { + ret = regulator_set_load(vreg_info->reg, + vreg_info->load_ua); + if (ret < 0) { + ath10k_err(ar, + "Reg %s, can't set load:%u,ret: %d\n", + vreg_info->name, + vreg_info->load_ua, ret); + break; + } + } + + ret = regulator_enable(vreg_info->reg); + if (ret) { + ath10k_err(ar, "Regulator %s, can't enable: %d\n", + vreg_info->name, ret); + break; + } + + if (vreg_info->settle_delay) + udelay(vreg_info->settle_delay); + } + + if (!ret) + return 0; + + for (; i >= 0; i--) { + vreg_info = &ar_snoc->vreg[i]; + + if (!vreg_info->reg) + continue; + + regulator_disable(vreg_info->reg); + regulator_set_load(vreg_info->reg, 0); + regulator_set_voltage(vreg_info->reg, 0, vreg_info->max_v); + } + + return ret; +} + +static int ath10k_wcn3990_vreg_off(struct ath10k *ar) +{ + int ret = 0; + struct ath10k_wcn3990_vreg_info *vreg_info; + int i; + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + + for (i = ATH10K_WCN3990_VREG_INFO_SIZE - 1; i >= 0; i--) { + vreg_info = &ar_snoc->vreg[i]; + + if (!vreg_info->reg) + continue; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "Regulator %s being disabled\n", + vreg_info->name); + + ret = regulator_disable(vreg_info->reg); + if (ret) + ath10k_err(ar, "Regulator %s, can't disable: %d\n", + vreg_info->name, ret); + + ret = regulator_set_load(vreg_info->reg, 0); + if (ret < 0) + ath10k_err(ar, "Regulator %s, can't set load: %d\n", + vreg_info->name, ret); + + ret = regulator_set_voltage(vreg_info->reg, 0, + vreg_info->max_v); + if (ret) + ath10k_err(ar, 
"Regulator %s, can't set voltage: %d\n", + vreg_info->name, ret); + } + + return ret; +} + +static int ath10k_wcn3990_clk_init(struct ath10k *ar) +{ + struct ath10k_wcn3990_clk_info *clk_info; + int i; + int ret = 0; + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + + for (i = 0; i < ATH10K_WCN3990_CLK_INFO_SIZE; i++) { + clk_info = &ar_snoc->clk[i]; + + if (!clk_info->handle) + continue; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "Clock %s being enabled\n", + clk_info->name); + + if (clk_info->freq) { + ret = clk_set_rate(clk_info->handle, clk_info->freq); + + if (ret) { + ath10k_err(ar, "Clk %s,set err: %u,ret: %d\n", + clk_info->name, clk_info->freq, + ret); + break; + } + } + + ret = clk_prepare_enable(clk_info->handle); + if (ret) { + ath10k_err(ar, "Clock %s, can't enable: %d\n", + clk_info->name, ret); + break; + } + } + + if (ret == 0) + return 0; + + for (; i >= 0; i--) { + clk_info = &ar_snoc->clk[i]; + + if (!clk_info->handle) + continue; + + clk_disable_unprepare(clk_info->handle); + } + + return ret; +} + +static int ath10k_wcn3990_clk_deinit(struct ath10k *ar) +{ + struct ath10k_wcn3990_clk_info *clk_info; + int i; + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + + for (i = 0; i < ATH10K_WCN3990_CLK_INFO_SIZE; i++) { + clk_info = &ar_snoc->clk[i]; + + if (!clk_info->handle) + continue; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "Clock %s being disabled\n", + clk_info->name); + + clk_disable_unprepare(clk_info->handle); + } + + return 0; +} + +static int ath10k_hw_power_on(struct ath10k *ar) +{ + int ret = 0; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "HW Power on\n"); + + ret = ath10k_wcn3990_vreg_on(ar); + if (ret) + goto out; + + ret = ath10k_wcn3990_clk_init(ar); + if (ret) + goto vreg_off; + + return ret; + +vreg_off: + ath10k_wcn3990_vreg_off(ar); +out: + return ret; +} + +static int ath10k_hw_power_off(struct ath10k *ar) +{ + int ret = 0; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "HW Power off\n"); + + ath10k_wcn3990_clk_deinit(ar); + + ret = ath10k_wcn3990_vreg_off(ar); + + return ret; +} + static const struct ath10k_hif_ops ath10k_snoc_hif_ops = { .tx_sg = ath10k_snoc_hif_tx_sg, .start = ath10k_snoc_hif_start, @@ -1235,6 +1598,7 @@ static int ath10k_snoc_probe(struct platform_device *pdev) enum ath10k_hw_rev hw_rev; struct device *dev; u32 chip_id; + u32 i; dev = &pdev->dev; hw_rev = ATH10K_HW_WCN3990; @@ -1268,22 +1632,43 @@ static int ath10k_snoc_probe(struct platform_device *pdev) setup_timer(&ar_snoc->rx_post_retry, ath10k_snoc_rx_replenish_retry, (unsigned long)ar); + memcpy(ar_snoc->vreg, vreg_cfg, sizeof(vreg_cfg)); + for (i = 0; i < ATH10K_WCN3990_VREG_INFO_SIZE; i++) { + ret = ath10k_get_vreg_info(ar, dev, &ar_snoc->vreg[i]); + if (ret) + goto err_core_destroy; + } + + memcpy(ar_snoc->clk, clk_cfg, sizeof(clk_cfg)); + for (i = 0; i < ATH10K_WCN3990_CLK_INFO_SIZE; i++) { + ret = ath10k_get_clk_info(ar, dev, &ar_snoc->clk[i]); + if (ret) + goto err_core_destroy; + } + + ret = ath10k_hw_power_on(ar); + if (ret) { + ath10k_err(ar, "failed to power on device: %d\n", ret); + goto err_stop_qmi_service; + } + ret = ath10k_snoc_claim(ar); if (ret) { ath10k_err(ar, "failed to claim device: %d\n", ret); - goto err_stop_qmi_service; + goto err_hw_power_off; } + ret = ath10k_snoc_bus_configure(ar); if (ret) { ath10k_err(ar, "failed to configure bus: %d\n", ret); - goto err_stop_qmi_service; + goto err_hw_power_off; } ret = ath10k_snoc_alloc_pipes(ar); if (ret) { ath10k_err(ar, "failed to allocate copy engine pipes: %d\n", ret); - goto err_stop_qmi_service; + goto err_hw_power_off; } 
netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll, @@ -1306,6 +1691,9 @@ static int ath10k_snoc_probe(struct platform_device *pdev) ath10k_snoc_modem_ssr_register_notifier(ar); ath10k_snoc_pd_restart_enable(ar); + ar_snoc->pm_notifier.notifier_call = ath10k_snoc_pm_notifier; + register_pm_notifier(&ar_snoc->pm_notifier); + ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s:WCN3990 probed\n", __func__); return 0; @@ -1316,6 +1704,9 @@ err_free_irq: err_free_pipes: ath10k_snoc_free_pipes(ar); +err_hw_power_off: + ath10k_hw_power_off(ar); + err_stop_qmi_service: ath10k_snoc_stop_qmi_service(ar); @@ -1338,6 +1729,7 @@ static int ath10k_snoc_remove(struct platform_device *pdev) ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s:WCN3990 removed\n", __func__); + unregister_pm_notifier(&ar_snoc->pm_notifier); ath10k_core_unregister(ar); ath10k_snoc_pdr_unregister_notifier(ar); ath10k_snoc_modem_ssr_unregister_notifier(ar); @@ -1345,6 +1737,7 @@ static int ath10k_snoc_remove(struct platform_device *pdev) ath10k_snoc_release_resource(ar); ath10k_snoc_free_pipes(ar); ath10k_snoc_stop_qmi_service(ar); + ath10k_hw_power_off(ar); ath10k_core_destroy(ar); return 0; diff --git a/drivers/net/wireless/ath/ath10k/snoc.h b/drivers/net/wireless/ath/ath10k/snoc.h index 7a223b1d3ded..a02cb2ad928e 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.h +++ b/drivers/net/wireless/ath/ath10k/snoc.h @@ -17,6 +17,7 @@ #include "ce.h" #include "pci.h" #include "qmi.h" +#include <linux/kernel.h> #include <soc/qcom/service-locator.h> #define ATH10K_SNOC_RX_POST_RETRY_MS 50 #define CE_POLL_PIPE 4 @@ -112,6 +113,38 @@ struct ath10k_snoc_ce_irq { u32 irq_line; }; +struct ath10k_wcn3990_vreg_info { + struct regulator *reg; + const char *name; + u32 min_v; + u32 max_v; + u32 load_ua; + unsigned long settle_delay; + bool required; +}; + +struct ath10k_wcn3990_clk_info { + struct clk *handle; + const char *name; + u32 freq; + bool required; +}; + +static struct ath10k_wcn3990_vreg_info vreg_cfg[] = { + {NULL, "vdd-0.8-cx-mx", 800000, 800000, 0, 0, false}, + {NULL, "vdd-1.8-xo", 1800000, 1800000, 0, 0, false}, + {NULL, "vdd-1.3-rfa", 1304000, 1304000, 0, 0, false}, + {NULL, "vdd-3.3-ch0", 3312000, 3312000, 0, 0, false}, +}; + +#define ATH10K_WCN3990_VREG_INFO_SIZE ARRAY_SIZE(vreg_cfg) + +static struct ath10k_wcn3990_clk_info clk_cfg[] = { + {NULL, "cxo_ref_clk_pin", 0, false}, +}; + +#define ATH10K_WCN3990_CLK_INFO_SIZE ARRAY_SIZE(clk_cfg) + /* struct ath10k_snoc: SNOC info struct * @dev: device structure * @ar:ath10k base structure @@ -148,13 +181,17 @@ struct ath10k_snoc { u32 *vaddr_rri_on_ddr; bool is_driver_probed; struct notifier_block modem_ssr_nb; + struct notifier_block pm_notifier; void *modem_notify_handler; struct ath10k_service_notifier_context *service_notifier; struct notifier_block service_notifier_nb; int total_domains; struct notifier_block get_service_nb; atomic_t fw_crashed; + atomic_t pm_ops_inprogress; struct ath10k_snoc_qmi_config qmi_cfg; + struct ath10k_wcn3990_vreg_info vreg[ATH10K_WCN3990_VREG_INFO_SIZE]; + struct ath10k_wcn3990_clk_info clk[ATH10K_WCN3990_CLK_INFO_SIZE]; }; struct ath10k_event_pd_down_data { diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index 07b15f4c1db4..f5360444a083 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -1553,11 +1553,7 @@ ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar, cmd->ie_len = __cpu_to_le32(arg->ie_len); cmd->num_probes = __cpu_to_le32(3); - if (QCA_REV_WCN3990(ar)) { - 
cmd->common.scan_ctrl_flags = ar->fw_flags->flags; - cmd->common.scan_ctrl_flags |= - __cpu_to_le32(WMI_SCAN_CHAN_STAT_EVENT); - } else { + if (!QCA_REV_WCN3990(ar)) { cmd->common.scan_ctrl_flags ^= __cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ); } diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index d6ec0de63582..86aedff096f6 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -6192,6 +6192,8 @@ void ath10k_wmi_start_scan_init(struct ath10k *ar, | WMI_SCAN_EVENT_BSS_CHANNEL | WMI_SCAN_EVENT_FOREIGN_CHANNEL | WMI_SCAN_EVENT_DEQUEUED; + if (QCA_REV_WCN3990(ar)) + arg->scan_ctrl_flags = ar->fw_flags->flags; arg->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT; arg->n_bssids = 1; arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF"; diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 7ae07a505c59..f59e5f86708b 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -2960,6 +2960,8 @@ struct wmi_start_scan_arg { /* Different FW scan engine may choose to bail out on errors. * Allow the driver to have influence over that. */ #define WMI_SCAN_CONTINUE_ON_ERROR 0x80 +/** add DS content in probe req frame */ +#define WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ 0x800 /* WMI_SCAN_CLASS_MASK must be the same value as IEEE80211_SCAN_CLASS_MASK */ #define WMI_SCAN_CLASS_MASK 0xFF000000 diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig index 0e66348e7513..2ab6c5951561 100644 --- a/drivers/net/wireless/ath/wil6210/Kconfig +++ b/drivers/net/wireless/ath/wil6210/Kconfig @@ -60,3 +60,15 @@ config WIL6210_PLATFORM_MSM ---help--- Say Y here to enable wil6210 driver support for MSM platform specific features + +config WIL6210_DEBUGFS + bool "wil6210 debugfs support" + depends on WIL6210 + depends on DEBUG_FS + default y + ---help--- + Say Y here to enable wil6210 debugfs support, using the + kernel debugfs infrastructure. Select this + option if you are interested in debugging the driver. + + If unsure, say Y to make it easier to debug problems. 
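Note on the CONFIG_WIL6210_DEBUGFS split above: with debugfs.o now built only when the new Kconfig symbol is set (see the Makefile change that follows), callers such as wil6210_debugfs_init() must still compile and link when the option is off. The companion wil6210.h change is not part of this excerpt, so the following is only a minimal sketch of the usual stub pattern; the wil6210_debugfs_remove() name and the exact signatures are assumptions inferred from the wil6210_debugfs_init() hunk later in this series, not copied from the patch.

/* Sketch only: assumed header stubs for when CONFIG_WIL6210_DEBUGFS is
 * disabled. Signatures are inferred, not taken from this diff.
 */
#ifdef CONFIG_WIL6210_DEBUGFS
int wil6210_debugfs_init(struct wil6210_priv *wil);
void wil6210_debugfs_remove(struct wil6210_priv *wil);
#else
static inline int wil6210_debugfs_init(struct wil6210_priv *wil)
{
	/* debugfs compiled out: report success so probe continues */
	return 0;
}

static inline void wil6210_debugfs_remove(struct wil6210_priv *wil)
{
}
#endif /* CONFIG_WIL6210_DEBUGFS */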
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile index 4874c5ba1e61..94df1decae7a 100644 --- a/drivers/net/wireless/ath/wil6210/Makefile +++ b/drivers/net/wireless/ath/wil6210/Makefile @@ -4,7 +4,7 @@ wil6210-y := main.o wil6210-y += netdev.o wil6210-y += cfg80211.o wil6210-y += pcie_bus.o -wil6210-y += debugfs.o +wil6210-$(CONFIG_WIL6210_DEBUGFS) += debugfs.o wil6210-y += sysfs.o wil6210-y += wmi.o wil6210-y += interrupt.o diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 94861020af12..35dfa410c90c 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -31,6 +31,12 @@ static bool ignore_reg_hints = true; module_param(ignore_reg_hints, bool, 0444); MODULE_PARM_DESC(ignore_reg_hints, " Ignore OTA regulatory hints (Default: true)"); +#ifdef CONFIG_PM +static struct wiphy_wowlan_support wil_wowlan_support = { + .flags = WIPHY_WOWLAN_ANY | WIPHY_WOWLAN_DISCONNECT, +}; +#endif + #define CHAN60G(_channel, _flags) { \ .band = IEEE80211_BAND_60GHZ, \ .center_freq = 56160 + (2160 * (_channel)), \ @@ -345,12 +351,12 @@ int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid, wil_dbg_wmi(wil, "Link status for CID %d: {\n" " MCS %d TSF 0x%016llx\n" - " BF status 0x%08x SNR 0x%08x SQI %d%%\n" + " BF status 0x%08x RSSI %d SQI %d%%\n" " Tx Tpt %d goodput %d Rx goodput %d\n" " Sectors(rx:tx) my %d:%d peer %d:%d\n""}\n", cid, le16_to_cpu(reply.evt.bf_mcs), le64_to_cpu(reply.evt.tsf), reply.evt.status, - le32_to_cpu(reply.evt.snr_val), + reply.evt.rssi, reply.evt.sqi, le32_to_cpu(reply.evt.tx_tpt), le32_to_cpu(reply.evt.tx_goodput), @@ -384,7 +390,11 @@ int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid, if (test_bit(wil_status_fwconnected, wil->status)) { sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); - sinfo->signal = reply.evt.sqi; + if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING, + wil->fw_capabilities)) + sinfo->signal = reply.evt.rssi; + else + sinfo->signal = reply.evt.sqi; } return rc; @@ -445,6 +455,34 @@ static int wil_cfg80211_dump_station(struct wiphy *wiphy, return rc; } +static int wil_cfg80211_start_p2p_device(struct wiphy *wiphy, + struct wireless_dev *wdev) +{ + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + + wil_dbg_misc(wil, "start_p2p_device: entered\n"); + wil->p2p.p2p_dev_started = 1; + return 0; +} + +static void wil_cfg80211_stop_p2p_device(struct wiphy *wiphy, + struct wireless_dev *wdev) +{ + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + struct wil_p2p_info *p2p = &wil->p2p; + + if (!p2p->p2p_dev_started) + return; + + wil_dbg_misc(wil, "stop_p2p_device: entered\n"); + mutex_lock(&wil->mutex); + mutex_lock(&wil->p2p_wdev_mutex); + wil_p2p_stop_radio_operations(wil); + p2p->p2p_dev_started = 0; + mutex_unlock(&wil->p2p_wdev_mutex); + mutex_unlock(&wil->mutex); +} + static struct wireless_dev * wil_cfg80211_add_iface(struct wiphy *wiphy, const char *name, unsigned char name_assign_type, @@ -493,6 +531,7 @@ static int wil_cfg80211_del_iface(struct wiphy *wiphy, return -EINVAL; } + wil_cfg80211_stop_p2p_device(wiphy, wdev); wil_p2p_wdev_free(wil); return 0; @@ -877,7 +916,7 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, wil->bss = bss; /* Connect can take lots of time */ mod_timer(&wil->connect_timer, - jiffies + msecs_to_jiffies(2000)); + jiffies + msecs_to_jiffies(5000)); } else { clear_bit(wil_status_fwconnecting, wil->status); } @@ -1727,34 +1766,6 @@ static int wil_cfg80211_change_bss(struct wiphy 
*wiphy, return 0; } -static int wil_cfg80211_start_p2p_device(struct wiphy *wiphy, - struct wireless_dev *wdev) -{ - struct wil6210_priv *wil = wiphy_to_wil(wiphy); - - wil_dbg_misc(wil, "start_p2p_device: entered\n"); - wil->p2p.p2p_dev_started = 1; - return 0; -} - -static void wil_cfg80211_stop_p2p_device(struct wiphy *wiphy, - struct wireless_dev *wdev) -{ - struct wil6210_priv *wil = wiphy_to_wil(wiphy); - struct wil_p2p_info *p2p = &wil->p2p; - - if (!p2p->p2p_dev_started) - return; - - wil_dbg_misc(wil, "stop_p2p_device: entered\n"); - mutex_lock(&wil->mutex); - mutex_lock(&wil->p2p_wdev_mutex); - wil_p2p_stop_radio_operations(wil); - p2p->p2p_dev_started = 0; - mutex_unlock(&wil->p2p_wdev_mutex); - mutex_unlock(&wil->mutex); -} - static int wil_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, bool enabled, int timeout) @@ -1870,7 +1881,7 @@ static void wil_wiphy_init(struct wiphy *wiphy) wiphy->bands[IEEE80211_BAND_60GHZ] = &wil_band_60ghz; - /* TODO: figure this out */ + /* may change after reading FW capabilities */ wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC; wiphy->cipher_suites = wil_cipher_suites; @@ -1887,6 +1898,10 @@ static void wil_wiphy_init(struct wiphy *wiphy) wiphy->regulatory_flags |= REGULATORY_DISABLE_BEACON_HINTS; wiphy->regulatory_flags |= REGULATORY_COUNTRY_IE_IGNORE; } + +#ifdef CONFIG_PM + wiphy->wowlan = &wil_wowlan_support; +#endif } struct wireless_dev *wil_cfg80211_init(struct device *dev) diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index 6eefb9e61ec4..65f39a4343ff 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -20,7 +20,6 @@ #include <linux/pci.h> #include <linux/rtnetlink.h> #include <linux/power_supply.h> - #include "wil6210.h" #include "wmi.h" #include "txrx.h" @@ -30,7 +29,6 @@ static u32 mem_addr; static u32 dbg_txdesc_index; static u32 dbg_vring_index; /* 24+ for Rx, 0..23 for Tx */ -u32 vring_idle_trsh = 16; /* HW fetches up to 16 descriptors at once */ enum dbg_off_type { doff_u32 = 0, @@ -1027,6 +1025,7 @@ static int wil_bf_debugfs_show(struct seq_file *s, void *data) " TSF = 0x%016llx\n" " TxMCS = %2d TxTpt = %4d\n" " SQI = %4d\n" + " RSSI = %4d\n" " Status = 0x%08x %s\n" " Sectors(rx:tx) my %2d:%2d peer %2d:%2d\n" " Goodput(rx:tx) %4d:%4d\n" @@ -1036,6 +1035,7 @@ static int wil_bf_debugfs_show(struct seq_file *s, void *data) le16_to_cpu(reply.evt.bf_mcs), le32_to_cpu(reply.evt.tx_tpt), reply.evt.sqi, + reply.evt.rssi, status, wil_bfstatus_str(status), le16_to_cpu(reply.evt.my_rx_sector), le16_to_cpu(reply.evt.my_tx_sector), @@ -1626,6 +1626,8 @@ static ssize_t wil_write_suspend_stats(struct file *file, struct wil6210_priv *wil = file->private_data; memset(&wil->suspend_stats, 0, sizeof(wil->suspend_stats)); + wil->suspend_stats.min_suspend_time = ULONG_MAX; + wil->suspend_stats.collection_start = ktime_get(); return len; } @@ -1637,18 +1639,27 @@ static ssize_t wil_read_suspend_stats(struct file *file, struct wil6210_priv *wil = file->private_data; static char text[400]; int n; + unsigned long long stats_collection_time = + ktime_to_us(ktime_sub(ktime_get(), + wil->suspend_stats.collection_start)); n = snprintf(text, sizeof(text), "Suspend statistics:\n" "successful suspends:%ld failed suspends:%ld\n" "successful resumes:%ld failed resumes:%ld\n" - "rejected by host:%ld rejected by device:%ld\n", + "rejected by host:%ld rejected by device:%ld\n" + "total suspend time:%lld min suspend time:%lld\n" + "max 
suspend time:%lld stats collection time: %lld\n", wil->suspend_stats.successful_suspends, wil->suspend_stats.failed_suspends, wil->suspend_stats.successful_resumes, wil->suspend_stats.failed_resumes, wil->suspend_stats.rejected_by_host, - wil->suspend_stats.rejected_by_device); + wil->suspend_stats.rejected_by_device, + wil->suspend_stats.total_suspend_time, + wil->suspend_stats.min_suspend_time, + wil->suspend_stats.max_suspend_time, + stats_collection_time); n = min_t(int, n, sizeof(text)); @@ -1761,6 +1772,7 @@ static const struct dbg_off dbg_wil_off[] = { WIL_FIELD(chip_revision, 0444, doff_u8), WIL_FIELD(abft_len, 0644, doff_u8), WIL_FIELD(wakeup_trigger, 0644, doff_u8), + WIL_FIELD(vring_idle_trsh, 0644, doff_u32), {}, }; @@ -1776,8 +1788,6 @@ static const struct dbg_off dbg_statics[] = { {"desc_index", 0644, (ulong)&dbg_txdesc_index, doff_u32}, {"vring_index", 0644, (ulong)&dbg_vring_index, doff_u32}, {"mem_addr", 0644, (ulong)&mem_addr, doff_u32}, - {"vring_idle_trsh", 0644, (ulong)&vring_idle_trsh, - doff_u32}, {"led_polarity", 0644, (ulong)&led_polarity, doff_u8}, {}, }; @@ -1804,6 +1814,8 @@ int wil6210_debugfs_init(struct wil6210_priv *wil) wil6210_debugfs_create_ITR_CNT(wil, dbg); + wil->suspend_stats.collection_start = ktime_get(); + return 0; } diff --git a/drivers/net/wireless/ath/wil6210/ftm.c b/drivers/net/wireless/ath/wil6210/ftm.c index 6891a38d7a59..d856e091a5de 100644 --- a/drivers/net/wireless/ath/wil6210/ftm.c +++ b/drivers/net/wireless/ath/wil6210/ftm.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -38,6 +38,9 @@ /* initial token to use on non-secure FTM measurement */ #define WIL_TOF_FTM_DEFAULT_INITIAL_TOKEN 2 +/* maximum AOA burst period, limited by FW */ +#define WIL_AOA_MAX_BURST_PERIOD 255 + #define WIL_TOF_FTM_MAX_LCI_LENGTH (240) #define WIL_TOF_FTM_MAX_LCR_LENGTH (240) @@ -62,6 +65,7 @@ nla_policy wil_nl80211_ftm_peer_policy[ [QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAGS] = { .type = NLA_U32 }, [QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_PARAMS] = { .type = NLA_NESTED }, [QCA_WLAN_VENDOR_ATTR_FTM_PEER_SECURE_TOKEN_ID] = { .type = NLA_U8 }, + [QCA_WLAN_VENDOR_ATTR_FTM_PEER_AOA_BURST_PERIOD] = { .type = NLA_U16 }, [QCA_WLAN_VENDOR_ATTR_FTM_PEER_FREQ] = { .type = NLA_U32 }, }; @@ -311,8 +315,8 @@ wil_ftm_cfg80211_start_session(struct wil6210_priv *wil, struct wmi_tof_session_start_cmd *cmd; mutex_lock(&wil->ftm.lock); - if (wil->ftm.session_started) { - wil_err(wil, "FTM session already running\n"); + if (wil->ftm.session_started || wil->ftm.aoa_started) { + wil_err(wil, "FTM or AOA session already running\n"); rc = -EAGAIN; goto out; } @@ -356,6 +360,7 @@ wil_ftm_cfg80211_start_session(struct wil6210_priv *wil, } cmd->session_id = cpu_to_le32(WIL_FTM_FW_SESSION_ID); + cmd->aoa_type = request->aoa_type; cmd->num_of_dest = cpu_to_le16(request->n_peers); for (i = 0; i < request->n_peers; i++) { ether_addr_copy(cmd->ftm_dest_info[i].dst_mac, @@ -398,6 +403,8 @@ wil_ftm_cfg80211_start_session(struct wil6210_priv *wil, request->peers[i].params.burst_duration; cmd->ftm_dest_info[i].burst_period = cpu_to_le16(request->peers[i].params.burst_period); + cmd->ftm_dest_info[i].num_burst_per_aoa_meas = + request->peers[i].aoa_burst_period; } rc = wmi_send(wil, WMI_TOF_SESSION_START_CMDID, cmd, cmd_len); @@ -482,8 +489,8 @@ 
wil_aoa_cfg80211_start_measurement(struct wil6210_priv *wil, mutex_lock(&wil->ftm.lock); - if (wil->ftm.aoa_started) { - wil_err(wil, "AOA measurement already running\n"); + if (wil->ftm.aoa_started || wil->ftm.session_started) { + wil_err(wil, "AOA or FTM measurement already running\n"); rc = -EAGAIN; goto out; } @@ -524,8 +531,8 @@ void wil_aoa_cfg80211_meas_result(struct wil6210_priv *wil, mutex_lock(&wil->ftm.lock); - if (!wil->ftm.aoa_started) { - wil_info(wil, "AOA not started, not sending result\n"); + if (!wil->ftm.aoa_started && !wil->ftm.session_started) { + wil_info(wil, "AOA/FTM not started, not sending result\n"); goto out; } @@ -749,6 +756,7 @@ int wil_ftm_start_session(struct wiphy *wiphy, struct wireless_dev *wdev, struct nlattr *tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAX + 1]; struct nlattr *peer; int rc, n_peers = 0, index = 0, tmp; + u32 aoa_type = 0; if (!test_bit(WMI_FW_CAPABILITY_FTM, wil->fw_capabilities)) return -ENOTSUPP; @@ -770,6 +778,14 @@ int wil_ftm_start_session(struct wiphy *wiphy, struct wireless_dev *wdev, return -EINVAL; } + if (tb[QCA_WLAN_VENDOR_ATTR_AOA_TYPE]) { + aoa_type = nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_AOA_TYPE]); + if (aoa_type >= QCA_WLAN_VENDOR_ATTR_AOA_TYPE_MAX) { + wil_err(wil, "invalid AOA type: %d\n", aoa_type); + return -EINVAL; + } + } + nla_for_each_nested(peer, tb[QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEERS], tmp) n_peers++; @@ -793,6 +809,7 @@ int wil_ftm_start_session(struct wiphy *wiphy, struct wireless_dev *wdev, request->session_cookie = nla_get_u64(tb[QCA_WLAN_VENDOR_ATTR_FTM_SESSION_COOKIE]); + request->aoa_type = aoa_type; request->n_peers = n_peers; nla_for_each_nested(peer, tb[QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEERS], tmp) { @@ -821,6 +838,18 @@ int wil_ftm_start_session(struct wiphy *wiphy, struct wireless_dev *wdev, if (tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_SECURE_TOKEN_ID]) request->peers[index].secure_token_id = nla_get_u8( tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_SECURE_TOKEN_ID]); + if (tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_AOA_BURST_PERIOD]) { + request->peers[index].aoa_burst_period = nla_get_u16( + tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_AOA_BURST_PERIOD]); + if (request->peers[index].aoa_burst_period > + WIL_AOA_MAX_BURST_PERIOD) { + wil_err(wil, "Invalid AOA burst period at index: %d\n", + index); + rc = -EINVAL; + goto out; + } + } + rc = wil_ftm_parse_meas_params( wil, tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_PARAMS], diff --git a/drivers/net/wireless/ath/wil6210/ftm.h b/drivers/net/wireless/ath/wil6210/ftm.h index 8efa292d5ff4..21923c27ec06 100644 --- a/drivers/net/wireless/ath/wil6210/ftm.h +++ b/drivers/net/wireless/ath/wil6210/ftm.h @@ -437,12 +437,14 @@ struct wil_ftm_meas_peer_info { u32 flags; /* enum qca_wlan_vendor_attr_ftm_peer_meas_flags */ struct wil_ftm_meas_params params; u8 secure_token_id; + u16 aoa_burst_period; /* 0 if no AOA, >0 every <value> bursts */ }; /* session request, passed to wil_ftm_cfg80211_start_session */ struct wil_ftm_session_request { u64 session_cookie; u32 n_peers; + u32 aoa_type; /* enum qca_wlan_vendor_attr_aoa_type */ /* keep last, variable size according to n_peers */ struct wil_ftm_meas_peer_info peers[0]; }; diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c index e01acac88825..7a33792913a3 100644 --- a/drivers/net/wireless/ath/wil6210/fw_inc.c +++ b/drivers/net/wireless/ath/wil6210/fw_inc.c @@ -124,24 +124,19 @@ static int fw_ignore_section(struct wil6210_priv *wil, const void *data, return 0; } -static int fw_handle_comment(struct wil6210_priv *wil, const 
void *data, - size_t size) -{ - wil_hex_dump_fw("", DUMP_PREFIX_OFFSET, 16, 1, data, size, true); - - return 0; -} - static int -fw_handle_capabilities(struct wil6210_priv *wil, const void *data, - size_t size) +fw_handle_comment(struct wil6210_priv *wil, const void *data, + size_t size) { const struct wil_fw_record_capabilities *rec = data; size_t capa_size; if (size < sizeof(*rec) || - le32_to_cpu(rec->magic) != WIL_FW_CAPABILITIES_MAGIC) + le32_to_cpu(rec->magic) != WIL_FW_CAPABILITIES_MAGIC) { + wil_hex_dump_fw("", DUMP_PREFIX_OFFSET, 16, 1, + data, size, true); return 0; + } capa_size = size - offsetof(struct wil_fw_record_capabilities, capabilities); @@ -422,7 +417,7 @@ static const struct { int (*parse_handler)(struct wil6210_priv *wil, const void *data, size_t size); } wil_fw_handlers[] = { - {wil_fw_type_comment, fw_handle_comment, fw_handle_capabilities}, + {wil_fw_type_comment, fw_handle_comment, fw_handle_comment}, {wil_fw_type_data, fw_handle_data, fw_ignore_section}, {wil_fw_type_fill, fw_handle_fill, fw_ignore_section}, /* wil_fw_type_action */ @@ -517,7 +512,7 @@ int wil_request_firmware(struct wil6210_priv *wil, const char *name, rc = request_firmware(&fw, name, wil_to_dev(wil)); if (rc) { - wil_err_fw(wil, "Failed to load firmware %s\n", name); + wil_err_fw(wil, "Failed to load firmware %s rc %d\n", name, rc); return rc; } wil_dbg_fw(wil, "Loading <%s>, %zu bytes\n", name, fw->size); diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c index cad8a95c4e4e..59def4f3fcf3 100644 --- a/drivers/net/wireless/ath/wil6210/interrupt.c +++ b/drivers/net/wireless/ath/wil6210/interrupt.c @@ -244,7 +244,7 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie) wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr); if (unlikely(!isr)) { - wil_err(wil, "spurious IRQ: RX\n"); + wil_err_ratelimited(wil, "spurious IRQ: RX\n"); return IRQ_NONE; } @@ -269,11 +269,12 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie) need_unmask = false; napi_schedule(&wil->napi_rx); } else { - wil_err(wil, + wil_err_ratelimited( + wil, "Got Rx interrupt while stopping interface\n"); } } else { - wil_err(wil, "Got Rx interrupt while in reset\n"); + wil_err_ratelimited(wil, "Got Rx interrupt while in reset\n"); } } @@ -302,7 +303,7 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie) wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr); if (unlikely(!isr)) { - wil_err(wil, "spurious IRQ: TX\n"); + wil_err_ratelimited(wil, "spurious IRQ: TX\n"); return IRQ_NONE; } @@ -318,12 +319,13 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie) need_unmask = false; napi_schedule(&wil->napi_tx); } else { - wil_err(wil, "Got Tx interrupt while in reset\n"); + wil_err_ratelimited(wil, "Got Tx interrupt while in reset\n"); } } if (unlikely(isr)) - wil_err(wil, "un-handled TX ISR bits 0x%08x\n", isr); + wil_err_ratelimited(wil, "un-handled TX ISR bits 0x%08x\n", + isr); /* Tx IRQ will be enabled when NAPI processing finished */ diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 78091b7910c7..d11e1d31fc77 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -395,10 +395,11 @@ static void wil_fw_error_worker(struct work_struct *work) struct wil6210_priv *wil = container_of(work, struct wil6210_priv, fw_error_worker); struct wireless_dev *wdev = wil->wdev; + struct net_device *ndev = wil_to_ndev(wil); wil_dbg_misc(wil, "fw error worker\n"); - if (!netif_running(wil_to_ndev(wil))) 
{ + if (!(ndev->flags & IFF_UP)) { wil_info(wil, "No recovery - interface is down\n"); return; } @@ -581,6 +582,9 @@ int wil_priv_init(struct wil6210_priv *wil) wil->wakeup_trigger = WMI_WAKEUP_TRIGGER_UCAST | WMI_WAKEUP_TRIGGER_BCAST; + memset(&wil->suspend_stats, 0, sizeof(wil->suspend_stats)); + wil->suspend_stats.min_suspend_time = ULONG_MAX; + wil->vring_idle_trsh = 16; return 0; @@ -761,6 +765,8 @@ static void wil_collect_fw_info(struct wil6210_priv *wil) u8 retry_short; int rc; + wil_refresh_fw_capabilities(wil); + rc = wmi_get_mgmt_retry(wil, &retry_short); if (!rc) { wiphy->retry_short = retry_short; @@ -768,6 +774,25 @@ static void wil_collect_fw_info(struct wil6210_priv *wil) } } +void wil_refresh_fw_capabilities(struct wil6210_priv *wil) +{ + struct wiphy *wiphy = wil_to_wiphy(wil); + + wil->keep_radio_on_during_sleep = + wil->platform_ops.keep_radio_on_during_sleep && + wil->platform_ops.keep_radio_on_during_sleep( + wil->platform_handle) && + test_bit(WMI_FW_CAPABILITY_D3_SUSPEND, wil->fw_capabilities); + + wil_info(wil, "keep_radio_on_during_sleep (%d)\n", + wil->keep_radio_on_during_sleep); + + if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING, wil->fw_capabilities)) + wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; + else + wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC; +} + void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r) { le32_to_cpus(&r->base); @@ -927,6 +952,29 @@ int wil_ps_update(struct wil6210_priv *wil, enum wmi_ps_profile_type ps_profile) return rc; } +static void wil_pre_fw_config(struct wil6210_priv *wil) +{ + /* Mark FW as loaded from host */ + wil_s(wil, RGF_USER_USAGE_6, 1); + + /* clear any interrupts which on-card-firmware + * may have set + */ + wil6210_clear_irq(wil); + /* CAF_ICR - clear and mask */ + /* it is W1C, clear by writing back same value */ + wil_s(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0); + wil_w(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0); + /* clear PAL_UNIT_ICR (potential D0->D3 leftover) */ + wil_s(wil, RGF_PAL_UNIT_ICR + offsetof(struct RGF_ICR, ICR), 0); + + if (wil->fw_calib_result > 0) { + __le32 val = cpu_to_le32(wil->fw_calib_result | + (CALIB_RESULT_SIGNATURE << 8)); + wil_w(wil, RGF_USER_FW_CALIB_RESULT, (u32 __force)val); + } +} + /* * We reset all the structures, and we reset the UMAC. 
* After calling this routine, you're expected to reload @@ -1024,18 +1072,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) if (rc) return rc; - /* Mark FW as loaded from host */ - wil_s(wil, RGF_USER_USAGE_6, 1); - - /* clear any interrupts which on-card-firmware - * may have set - */ - wil6210_clear_irq(wil); - /* CAF_ICR - clear and mask */ - /* it is W1C, clear by writing back same value */ - wil_s(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0); - wil_w(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0); - + wil_pre_fw_config(wil); wil_release_cpu(wil); } @@ -1061,14 +1098,14 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) return rc; } + wil_collect_fw_info(wil); + if (wil->ps_profile != WMI_PS_PROFILE_TYPE_DEFAULT) wil_ps_update(wil, wil->ps_profile); if (wil->tt_data_set) wmi_set_tt_cfg(wil, &wil->tt_data); - wil_collect_fw_info(wil); - if (wil->platform_ops.notify) { rc = wil->platform_ops.notify(wil->platform_handle, WIL_PLATFORM_EVT_FW_RDY); diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c index e91cddbacf24..5432b319a52e 100644 --- a/drivers/net/wireless/ath/wil6210/pcie_bus.c +++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2016 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -84,6 +84,7 @@ void wil_set_capabilities(struct wil6210_priv *wil) /* extract FW capabilities from file without loading the FW */ wil_request_firmware(wil, wil->wil_fw_name, false); + wil_refresh_fw_capabilities(wil); } void wil_disable_irq(struct wil6210_priv *wil) @@ -286,15 +287,6 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) wil_set_capabilities(wil); wil6210_clear_irq(wil); - wil->keep_radio_on_during_sleep = - wil->platform_ops.keep_radio_on_during_sleep && - wil->platform_ops.keep_radio_on_during_sleep( - wil->platform_handle) && - test_bit(WMI_FW_CAPABILITY_D3_SUSPEND, wil->fw_capabilities); - - wil_info(wil, "keep_radio_on_during_sleep (%d)\n", - wil->keep_radio_on_during_sleep); - /* FW should raise IRQ when ready */ rc = wil_if_pcie_enable(wil); if (rc) { diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c index ce1f384e7f8e..8f5d1b447aaa 100644 --- a/drivers/net/wireless/ath/wil6210/pm.c +++ b/drivers/net/wireless/ath/wil6210/pm.c @@ -21,10 +21,11 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime) { int rc = 0; struct wireless_dev *wdev = wil->wdev; + struct net_device *ndev = wil_to_ndev(wil); wil_dbg_pm(wil, "can_suspend: %s\n", is_runtime ? "runtime" : "system"); - if (!netif_running(wil_to_ndev(wil))) { + if (!(ndev->flags & IFF_UP)) { /* can always sleep when down */ wil_dbg_pm(wil, "Interface is down\n"); goto out; @@ -85,7 +86,9 @@ static int wil_resume_keep_radio_on(struct wil6210_priv *wil) /* Send WMI resume request to the device */ rc = wmi_resume(wil); if (rc) { - wil_err(wil, "device failed to resume (%d), resetting\n", rc); + wil_err(wil, "device failed to resume (%d)\n", rc); + if (no_fw_recovery) + goto out; rc = wil_down(wil); if (rc) { wil_err(wil, "wil_down failed (%d)\n", rc); @@ -298,6 +301,9 @@ int wil_suspend(struct wil6210_priv *wil, bool is_runtime) wil_dbg_pm(wil, "suspend: %s => %d\n", is_runtime ? 
"runtime" : "system", rc); + if (!rc) + wil->suspend_stats.suspend_start_time = ktime_get(); + return rc; } @@ -307,6 +313,7 @@ int wil_resume(struct wil6210_priv *wil, bool is_runtime) struct net_device *ndev = wil_to_ndev(wil); bool keep_radio_on = ndev->flags & IFF_UP && wil->keep_radio_on_during_sleep; + unsigned long long suspend_time_usec = 0; wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system"); @@ -324,8 +331,20 @@ int wil_resume(struct wil6210_priv *wil, bool is_runtime) else rc = wil_resume_radio_off(wil); + if (rc) + goto out; + + suspend_time_usec = + ktime_to_us(ktime_sub(ktime_get(), + wil->suspend_stats.suspend_start_time)); + wil->suspend_stats.total_suspend_time += suspend_time_usec; + if (suspend_time_usec < wil->suspend_stats.min_suspend_time) + wil->suspend_stats.min_suspend_time = suspend_time_usec; + if (suspend_time_usec > wil->suspend_stats.max_suspend_time) + wil->suspend_stats.max_suspend_time = suspend_time_usec; + out: - wil_dbg_pm(wil, "resume: %s => %d\n", - is_runtime ? "runtime" : "system", rc); + wil_dbg_pm(wil, "resume: %s => %d, suspend time %lld usec\n", + is_runtime ? "runtime" : "system", rc, suspend_time_usec); return rc; } diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index 8f1e79b425cf..8fe2239603d1 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -1666,7 +1666,7 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring, /* performance monitoring */ used = wil_vring_used_tx(vring); - if (wil_val_in_range(vring_idle_trsh, + if (wil_val_in_range(wil->vring_idle_trsh, used, used + descs_used)) { txdata->idle += get_cycles() - txdata->last_idle; wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n", @@ -1813,7 +1813,7 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, /* performance monitoring */ used = wil_vring_used_tx(vring); - if (wil_val_in_range(vring_idle_trsh, + if (wil_val_in_range(wil->vring_idle_trsh, used, used + nr_frags + 1)) { txdata->idle += get_cycles() - txdata->last_idle; wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n", @@ -2175,7 +2175,7 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid) /* performance monitoring */ used_new = wil_vring_used_tx(vring); - if (wil_val_in_range(vring_idle_trsh, + if (wil_val_in_range(wil->vring_idle_trsh, used_new, used_before_complete)) { wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n", ringid, used_before_complete, used_new); diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 9525f521d215..ef10abc07da6 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -31,7 +31,6 @@ extern bool no_fw_recovery; extern unsigned int mtu_max; extern unsigned short rx_ring_overflow_thrsh; extern int agg_wsize; -extern u32 vring_idle_trsh; extern bool rx_align_2; extern bool rx_large_buf; extern bool debug_fw; @@ -91,6 +90,11 @@ struct wil_suspend_stats { unsigned long failed_resumes; unsigned long rejected_by_device; unsigned long rejected_by_host; + unsigned long long total_suspend_time; + unsigned long long min_suspend_time; + unsigned long long max_suspend_time; + ktime_t collection_start; + ktime_t suspend_start_time; }; /* Calculate MAC buffer size for the firmware. 
It includes all overhead, @@ -170,6 +174,10 @@ struct RGF_ICR { #define RGF_USER_USER_SCRATCH_PAD (0x8802bc) #define RGF_USER_BL (0x880A3C) /* Boot Loader */ #define RGF_USER_FW_REV_ID (0x880a8c) /* chip revision */ +#define RGF_USER_FW_CALIB_RESULT (0x880a90) /* b0-7:result + * b8-15:signature + */ + #define CALIB_RESULT_SIGNATURE (0x11) #define RGF_USER_CLKS_CTL_0 (0x880abc) #define BIT_USER_CLKS_CAR_AHB_SW_SEL BIT(1) /* ref clk/PLL */ #define BIT_USER_CLKS_RST_PWGD BIT(11) /* reset on "power good" */ @@ -267,6 +275,7 @@ struct RGF_ICR { #define BIT_DMA_PSEUDO_CAUSE_MISC BIT(2) #define RGF_HP_CTRL (0x88265c) +#define RGF_PAL_UNIT_ICR (0x88266c) /* struct RGF_ICR */ #define RGF_PCIE_LOS_COUNTER_CTL (0x882dc4) /* MAC timer, usec, for packet lifetime */ @@ -692,6 +701,7 @@ struct wil6210_priv { u8 vring2cid_tid[WIL6210_MAX_TX_RINGS][2]; /* [0] - CID, [1] - TID */ struct wil_sta_info sta[WIL6210_MAX_CID]; int bcast_vring; + u32 vring_idle_trsh; /* HW fetches up to 16 descriptors at once */ bool use_extended_dma_addr; /* indicates whether we are using 48 bits */ /* scan */ struct cfg80211_scan_request *scan_request; @@ -731,6 +741,8 @@ struct wil6210_priv { bool tt_data_set; struct wmi_tt_data tt_data; + int fw_calib_result; + #ifdef CONFIG_PM #ifdef CONFIG_PM_SLEEP struct notifier_block pm_notify; @@ -861,6 +873,7 @@ int wil_up(struct wil6210_priv *wil); int __wil_up(struct wil6210_priv *wil); int wil_down(struct wil6210_priv *wil); int __wil_down(struct wil6210_priv *wil); +void wil_refresh_fw_capabilities(struct wil6210_priv *wil); void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r); int wil_find_cid(struct wil6210_priv *wil, const u8 *mac); void wil_set_ethtoolops(struct net_device *ndev); @@ -943,8 +956,13 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_mgmt_tx_params *params, u64 *cookie); +#if defined(CONFIG_WIL6210_DEBUGFS) int wil6210_debugfs_init(struct wil6210_priv *wil); void wil6210_debugfs_remove(struct wil6210_priv *wil); +#else +static inline int wil6210_debugfs_init(struct wil6210_priv *wil) { return 0; } +static inline void wil6210_debugfs_remove(struct wil6210_priv *wil) {} +#endif int wil6210_sysfs_init(struct wil6210_priv *wil); void wil6210_sysfs_remove(struct wil6210_priv *wil); int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid, diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index fba0d6a79ae2..e421fdad81e2 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -345,6 +345,11 @@ static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len) strlcpy(wdev->wiphy->fw_version, wil->fw_version, sizeof(wdev->wiphy->fw_version)); + if (len > offsetof(struct wmi_ready_event, rfc_read_calib_result)) { + wil_dbg_wmi(wil, "rfc calibration result %d\n", + evt->rfc_read_calib_result); + wil->fw_calib_result = evt->rfc_read_calib_result; + } wil_set_recovery_state(wil, fw_recovery_idle); set_bit(wil_status_fwready, wil->status); /* let the reset sequence continue */ @@ -382,12 +387,15 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len) ch_no = data->info.channel + 1; freq = ieee80211_channel_to_frequency(ch_no, IEEE80211_BAND_60GHZ); channel = ieee80211_get_channel(wiphy, freq); - signal = data->info.sqi; + if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING, wil->fw_capabilities)) + signal = 100 * data->info.rssi; + else + signal = data->info.sqi; d_status = le16_to_cpu(data->info.status); fc = 
rx_mgmt_frame->frame_control; - wil_dbg_wmi(wil, "MGMT Rx: channel %d MCS %d SNR %d SQI %d%%\n", - data->info.channel, data->info.mcs, data->info.snr, + wil_dbg_wmi(wil, "MGMT Rx: channel %d MCS %d RSSI %d SQI %d%%\n", + data->info.channel, data->info.mcs, data->info.rssi, data->info.sqi); wil_dbg_wmi(wil, "status 0x%04x len %d fc 0x%04x\n", d_status, d_len, le16_to_cpu(fc)); diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h index 256f63c57da0..5263ee717a4f 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.h +++ b/drivers/net/wireless/ath/wil6210/wmi.h @@ -36,6 +36,11 @@ #define WMI_PROX_RANGE_NUM (3) #define WMI_MAX_LOSS_DMG_BEACONS (20) #define MAX_NUM_OF_SECTORS (128) +#define WMI_SCHED_MAX_ALLOCS_PER_CMD (4) +#define WMI_RF_DTYPE_LENGTH (3) +#define WMI_RF_ETYPE_LENGTH (3) +#define WMI_RF_RX2TX_LENGTH (3) +#define WMI_RF_ETYPE_VAL_PER_RANGE (5) /* Mailbox interface * used for commands and events @@ -52,14 +57,20 @@ enum wmi_mid { * the host */ enum wmi_fw_capability { - WMI_FW_CAPABILITY_FTM = 0, - WMI_FW_CAPABILITY_PS_CONFIG = 1, - WMI_FW_CAPABILITY_RF_SECTORS = 2, - WMI_FW_CAPABILITY_MGMT_RETRY_LIMIT = 3, - WMI_FW_CAPABILITY_DISABLE_AP_SME = 4, - WMI_FW_CAPABILITY_WMI_ONLY = 5, - WMI_FW_CAPABILITY_THERMAL_THROTTLING = 7, - WMI_FW_CAPABILITY_D3_SUSPEND = 8, + WMI_FW_CAPABILITY_FTM = 0, + WMI_FW_CAPABILITY_PS_CONFIG = 1, + WMI_FW_CAPABILITY_RF_SECTORS = 2, + WMI_FW_CAPABILITY_MGMT_RETRY_LIMIT = 3, + WMI_FW_CAPABILITY_DISABLE_AP_SME = 4, + WMI_FW_CAPABILITY_WMI_ONLY = 5, + WMI_FW_CAPABILITY_THERMAL_THROTTLING = 7, + WMI_FW_CAPABILITY_D3_SUSPEND = 8, + WMI_FW_CAPABILITY_LONG_RANGE = 9, + WMI_FW_CAPABILITY_FIXED_SCHEDULING = 10, + WMI_FW_CAPABILITY_MULTI_DIRECTED_OMNIS = 11, + WMI_FW_CAPABILITY_RSSI_REPORTING = 12, + WMI_FW_CAPABILITY_SET_SILENT_RSSI_TABLE = 13, + WMI_FW_CAPABILITY_LO_POWER_CALIB_FROM_OTP = 14, WMI_FW_CAPABILITY_MAX, }; @@ -79,6 +90,7 @@ enum wmi_command_id { WMI_START_SCAN_CMDID = 0x07, WMI_SET_BSS_FILTER_CMDID = 0x09, WMI_SET_PROBED_SSID_CMDID = 0x0A, + /* deprecated */ WMI_SET_LISTEN_INT_CMDID = 0x0B, WMI_BCON_CTRL_CMDID = 0x0F, WMI_ADD_CIPHER_KEY_CMDID = 0x16, @@ -93,26 +105,28 @@ enum wmi_command_id { WMI_ECHO_CMDID = 0x803, WMI_DEEP_ECHO_CMDID = 0x804, WMI_CONFIG_MAC_CMDID = 0x805, + /* deprecated */ WMI_CONFIG_PHY_DEBUG_CMDID = 0x806, WMI_ADD_DEBUG_TX_PCKT_CMDID = 0x808, WMI_PHY_GET_STATISTICS_CMDID = 0x809, + /* deprecated */ WMI_FS_TUNE_CMDID = 0x80A, + /* deprecated */ WMI_CORR_MEASURE_CMDID = 0x80B, WMI_READ_RSSI_CMDID = 0x80C, WMI_TEMP_SENSE_CMDID = 0x80E, WMI_DC_CALIB_CMDID = 0x80F, + /* deprecated */ WMI_SEND_TONE_CMDID = 0x810, + /* deprecated */ WMI_IQ_TX_CALIB_CMDID = 0x811, + /* deprecated */ WMI_IQ_RX_CALIB_CMDID = 0x812, - WMI_SET_UCODE_IDLE_CMDID = 0x813, WMI_SET_WORK_MODE_CMDID = 0x815, WMI_LO_LEAKAGE_CALIB_CMDID = 0x816, - WMI_MARLON_R_READ_CMDID = 0x818, - WMI_MARLON_R_WRITE_CMDID = 0x819, - WMI_MARLON_R_TXRX_SEL_CMDID = 0x81A, - MAC_IO_STATIC_PARAMS_CMDID = 0x81B, - MAC_IO_DYNAMIC_PARAMS_CMDID = 0x81C, + WMI_LO_POWER_CALIB_FROM_OTP_CMDID = 0x817, WMI_SILENT_RSSI_CALIB_CMDID = 0x81D, + /* deprecated */ WMI_RF_RX_TEST_CMDID = 0x81E, WMI_CFG_RX_CHAIN_CMDID = 0x820, WMI_VRING_CFG_CMDID = 0x821, @@ -126,11 +140,6 @@ enum wmi_command_id { WMI_SET_PCP_CHANNEL_CMDID = 0x829, WMI_GET_PCP_CHANNEL_CMDID = 0x82A, WMI_SW_TX_REQ_CMDID = 0x82B, - WMI_READ_MAC_RXQ_CMDID = 0x830, - WMI_READ_MAC_TXQ_CMDID = 0x831, - WMI_WRITE_MAC_RXQ_CMDID = 0x832, - WMI_WRITE_MAC_TXQ_CMDID = 0x833, - WMI_WRITE_MAC_XQ_FIELD_CMDID = 0x834, 
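For context on the new WMI_FW_CAPABILITY_RSSI_REPORTING bit used in wmi_evt_rx_mgmt() above: when the capability is present the wiphy advertises CFG80211_SIGNAL_TYPE_MBM, so the firmware RSSI (in dBm) is scaled to mBm (1/100 dBm); without it the unit-less SQI percentage is passed through as before. A minimal sketch of that selection, with wil_mgmt_rx_signal() being a hypothetical helper name rather than a function added by this patch:

/* Sketch only: mirrors the signal selection logic in wmi_evt_rx_mgmt(). */
static s32 wil_mgmt_rx_signal(struct wil6210_priv *wil, s8 rssi_dbm, u8 sqi)
{
	if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING, wil->fw_capabilities))
		return 100 * rssi_dbm;	/* dBm -> mBm for CFG80211_SIGNAL_TYPE_MBM */

	return sqi;			/* percentage, CFG80211_SIGNAL_TYPE_UNSPEC */
}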
WMI_MLME_PUSH_CMDID = 0x835, WMI_BEAMFORMING_MGMT_CMDID = 0x836, WMI_BF_TXSS_MGMT_CMDID = 0x837, @@ -144,9 +153,13 @@ enum wmi_command_id { WMI_MAINTAIN_RESUME_CMDID = 0x851, WMI_RS_MGMT_CMDID = 0x852, WMI_RF_MGMT_CMDID = 0x853, - WMI_OTP_READ_CMDID = 0x856, - WMI_OTP_WRITE_CMDID = 0x857, + WMI_RF_XPM_READ_CMDID = 0x856, + WMI_RF_XPM_WRITE_CMDID = 0x857, WMI_LED_CFG_CMDID = 0x858, + WMI_SET_CONNECT_SNR_THR_CMDID = 0x85B, + WMI_SET_ACTIVE_SILENT_RSSI_TABLE_CMDID = 0x85C, + WMI_RF_PWR_ON_DELAY_CMDID = 0x85D, + WMI_SET_HIGH_POWER_TABLE_PARAMS_CMDID = 0x85E, /* Performance monitoring commands */ WMI_BF_CTRL_CMDID = 0x862, WMI_NOTIFY_REQ_CMDID = 0x863, @@ -154,7 +167,6 @@ enum wmi_command_id { WMI_GET_RF_STATUS_CMDID = 0x866, WMI_GET_BASEBAND_TYPE_CMDID = 0x867, WMI_UNIT_TEST_CMDID = 0x900, - WMI_HICCUP_CMDID = 0x901, WMI_FLASH_READ_CMDID = 0x902, WMI_FLASH_WRITE_CMDID = 0x903, /* Power management */ @@ -174,16 +186,6 @@ enum wmi_command_id { WMI_GET_PCP_FACTOR_CMDID = 0x91B, /* Power Save Configuration Commands */ WMI_PS_DEV_PROFILE_CFG_CMDID = 0x91C, - /* Not supported yet */ - WMI_PS_DEV_CFG_CMDID = 0x91D, - /* Not supported yet */ - WMI_PS_DEV_CFG_READ_CMDID = 0x91E, - /* Per MAC Power Save Configuration commands - * Not supported yet - */ - WMI_PS_MID_CFG_CMDID = 0x91F, - /* Not supported yet */ - WMI_PS_MID_CFG_READ_CMDID = 0x920, WMI_RS_CFG_CMDID = 0x921, WMI_GET_DETAILED_RS_RES_CMDID = 0x922, WMI_AOA_MEAS_CMDID = 0x923, @@ -194,13 +196,16 @@ enum wmi_command_id { WMI_DEL_STA_CMDID = 0x936, WMI_SET_THERMAL_THROTTLING_CFG_CMDID = 0x940, WMI_GET_THERMAL_THROTTLING_CFG_CMDID = 0x941, + /* Read Power Save profile type */ + WMI_PS_DEV_PROFILE_CFG_READ_CMDID = 0x942, WMI_TOF_SESSION_START_CMDID = 0x991, WMI_TOF_GET_CAPABILITIES_CMDID = 0x992, WMI_TOF_SET_LCR_CMDID = 0x993, WMI_TOF_SET_LCI_CMDID = 0x994, - WMI_TOF_CHANNEL_INFO_CMDID = 0x995, + WMI_TOF_CFG_RESPONDER_CMDID = 0x996, WMI_TOF_SET_TX_RX_OFFSET_CMDID = 0x997, WMI_TOF_GET_TX_RX_OFFSET_CMDID = 0x998, + WMI_TOF_CHANNEL_INFO_CMDID = 0x999, WMI_GET_RF_SECTOR_PARAMS_CMDID = 0x9A0, WMI_SET_RF_SECTOR_PARAMS_CMDID = 0x9A1, WMI_GET_SELECTED_RF_SECTOR_INDEX_CMDID = 0x9A2, @@ -209,12 +214,20 @@ enum wmi_command_id { WMI_PRIO_TX_SECTORS_ORDER_CMDID = 0x9A5, WMI_PRIO_TX_SECTORS_NUMBER_CMDID = 0x9A6, WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_CMDID = 0x9A7, + WMI_SCHEDULING_SCHEME_CMDID = 0xA01, + WMI_FIXED_SCHEDULING_CONFIG_CMDID = 0xA02, + WMI_ENABLE_FIXED_SCHEDULING_CMDID = 0xA03, + WMI_SET_MULTI_DIRECTED_OMNIS_CONFIG_CMDID = 0xA04, + WMI_SET_LONG_RANGE_CONFIG_CMDID = 0xA05, WMI_SET_MAC_ADDRESS_CMDID = 0xF003, WMI_ABORT_SCAN_CMDID = 0xF007, WMI_SET_PROMISCUOUS_MODE_CMDID = 0xF041, + /* deprecated */ WMI_GET_PMK_CMDID = 0xF048, WMI_SET_PASSPHRASE_CMDID = 0xF049, + /* deprecated */ WMI_SEND_ASSOC_RES_CMDID = 0xF04A, + /* deprecated */ WMI_SET_ASSOC_REQ_RELAY_CMDID = 0xF04B, WMI_MAC_ADDR_REQ_CMDID = 0xF04D, WMI_FW_VER_CMDID = 0xF04E, @@ -440,11 +453,6 @@ struct wmi_rf_mgmt_cmd { __le32 rf_mgmt_type; } __packed; -/* WMI_RF_RX_TEST_CMDID */ -struct wmi_rf_rx_test_cmd { - __le32 sector; -} __packed; - /* WMI_CORR_MEASURE_CMDID */ struct wmi_corr_measure_cmd { __le32 freq_mhz; @@ -657,6 +665,20 @@ struct wmi_bcast_vring_cfg_cmd { struct wmi_bcast_vring_cfg vring_cfg; } __packed; +/* WMI_LO_POWER_CALIB_FROM_OTP_CMDID */ +struct wmi_lo_power_calib_from_otp_cmd { + /* index to read from OTP. 
zero based */ + u8 index; + u8 reserved[3]; +} __packed; + +/* WMI_LO_POWER_CALIB_FROM_OTP_EVENTID */ +struct wmi_lo_power_calib_from_otp_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + /* WMI_VRING_BA_EN_CMDID */ struct wmi_vring_ba_en_cmd { u8 ringid; @@ -692,6 +714,24 @@ enum wmi_sniffer_cfg_mode { WMI_SNIFFER_ON = 0x01, }; +/* WMI_SILENT_RSSI_TABLE */ +enum wmi_silent_rssi_table { + RF_TEMPERATURE_CALIB_DEFAULT_DB = 0x00, + RF_TEMPERATURE_CALIB_HIGH_POWER_DB = 0x01, +}; + +/* WMI_SILENT_RSSI_STATUS */ +enum wmi_silent_rssi_status { + SILENT_RSSI_SUCCESS = 0x00, + SILENT_RSSI_FAILURE = 0x01, +}; + +/* WMI_SET_ACTIVE_SILENT_RSSI_TABLE_CMDID */ +struct wmi_set_active_silent_rssi_table_cmd { + /* enum wmi_silent_rssi_table */ + __le32 table; +} __packed; + enum wmi_sniffer_cfg_phy_info_mode { WMI_SNIFFER_PHY_INFO_DISABLED = 0x00, WMI_SNIFFER_PHY_INFO_ENABLED = 0x01, @@ -835,18 +875,85 @@ struct wmi_echo_cmd { __le32 value; } __packed; -/* WMI_OTP_READ_CMDID */ -struct wmi_otp_read_cmd { - __le32 addr; - __le32 size; - __le32 values; +/* WMI_RF_PWR_ON_DELAY_CMDID + * set FW time parameters used through RF resetting + * RF reset consists of bringing its power down for a period of time, then + * bringing the power up + * Returned event: WMI_RF_PWR_ON_DELAY_RSP_EVENTID + */ +struct wmi_rf_pwr_on_delay_cmd { + /* time in usec the FW waits after bringing the RF PWR down, + * set 0 for default + */ + __le16 down_delay_usec; + /* time in usec the FW waits after bringing the RF PWR up, + * set 0 for default + */ + __le16 up_delay_usec; +} __packed; + +/* \WMI_SET_HIGH_POWER_TABLE_PARAMS_CMDID + * This API controls the Tx and Rx gain over temperature. + * It controls the Tx D-type, Rx D-type and Rx E-type amplifiers. + * It also controls the Tx gain index, by controlling the Rx to Tx gain index + * offset. + * The control is divided by 3 temperature values to 4 temperature ranges. + * Each parameter uses its own temperature values. + * Returned event: WMI_SET_HIGH_POWER_TABLE_PARAMS_EVENTID + */ +struct wmi_set_high_power_table_params_cmd { + /* Temperature range for Tx D-type parameters */ + u8 tx_dtype_temp[WMI_RF_DTYPE_LENGTH]; + u8 reserved0; + /* Tx D-type values to be used for each temperature range */ + __le32 tx_dtype_conf[WMI_RF_DTYPE_LENGTH + 1]; + /* Temperature range for Rx D-type parameters */ + u8 rx_dtype_temp[WMI_RF_DTYPE_LENGTH]; + u8 reserved1; + /* Rx D-type values to be used for each temperature range */ + __le32 rx_dtype_conf[WMI_RF_DTYPE_LENGTH + 1]; + /* Temperature range for Rx E-type parameters */ + u8 rx_etype_temp[WMI_RF_ETYPE_LENGTH]; + u8 reserved2; + /* Rx E-type values to be used for each temperature range. 
+ * The last 4 values of any range are the first 4 values of the next + * range and so on + */ + __le32 rx_etype_conf[WMI_RF_ETYPE_VAL_PER_RANGE + WMI_RF_ETYPE_LENGTH]; + /* Temperature range for rx_2_tx_offs parameters */ + u8 rx_2_tx_temp[WMI_RF_RX2TX_LENGTH]; + u8 reserved3; + /* Rx to Tx gain index offset */ + s8 rx_2_tx_offs[WMI_RF_RX2TX_LENGTH + 1]; +} __packed; + +/* CMD: WMI_RF_XPM_READ_CMDID */ +struct wmi_rf_xpm_read_cmd { + u8 rf_id; + u8 reserved[3]; + /* XPM bit start address in range [0,8191]bits - rounded by FW to + * multiple of 8bits + */ + __le32 xpm_bit_address; + __le32 num_bytes; } __packed; -/* WMI_OTP_WRITE_CMDID */ -struct wmi_otp_write_cmd { - __le32 addr; - __le32 size; - __le32 values; +/* CMD: WMI_RF_XPM_WRITE_CMDID */ +struct wmi_rf_xpm_write_cmd { + u8 rf_id; + u8 reserved0[3]; + /* XPM bit start address in range [0,8191]bits - rounded by FW to + * multiple of 8bits + */ + __le32 xpm_bit_address; + __le32 num_bytes; + /* boolean flag indicating whether FW should verify the write + * operation + */ + u8 verify; + u8 reserved1[3]; + /* actual size=num_bytes */ + u8 data_bytes[0]; } __packed; /* WMI_TEMP_SENSE_CMDID @@ -989,19 +1096,26 @@ struct wmi_ftm_dest_info { */ __le16 burst_period; u8 dst_mac[WMI_MAC_LEN]; - __le16 reserved; + u8 reserved; + u8 num_burst_per_aoa_meas; } __packed; /* WMI_TOF_SESSION_START_CMDID */ struct wmi_tof_session_start_cmd { __le32 session_id; - u8 num_of_aoa_measures; + u8 reserved1; u8 aoa_type; __le16 num_of_dest; u8 reserved[4]; struct wmi_ftm_dest_info ftm_dest_info[0]; } __packed; +/* WMI_TOF_CFG_RESPONDER_CMDID */ +struct wmi_tof_cfg_responder_cmd { + u8 enable; + u8 reserved[3]; +} __packed; + enum wmi_tof_channel_info_report_type { WMI_TOF_CHANNEL_INFO_TYPE_CIR = 0x1, WMI_TOF_CHANNEL_INFO_TYPE_RSSI = 0x2, @@ -1022,7 +1136,99 @@ struct wmi_tof_set_tx_rx_offset_cmd { __le32 tx_offset; /* RX delay offset */ __le32 rx_offset; - __le32 reserved[2]; + /* Mask to define which RFs to configure. 
0 means all RFs */ + __le32 rf_mask; + /* Offset to strongest tap of CIR */ + __le32 precursor; +} __packed; + +/* WMI_TOF_GET_TX_RX_OFFSET_CMDID */ +struct wmi_tof_get_tx_rx_offset_cmd { + /* rf index to read offsets from */ + u8 rf_index; + u8 reserved[3]; +} __packed; + +/* WMI_FIXED_SCHEDULING_CONFIG_CMDID */ +struct wmi_map_mcs_to_schd_params { + u8 mcs; + /* time in usec from start slot to start tx flow - default 15 */ + u8 time_in_usec_before_initiate_tx; + /* RD enable - if yes consider RD according to STA mcs */ + u8 rd_enabled; + u8 reserved; + /* time in usec from start slot to stop vring */ + __le16 time_in_usec_to_stop_vring; + /* timeout to force flush from start of slot */ + __le16 flush_to_in_usec; + /* per mcs the mac buffer limit size in bytes */ + __le32 mac_buff_size_in_bytes; +} __packed; + +/* WMI_FIXED_SCHEDULING_CONFIG_COMPLETE_EVENTID */ +struct wmi_fixed_scheduling_config_complete_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + +#define WMI_NUM_MCS (13) + +/* WMI_FIXED_SCHEDULING_CONFIG_CMDID */ +struct wmi_fixed_scheduling_config_cmd { + /* defaults in the SAS table */ + struct wmi_map_mcs_to_schd_params mcs_to_schd_params_map[WMI_NUM_MCS]; + /* default 150 uSec */ + __le16 max_sta_rd_ppdu_duration_in_usec; + /* default 300 uSec */ + __le16 max_sta_grant_ppdu_duration_in_usec; + /* default 1000 uSec */ + __le16 assoc_slot_duration_in_usec; + /* default 360 uSec */ + __le16 virtual_slot_duration_in_usec; + /* each this field value slots start with grant frame to the station + * - default 2 + */ + u8 number_of_ap_slots_for_initiate_grant; + u8 reserved[3]; +} __packed; + +/* WMI_ENABLE_FIXED_SCHEDULING_CMDID */ +struct wmi_enable_fixed_scheduling_cmd { + __le32 reserved; +} __packed; + +/* WMI_ENABLE_FIXED_SCHEDULING_COMPLETE_EVENTID */ +struct wmi_enable_fixed_scheduling_complete_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + +/* WMI_SET_MULTI_DIRECTED_OMNIS_CONFIG_CMDID */ +struct wmi_set_multi_directed_omnis_config_cmd { + /* number of directed omnis at destination AP */ + u8 dest_ap_num_directed_omnis; + u8 reserved[3]; +} __packed; + +/* WMI_SET_MULTI_DIRECTED_OMNIS_CONFIG_EVENTID */ +struct wmi_set_multi_directed_omnis_config_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + +/* WMI_SET_LONG_RANGE_CONFIG_CMDID */ +struct wmi_set_long_range_config_cmd { + __le32 reserved; +} __packed; + +/* WMI_SET_LONG_RANGE_CONFIG_COMPLETE_EVENTID */ +struct wmi_set_long_range_config_complete_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; } __packed; /* WMI Events @@ -1038,19 +1244,22 @@ enum wmi_event_id { WMI_FW_READY_EVENTID = 0x1801, WMI_EXIT_FAST_MEM_ACC_MODE_EVENTID = 0x200, WMI_ECHO_RSP_EVENTID = 0x1803, + /* deprecated */ WMI_FS_TUNE_DONE_EVENTID = 0x180A, + /* deprecated */ WMI_CORR_MEASURE_EVENTID = 0x180B, WMI_READ_RSSI_EVENTID = 0x180C, WMI_TEMP_SENSE_DONE_EVENTID = 0x180E, WMI_DC_CALIB_DONE_EVENTID = 0x180F, + /* deprecated */ WMI_IQ_TX_CALIB_DONE_EVENTID = 0x1811, + /* deprecated */ WMI_IQ_RX_CALIB_DONE_EVENTID = 0x1812, WMI_SET_WORK_MODE_DONE_EVENTID = 0x1815, WMI_LO_LEAKAGE_CALIB_DONE_EVENTID = 0x1816, - WMI_MARLON_R_READ_DONE_EVENTID = 0x1818, - WMI_MARLON_R_WRITE_DONE_EVENTID = 0x1819, - WMI_MARLON_R_TXRX_SEL_DONE_EVENTID = 0x181A, + WMI_LO_POWER_CALIB_FROM_OTP_EVENTID = 0x1817, WMI_SILENT_RSSI_CALIB_DONE_EVENTID = 0x181D, + /* deprecated */ WMI_RF_RX_TEST_DONE_EVENTID = 0x181E, WMI_CFG_RX_CHAIN_DONE_EVENTID = 0x1820, WMI_VRING_CFG_DONE_EVENTID = 0x1821, @@ 
-1061,11 +1270,6 @@ enum wmi_event_id { WMI_GET_SSID_EVENTID = 0x1828, WMI_GET_PCP_CHANNEL_EVENTID = 0x182A, WMI_SW_TX_COMPLETE_EVENTID = 0x182B, - WMI_READ_MAC_RXQ_EVENTID = 0x1830, - WMI_READ_MAC_TXQ_EVENTID = 0x1831, - WMI_WRITE_MAC_RXQ_EVENTID = 0x1832, - WMI_WRITE_MAC_TXQ_EVENTID = 0x1833, - WMI_WRITE_MAC_XQ_FIELD_EVENTID = 0x1834, WMI_BEAMFORMING_MGMT_DONE_EVENTID = 0x1836, WMI_BF_TXSS_MGMT_DONE_EVENTID = 0x1837, WMI_BF_RXSS_MGMT_DONE_EVENTID = 0x1839, @@ -1076,8 +1280,12 @@ enum wmi_event_id { WMI_TX_MGMT_PACKET_EVENTID = 0x1841, WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENTID = 0x1842, WMI_LINK_MAINTAIN_CFG_READ_DONE_EVENTID = 0x1843, - WMI_OTP_READ_RESULT_EVENTID = 0x1856, + WMI_RF_XPM_READ_RESULT_EVENTID = 0x1856, + WMI_RF_XPM_WRITE_RESULT_EVENTID = 0x1857, WMI_LED_CFG_DONE_EVENTID = 0x1858, + WMI_SET_SILENT_RSSI_TABLE_DONE_EVENTID = 0x185C, + WMI_RF_PWR_ON_DELAY_RSP_EVENTID = 0x185D, + WMI_SET_HIGH_POWER_TABLE_PARAMS_EVENTID = 0x185E, /* Performance monitoring events */ WMI_DATA_PORT_OPEN_EVENTID = 0x1860, WMI_WBE_LINK_DOWN_EVENTID = 0x1861, @@ -1106,14 +1314,6 @@ enum wmi_event_id { WMI_PCP_FACTOR_EVENTID = 0x191A, /* Power Save Configuration Events */ WMI_PS_DEV_PROFILE_CFG_EVENTID = 0x191C, - /* Not supported yet */ - WMI_PS_DEV_CFG_EVENTID = 0x191D, - /* Not supported yet */ - WMI_PS_DEV_CFG_READ_EVENTID = 0x191E, - /* Not supported yet */ - WMI_PS_MID_CFG_EVENTID = 0x191F, - /* Not supported yet */ - WMI_PS_MID_CFG_READ_EVENTID = 0x1920, WMI_RS_CFG_DONE_EVENTID = 0x1921, WMI_GET_DETAILED_RS_RES_EVENTID = 0x1922, WMI_AOA_MEAS_EVENTID = 0x1923, @@ -1122,14 +1322,17 @@ enum wmi_event_id { WMI_GET_MGMT_RETRY_LIMIT_EVENTID = 0x1931, WMI_SET_THERMAL_THROTTLING_CFG_EVENTID = 0x1940, WMI_GET_THERMAL_THROTTLING_CFG_EVENTID = 0x1941, + /* return the Power Save profile */ + WMI_PS_DEV_PROFILE_CFG_READ_EVENTID = 0x1942, WMI_TOF_SESSION_END_EVENTID = 0x1991, WMI_TOF_GET_CAPABILITIES_EVENTID = 0x1992, WMI_TOF_SET_LCR_EVENTID = 0x1993, WMI_TOF_SET_LCI_EVENTID = 0x1994, WMI_TOF_FTM_PER_DEST_RES_EVENTID = 0x1995, - WMI_TOF_CHANNEL_INFO_EVENTID = 0x1996, + WMI_TOF_CFG_RESPONDER_EVENTID = 0x1996, WMI_TOF_SET_TX_RX_OFFSET_EVENTID = 0x1997, WMI_TOF_GET_TX_RX_OFFSET_EVENTID = 0x1998, + WMI_TOF_CHANNEL_INFO_EVENTID = 0x1999, WMI_GET_RF_SECTOR_PARAMS_DONE_EVENTID = 0x19A0, WMI_SET_RF_SECTOR_PARAMS_DONE_EVENTID = 0x19A1, WMI_GET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID = 0x19A2, @@ -1138,12 +1341,18 @@ enum wmi_event_id { WMI_PRIO_TX_SECTORS_ORDER_EVENTID = 0x19A5, WMI_PRIO_TX_SECTORS_NUMBER_EVENTID = 0x19A6, WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_EVENTID = 0x19A7, + WMI_SCHEDULING_SCHEME_EVENTID = 0x1A01, + WMI_FIXED_SCHEDULING_CONFIG_COMPLETE_EVENTID = 0x1A02, + WMI_ENABLE_FIXED_SCHEDULING_COMPLETE_EVENTID = 0x1A03, + WMI_SET_MULTI_DIRECTED_OMNIS_CONFIG_EVENTID = 0x1A04, + WMI_SET_LONG_RANGE_CONFIG_COMPLETE_EVENTID = 0x1A05, WMI_SET_CHANNEL_EVENTID = 0x9000, WMI_ASSOC_REQ_EVENTID = 0x9001, WMI_EAPOL_RX_EVENTID = 0x9002, WMI_MAC_ADDR_RESP_EVENTID = 0x9003, WMI_FW_VER_EVENTID = 0x9004, WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENTID = 0x9005, + WMI_COMMAND_NOT_SUPPORTED_EVENTID = 0xFFFF, }; /* Events data structures */ @@ -1200,7 +1409,7 @@ struct wmi_fw_ver_event { __le32 bl_minor; __le32 bl_subminor; __le32 bl_build; - /* The number of entries in the FW capabilies array */ + /* The number of entries in the FW capabilities array */ u8 fw_capabilities_len; u8 reserved[3]; /* FW capabilities info @@ -1245,7 +1454,9 @@ struct wmi_get_rf_status_event { __le32 board_file_platform_type; /* board file version */ __le32 
board_file_version; - __le32 reserved[2]; + /* enabled XIFs bit vector */ + __le32 enabled_xif_vector; + __le32 reserved; } __packed; /* WMI_GET_BASEBAND_TYPE_EVENTID */ @@ -1299,6 +1510,9 @@ struct wmi_ready_event { /* enum wmi_phy_capability */ u8 phy_capability; u8 numof_additional_mids; + /* rfc read calibration result. 5..15 */ + u8 rfc_read_calib_result; + u8 reserved[3]; } __packed; /* WMI_NOTIFY_REQ_DONE_EVENTID */ @@ -1306,7 +1520,8 @@ struct wmi_notify_req_done_event { /* beamforming status, 0: fail; 1: OK; 2: retrying */ __le32 status; __le64 tsf; - __le32 snr_val; + s8 rssi; + u8 reserved0[3]; __le32 tx_tpt; __le32 tx_goodput; __le32 rx_goodput; @@ -1576,7 +1791,7 @@ struct wmi_sw_tx_complete_event { u8 reserved[3]; } __packed; -/* WMI_CORR_MEASURE_EVENTID */ +/* WMI_CORR_MEASURE_EVENTID - deprecated */ struct wmi_corr_measure_event { /* signed */ __le32 i; @@ -1602,31 +1817,35 @@ struct wmi_get_ssid_event { /* wmi_rx_mgmt_info */ struct wmi_rx_mgmt_info { u8 mcs; - s8 snr; + s8 rssi; u8 range; u8 sqi; __le16 stype; __le16 status; __le32 len; - /* Not resolved when == 0xFFFFFFFF ==> Broadcast to all MIDS */ + /* Not resolved when == 0xFFFFFFFF == > Broadcast to all MIDS */ u8 qid; - /* Not resolved when == 0xFFFFFFFF ==> Broadcast to all MIDS */ + /* Not resolved when == 0xFFFFFFFF == > Broadcast to all MIDS */ u8 mid; u8 cid; /* From Radio MNGR */ u8 channel; } __packed; -/* wmi_otp_read_write_cmd */ -struct wmi_otp_read_write_cmd { - __le32 addr; - __le32 size; - u8 values[0]; +/* EVENT: WMI_RF_XPM_READ_RESULT_EVENTID */ +struct wmi_rf_xpm_read_result_event { + /* enum wmi_fw_status_e - success=0 or fail=1 */ + u8 status; + u8 reserved[3]; + /* requested num_bytes of data */ + u8 data_bytes[0]; } __packed; -/* WMI_OTP_READ_RESULT_EVENTID */ -struct wmi_otp_read_result_event { - u8 payload[0]; +/* EVENT: WMI_RF_XPM_WRITE_RESULT_EVENTID */ +struct wmi_rf_xpm_write_result_event { + /* enum wmi_fw_status_e - success=0 or fail=1 */ + u8 status; + u8 reserved[3]; } __packed; /* WMI_TX_MGMT_PACKET_EVENTID */ @@ -1645,6 +1864,20 @@ struct wmi_echo_rsp_event { __le32 echoed_value; } __packed; +/* WMI_RF_PWR_ON_DELAY_RSP_EVENTID */ +struct wmi_rf_pwr_on_delay_rsp_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + +/* WMI_SET_HIGH_POWER_TABLE_PARAMS_EVENTID */ +struct wmi_set_high_power_table_params_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + /* WMI_TEMP_SENSE_DONE_EVENTID * * Measure MAC and radio temperatures @@ -1722,14 +1955,22 @@ struct wmi_led_cfg_cmd { u8 reserved; } __packed; +/* \WMI_SET_CONNECT_SNR_THR_CMDID */ +struct wmi_set_connect_snr_thr_cmd { + u8 enable; + u8 reserved; + /* 1/4 Db units */ + __le16 omni_snr_thr; + /* 1/4 Db units */ + __le16 direct_snr_thr; +} __packed; + /* WMI_LED_CFG_DONE_EVENTID */ struct wmi_led_cfg_done_event { /* led config status */ __le32 status; } __packed; -#define WMI_NUM_MCS (13) - /* Rate search parameters configuration per connection */ struct wmi_rs_cfg { /* The maximal allowed PER for each MCS @@ -1754,6 +1995,98 @@ struct wmi_rs_cfg { __le32 mcs_en_vec; } __packed; +/* Slot types */ +enum wmi_sched_scheme_slot_type { + WMI_SCHED_SLOT_SP = 0x0, + WMI_SCHED_SLOT_CBAP = 0x1, + WMI_SCHED_SLOT_IDLE = 0x2, + WMI_SCHED_SLOT_ANNOUNCE_NO_ACK = 0x3, + WMI_SCHED_SLOT_DISCOVERY = 0x4, +}; + +enum wmi_sched_scheme_slot_flags { + WMI_SCHED_SCHEME_SLOT_PERIODIC = 0x1, +}; + +struct wmi_sched_scheme_slot { + /* in microsecond */ + __le32 tbtt_offset; + /* wmi_sched_scheme_slot_flags */ + u8 
flags; + /* wmi_sched_scheme_slot_type */ + u8 type; + /* in microsecond */ + __le16 duration; + /* frame_exchange_sequence_duration */ + __le16 tx_op; + /* time in microseconds between two consecutive slots + * relevant only if flag WMI_SCHED_SCHEME_SLOT_PERIODIC set + */ + __le16 period; + /* relevant only if flag WMI_SCHED_SCHEME_SLOT_PERIODIC set + * number of times to repeat allocation + */ + u8 num_of_blocks; + /* relevant only if flag WMI_SCHED_SCHEME_SLOT_PERIODIC set + * every idle_period allocation will be idle + */ + u8 idle_period; + u8 src_aid; + u8 dest_aid; + __le32 reserved; +} __packed; + +enum wmi_sched_scheme_flags { + /* should not be set when clearing scheduling scheme */ + WMI_SCHED_SCHEME_ENABLE = 0x01, + WMI_SCHED_PROTECTED_SP = 0x02, + /* should be set only on first WMI fragment of scheme */ + WMI_SCHED_FIRST = 0x04, + /* should be set only on last WMI fragment of scheme */ + WMI_SCHED_LAST = 0x08, + WMI_SCHED_IMMEDIATE_START = 0x10, +}; + +enum wmi_sched_scheme_advertisment { + /* ESE is not advertised at all, STA has to be configured with WMI + * also + */ + WMI_ADVERTISE_ESE_DISABLED = 0x0, + WMI_ADVERTISE_ESE_IN_BEACON = 0x1, + WMI_ADVERTISE_ESE_IN_ANNOUNCE_FRAME = 0x2, +}; + +/* WMI_SCHEDULING_SCHEME_CMD */ +struct wmi_scheduling_scheme_cmd { + u8 serial_num; + /* wmi_sched_scheme_advertisment */ + u8 ese_advertisment; + /* wmi_sched_scheme_flags */ + __le16 flags; + u8 num_allocs; + u8 reserved[3]; + __le64 start_tbtt; + /* allocations list */ + struct wmi_sched_scheme_slot allocs[WMI_SCHED_MAX_ALLOCS_PER_CMD]; +} __packed; + +enum wmi_sched_scheme_failure_type { + WMI_SCHED_SCHEME_FAILURE_NO_ERROR = 0x00, + WMI_SCHED_SCHEME_FAILURE_OLD_START_TSF_ERR = 0x01, +}; + +/* WMI_SCHEDULING_SCHEME_EVENTID */ +struct wmi_scheduling_scheme_event { + /* wmi_fw_status_e */ + u8 status; + /* serial number given in command */ + u8 serial_num; + /* wmi_sched_scheme_failure_type */ + u8 failure_type; + /* alignment to 32b */ + u8 reserved[1]; +} __packed; + /* WMI_RS_CFG_CMDID */ struct wmi_rs_cfg_cmd { /* connection id */ @@ -1971,6 +2304,19 @@ enum wmi_ps_profile_type { WMI_PS_PROFILE_TYPE_LOW_LATENCY_PS = 0x03, }; +/* WMI_PS_DEV_PROFILE_CFG_READ_CMDID */ +struct wmi_ps_dev_profile_cfg_read_cmd { + /* reserved */ + __le32 reserved; +} __packed; + +/* WMI_PS_DEV_PROFILE_CFG_READ_EVENTID */ +struct wmi_ps_dev_profile_cfg_read_event { + /* wmi_ps_profile_type_e */ + u8 ps_profile; + u8 reserved[3]; +} __packed; + /* WMI_PS_DEV_PROFILE_CFG_CMDID * * Power save profile to be used by the device @@ -2019,157 +2365,6 @@ enum wmi_ps_d3_resp_policy { WMI_PS_D3_RESP_POLICY_APPROVED = 0x02, }; -/* Device common power save configurations */ -struct wmi_ps_dev_cfg { - /* lowest level of PS allowed while unassociated, enum wmi_ps_level_e - */ - u8 ps_unassoc_min_level; - /* lowest deep sleep clock level while nonassoc, enum - * wmi_ps_deep_sleep_clk_level_e - */ - u8 ps_unassoc_deep_sleep_min_level; - /* lowest level of PS allowed while associated, enum wmi_ps_level_e */ - u8 ps_assoc_min_level; - /* lowest deep sleep clock level while assoc, enum - * wmi_ps_deep_sleep_clk_level_e - */ - u8 ps_assoc_deep_sleep_min_level; - /* enum wmi_ps_deep_sleep_clk_level_e */ - u8 ps_assoc_low_latency_ds_min_level; - /* enum wmi_ps_d3_resp_policy_e */ - u8 ps_D3_response_policy; - /* BOOL */ - u8 ps_D3_pm_pme_enabled; - /* BOOL */ - u8 ps_halp_enable; - u8 ps_deep_sleep_enter_thresh_msec; - /* BOOL */ - u8 ps_voltage_scaling_en; -} __packed; - -/* WMI_PS_DEV_CFG_CMDID - * - * Configure common Power 
Save parameters of the device and all MIDs. - * - * Returned event: - * - WMI_PS_DEV_CFG_EVENTID - */ -struct wmi_ps_dev_cfg_cmd { - /* Device Power Save configuration to be applied */ - struct wmi_ps_dev_cfg ps_dev_cfg; - /* alignment to 32b */ - u8 reserved[2]; -} __packed; - -/* WMI_PS_DEV_CFG_EVENTID */ -struct wmi_ps_dev_cfg_event { - /* wmi_ps_cfg_cmd_status_e */ - __le32 status; -} __packed; - -/* WMI_PS_DEV_CFG_READ_CMDID - * - * request to retrieve device Power Save configuration - * (WMI_PS_DEV_CFG_CMD params) - * - * Returned event: - * - WMI_PS_DEV_CFG_READ_EVENTID - */ -struct wmi_ps_dev_cfg_read_cmd { - __le32 reserved; -} __packed; - -/* WMI_PS_DEV_CFG_READ_EVENTID */ -struct wmi_ps_dev_cfg_read_event { - /* wmi_ps_cfg_cmd_status_e */ - __le32 status; - /* Retrieved device Power Save configuration (WMI_PS_DEV_CFG_CMD - * params) - */ - struct wmi_ps_dev_cfg dev_ps_cfg; - /* alignment to 32b */ - u8 reserved[2]; -} __packed; - -/* Per Mac Power Save configurations */ -struct wmi_ps_mid_cfg { - /* Low power RX in BTI is enabled, BOOL */ - u8 beacon_lprx_enable; - /* Sync to sector ID enabled, BOOL */ - u8 beacon_sync_to_sectorId_enable; - /* Low power RX in DTI is enabled, BOOL */ - u8 frame_exchange_lprx_enable; - /* Sleep Cycle while in scheduled PS, 1-31 */ - u8 scheduled_sleep_cycle_pow2; - /* Stay Awake for k BIs every (sleep_cycle - k) BIs, 1-31 */ - u8 scheduled_num_of_awake_bis; - u8 am_to_traffic_load_thresh_mbp; - u8 traffic_to_am_load_thresh_mbps; - u8 traffic_to_am_num_of_no_traffic_bis; - /* BOOL */ - u8 continuous_traffic_psm; - __le16 no_traffic_to_min_usec; - __le16 no_traffic_to_max_usec; - __le16 snoozing_sleep_interval_milisec; - u8 max_no_data_awake_events; - /* Trigger WEB after k failed beacons */ - u8 num_of_failed_beacons_rx_to_trigger_web; - /* Trigger BF after k failed beacons */ - u8 num_of_failed_beacons_rx_to_trigger_bf; - /* Trigger SOB after k successful beacons */ - u8 num_of_successful_beacons_rx_to_trigger_sob; -} __packed; - -/* WMI_PS_MID_CFG_CMDID - * - * Configure Power Save parameters of a specific MID. - * These parameters are relevant for the specific BSS this MID belongs to. 
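The new WMI_PS_DEV_PROFILE_CFG_READ command/event pair defined above replaces the removed per-device and per-MID PS config messages for the read-back case. A hedged sketch of how a caller might use it: wmi_ps_profile_read() is a hypothetical helper name, the { wmi_cmd_hdr + event } reply wrapper and the wmi_call() signature are assumed to follow the driver's usual pattern, and the 100 ms timeout is an assumption.

/* Sketch only, not part of the patch. */
static int wmi_ps_profile_read(struct wil6210_priv *wil, u8 *ps_profile)
{
	struct wmi_ps_dev_profile_cfg_read_cmd cmd = { .reserved = 0 };
	struct {
		struct wmi_cmd_hdr wmi;
		struct wmi_ps_dev_profile_cfg_read_event evt;
	} __packed reply;
	int rc;

	rc = wmi_call(wil, WMI_PS_DEV_PROFILE_CFG_READ_CMDID, &cmd, sizeof(cmd),
		      WMI_PS_DEV_PROFILE_CFG_READ_EVENTID, &reply, sizeof(reply),
		      100 /* assumed timeout, msec */);
	if (rc)
		return rc;

	*ps_profile = reply.evt.ps_profile; /* enum wmi_ps_profile_type */
	return 0;
}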
- * - * Returned event: - * - WMI_PS_MID_CFG_EVENTID - */ -struct wmi_ps_mid_cfg_cmd { - /* MAC ID */ - u8 mid; - /* mid PS configuration to be applied */ - struct wmi_ps_mid_cfg ps_mid_cfg; -} __packed; - -/* WMI_PS_MID_CFG_EVENTID */ -struct wmi_ps_mid_cfg_event { - /* MAC ID */ - u8 mid; - /* alignment to 32b */ - u8 reserved[3]; - /* wmi_ps_cfg_cmd_status_e */ - __le32 status; -} __packed; - -/* WMI_PS_MID_CFG_READ_CMDID - * - * request to retrieve Power Save configuration of mid - * (WMI_PS_MID_CFG_CMD params) - * - * Returned event: - * - WMI_PS_MID_CFG_READ_EVENTID - */ -struct wmi_ps_mid_cfg_read_cmd { - /* MAC ID */ - u8 mid; - /* alignment to 32b */ - u8 reserved[3]; -} __packed; - -/* WMI_PS_MID_CFG_READ_EVENTID */ -struct wmi_ps_mid_cfg_read_event { - /* MAC ID */ - u8 mid; - /* Retrieved MID Power Save configuration(WMI_PS_MID_CFG_CMD params) */ - struct wmi_ps_mid_cfg mid_ps_cfg; - /* wmi_ps_cfg_cmd_status_e */ - __le32 status; -} __packed; - #define WMI_AOA_MAX_DATA_SIZE (128) enum wmi_aoa_meas_status { @@ -2260,6 +2455,20 @@ struct wmi_tof_session_end_event { u8 reserved[3]; } __packed; +/* WMI_TOF_SET_LCI_EVENTID */ +struct wmi_tof_set_lci_event { + /* enum wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + +/* WMI_TOF_SET_LCR_EVENTID */ +struct wmi_tof_set_lcr_event { + /* enum wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + /* Responder FTM Results */ struct wmi_responder_ftm_res { u8 t1[6]; @@ -2313,10 +2522,19 @@ struct wmi_tof_ftm_per_dest_res_event { __le32 tsf_sync; /* actual received ftm per burst */ u8 actual_ftm_per_burst; - u8 reserved0[7]; + /* Measurments are from RFs, defined by the mask */ + __le32 meas_rf_mask; + u8 reserved0[3]; struct wmi_responder_ftm_res responder_ftm_res[0]; } __packed; +/* WMI_TOF_CFG_RESPONDER_EVENTID */ +struct wmi_tof_cfg_responder_event { + /* enum wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + enum wmi_tof_channel_info_type { WMI_TOF_CHANNEL_INFO_AOA = 0x00, WMI_TOF_CHANNEL_INFO_LCI = 0x01, @@ -2353,12 +2571,15 @@ struct wmi_tof_set_tx_rx_offset_event { struct wmi_tof_get_tx_rx_offset_event { /* enum wmi_fw_status */ u8 status; - u8 reserved1[3]; + /* RF index used to read the offsets */ + u8 rf_index; + u8 reserved1[2]; /* TX delay offset */ __le32 tx_offset; /* RX delay offset */ __le32 rx_offset; - __le32 reserved2[2]; + /* Offset to strongest tap of CIR */ + __le32 precursor; } __packed; /* Result status codes for WMI commands */ @@ -2621,4 +2842,23 @@ struct wmi_prio_tx_sectors_set_default_cfg_event { u8 reserved[3]; } __packed; +/* WMI_SET_SILENT_RSSI_TABLE_DONE_EVENTID */ +struct wmi_set_silent_rssi_table_done_event { + /* enum wmi_silent_rssi_status */ + __le32 status; + /* enum wmi_silent_rssi_table */ + __le32 table; +} __packed; + +/* \WMI_COMMAND_NOT_SUPPORTED_EVENTID */ +struct wmi_command_not_supported_event { + /* device id */ + u8 mid; + u8 reserved0; + __le16 command_id; + /* for UT command only, otherwise reserved */ + __le16 command_subtype; + __le16 reserved1; +} __packed; + #endif /* __WILOCITY_WMI_H__ */ diff --git a/drivers/net/wireless/cnss2/debug.c b/drivers/net/wireless/cnss2/debug.c index 35d7fe1c318c..916820ee4f5d 100644 --- a/drivers/net/wireless/cnss2/debug.c +++ b/drivers/net/wireless/cnss2/debug.c @@ -158,6 +158,7 @@ static ssize_t cnss_dev_boot_debug_write(struct file *fp, } else if (sysfs_streq(cmd, "enumerate")) { ret = cnss_pci_init(plat_priv); } else if (sysfs_streq(cmd, "download")) { + set_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state); ret = 
cnss_pci_start_mhi(plat_priv->bus_priv); } else if (sysfs_streq(cmd, "linkup")) { ret = cnss_resume_pci_link(plat_priv->bus_priv); diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c index 23a81ff071ee..c033d843949f 100644 --- a/drivers/net/wireless/cnss2/main.c +++ b/drivers/net/wireless/cnss2/main.c @@ -1613,7 +1613,7 @@ int cnss_force_fw_assert(struct device *dev) if (plat_priv->device_id == QCA6174_DEVICE_ID) { cnss_pr_info("Forced FW assert is not supported\n"); - return -EINVAL; + return -EOPNOTSUPP; } if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) { diff --git a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h index a3081433cc2b..9b56eb0c02fb 100644 --- a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h +++ b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h @@ -77,7 +77,7 @@ #define QMI_WLFW_FUNCTION_NAME_LEN_V01 128 #define QMI_WLFW_MAX_NUM_CE_V01 12 #define QMI_WLFW_MAX_TIMESTAMP_LEN_V01 32 -#define QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01 512 +#define QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01 6144 #define QMI_WLFW_MAX_BUILD_ID_LEN_V01 128 #define QMI_WLFW_MAX_STR_LEN_V01 16 #define QMI_WLFW_MAX_NUM_SHADOW_REG_V01 24 @@ -483,7 +483,7 @@ struct wlfw_athdiag_read_resp_msg_v01 { u8 data[QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01]; }; -#define WLFW_ATHDIAG_READ_RESP_MSG_V01_MAX_MSG_LEN 524 +#define WLFW_ATHDIAG_READ_RESP_MSG_V01_MAX_MSG_LEN 6156 extern struct elem_info wlfw_athdiag_read_resp_msg_v01_ei[]; struct wlfw_athdiag_write_req_msg_v01 { @@ -493,7 +493,7 @@ struct wlfw_athdiag_write_req_msg_v01 { u8 data[QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01]; }; -#define WLFW_ATHDIAG_WRITE_REQ_MSG_V01_MAX_MSG_LEN 531 +#define WLFW_ATHDIAG_WRITE_REQ_MSG_V01_MAX_MSG_LEN 6163 extern struct elem_info wlfw_athdiag_write_req_msg_v01_ei[]; struct wlfw_athdiag_write_resp_msg_v01 { diff --git a/drivers/net/wireless/cnss_genl/cnss_nl.c b/drivers/net/wireless/cnss_genl/cnss_nl.c index fafd9ce4b4c4..29dd4c999f2d 100644 --- a/drivers/net/wireless/cnss_genl/cnss_nl.c +++ b/drivers/net/wireless/cnss_genl/cnss_nl.c @@ -64,6 +64,8 @@ static const struct nla_policy cld80211_policy[CLD80211_ATTR_MAX + 1] = { [CLD80211_ATTR_VENDOR_DATA] = { .type = NLA_NESTED }, [CLD80211_ATTR_DATA] = { .type = NLA_BINARY, .len = CLD80211_MAX_NL_DATA }, + [CLD80211_ATTR_META_DATA] = { .type = NLA_BINARY, + .len = CLD80211_MAX_NL_DATA }, }; static int cld80211_pre_doit(const struct genl_ops *ops, struct sk_buff *skb, diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index 0333ab0fd926..34173b5e886f 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h @@ -201,6 +201,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */ unsigned long remaining_credit; struct timer_list credit_timeout; u64 credit_window_start; + bool rate_limited; /* Statistics */ struct xenvif_stats stats; diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index e7bd63eb2876..60b26f32d31d 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -105,7 +105,11 @@ static int xenvif_poll(struct napi_struct *napi, int budget) if (work_done < budget) { napi_complete(napi); - xenvif_napi_schedule_or_enable_events(queue); + /* If the queue is rate-limited, it shall be + * rescheduled in the timer callback. 
+ */ + if (likely(!queue->rate_limited)) + xenvif_napi_schedule_or_enable_events(queue); } return work_done; diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 1049c34e7d43..72ee1c305cc4 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -687,6 +687,7 @@ static void tx_add_credit(struct xenvif_queue *queue) max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */ queue->remaining_credit = min(max_credit, max_burst); + queue->rate_limited = false; } void xenvif_tx_credit_callback(unsigned long data) @@ -1184,8 +1185,10 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size) msecs_to_jiffies(queue->credit_usec / 1000); /* Timer could already be pending in rare cases. */ - if (timer_pending(&queue->credit_timeout)) + if (timer_pending(&queue->credit_timeout)) { + queue->rate_limited = true; return true; + } /* Passed the point where we can replenish credit? */ if (time_after_eq64(now, next_credit)) { @@ -1200,6 +1203,7 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size) mod_timer(&queue->credit_timeout, next_credit); queue->credit_window_start = next_credit; + queue->rate_limited = true; return true; } diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c index a5d7332dfce5..2d043415f326 100644 --- a/drivers/nfc/fdp/i2c.c +++ b/drivers/nfc/fdp/i2c.c @@ -177,6 +177,16 @@ static int fdp_nci_i2c_read(struct fdp_i2c_phy *phy, struct sk_buff **skb) /* Packet that contains a length */ if (tmp[0] == 0 && tmp[1] == 0) { phy->next_read_size = (tmp[2] << 8) + tmp[3] + 3; + /* + * Ensure next_read_size does not exceed sizeof(tmp) + * for reading that many bytes during next iteration + */ + if (phy->next_read_size > FDP_NCI_I2C_MAX_PAYLOAD) { + dev_dbg(&client->dev, "%s: corrupted packet\n", + __func__); + phy->next_read_size = 5; + goto flush; + } } else { phy->next_read_size = FDP_NCI_I2C_MIN_PAYLOAD; diff --git a/drivers/nfc/st21nfca/dep.c b/drivers/nfc/st21nfca/dep.c index 798a32bbac5d..206285210ab5 100644 --- a/drivers/nfc/st21nfca/dep.c +++ b/drivers/nfc/st21nfca/dep.c @@ -217,7 +217,8 @@ static int st21nfca_tm_recv_atr_req(struct nfc_hci_dev *hdev, atr_req = (struct st21nfca_atr_req *)skb->data; - if (atr_req->length < sizeof(struct st21nfca_atr_req)) { + if (atr_req->length < sizeof(struct st21nfca_atr_req) || + atr_req->length > skb->len) { r = -EPROTO; goto exit; } diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c index c79d99b24c96..2d4b6e910b87 100644 --- a/drivers/nfc/st21nfca/se.c +++ b/drivers/nfc/st21nfca/se.c @@ -321,23 +321,33 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host, * AID 81 5 to 16 * PARAMETERS 82 0 to 255 */ - if (skb->len < NFC_MIN_AID_LENGTH + 2 && + if (skb->len < NFC_MIN_AID_LENGTH + 2 || skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG) return -EPROTO; + /* + * Buffer should have enough space for at least + * two tag fields + two length fields + aid_len (skb->data[1]) + */ + if (skb->len < skb->data[1] + 4) + return -EPROTO; + transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev, skb->len - 2, GFP_KERNEL); transaction->aid_len = skb->data[1]; memcpy(transaction->aid, &skb->data[2], transaction->aid_len); + transaction->params_len = skb->data[transaction->aid_len + 3]; - /* Check next byte is PARAMETERS tag (82) */ + /* Check next byte is PARAMETERS tag (82) and the length field */ if (skb->data[transaction->aid_len + 2] != - NFC_EVT_TRANSACTION_PARAMS_TAG) + NFC_EVT_TRANSACTION_PARAMS_TAG || + 
skb->len < transaction->aid_len + transaction->params_len + 4) { + devm_kfree(dev, transaction); return -EPROTO; + } - transaction->params_len = skb->data[transaction->aid_len + 3]; memcpy(transaction->params, skb->data + transaction->aid_len + 4, transaction->params_len); diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index ecc6fb9ca92f..3bbdf60f8908 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c @@ -599,7 +599,7 @@ static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt, if (!mw->virt_addr) return -ENOMEM; - if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count) + if (mw_num < qp_count % mw_count) num_qps_mw = qp_count / mw_count + 1; else num_qps_mw = qp_count / mw_count; @@ -947,7 +947,7 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt, qp->event_handler = NULL; ntb_qp_link_down_reset(qp); - if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count) + if (mw_num < qp_count % mw_count) num_qps_mw = qp_count / mw_count + 1; else num_qps_mw = qp_count / mw_count; @@ -1065,8 +1065,8 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev) qp_count = ilog2(qp_bitmap); if (max_num_clients && max_num_clients < qp_count) qp_count = max_num_clients; - else if (mw_count < qp_count) - qp_count = mw_count; + else if (nt->mw_count < qp_count) + qp_count = nt->mw_count; qp_bitmap &= BIT_ULL(qp_count) - 1; diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c index 7b0ca1551d7b..005ea632ba53 100644 --- a/drivers/parisc/dino.c +++ b/drivers/parisc/dino.c @@ -954,7 +954,7 @@ static int __init dino_probe(struct parisc_device *dev) dino_dev->hba.dev = dev; dino_dev->hba.base_addr = ioremap_nocache(hpa, 4096); - dino_dev->hba.lmmio_space_offset = 0; /* CPU addrs == bus addrs */ + dino_dev->hba.lmmio_space_offset = PCI_F_EXTEND; spin_lock_init(&dino_dev->dinosaur_pen); dino_dev->hba.iommu = ccio_get_iommu(dev); diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c index 71ccf6a90b22..2551e4adb33f 100644 --- a/drivers/pinctrl/samsung/pinctrl-exynos.c +++ b/drivers/pinctrl/samsung/pinctrl-exynos.c @@ -194,8 +194,6 @@ static int exynos_irq_request_resources(struct irq_data *irqd) spin_unlock_irqrestore(&bank->slock, flags); - exynos_irq_unmask(irqd); - return 0; } @@ -216,8 +214,6 @@ static void exynos_irq_release_resources(struct irq_data *irqd) shift = irqd->hwirq * bank_type->fld_width[PINCFG_TYPE_FUNC]; mask = (1 << bank_type->fld_width[PINCFG_TYPE_FUNC]) - 1; - exynos_irq_mask(irqd); - spin_lock_irqsave(&bank->slock, flags); con = readl(d->virt_base + reg_con); diff --git a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c index 862a096c5dba..be5c71df148d 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c @@ -811,6 +811,7 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = { SUNXI_FUNCTION(0x2, "lcd1"), /* D16 */ SUNXI_FUNCTION(0x3, "pata"), /* ATAD12 */ SUNXI_FUNCTION(0x4, "keypad"), /* IN6 */ + SUNXI_FUNCTION(0x5, "sim"), /* DET */ SUNXI_FUNCTION_IRQ(0x6, 16), /* EINT16 */ SUNXI_FUNCTION(0x7, "csi1")), /* D16 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 17), diff --git a/drivers/platform/msm/gpio-usbdetect.c b/drivers/platform/msm/gpio-usbdetect.c index dc05d7108135..adf47fc32548 100644 --- a/drivers/platform/msm/gpio-usbdetect.c +++ b/drivers/platform/msm/gpio-usbdetect.c @@ -50,6 +50,7 @@ static irqreturn_t gpio_usbdetect_vbus_irq(int irq, void *data) if 
(usb->vbus_state) { dev_dbg(&usb->pdev->dev, "setting vbus notification\n"); extcon_set_cable_state_(usb->extcon_dev, EXTCON_USB, 1); + extcon_set_cable_state_(usb->extcon_dev, EXTCON_USB_SPEED, 1); } else { dev_dbg(&usb->pdev->dev, "setting vbus removed notification\n"); extcon_set_cable_state_(usb->extcon_dev, EXTCON_USB, 0); @@ -85,6 +86,7 @@ static irqreturn_t gpio_usbdetect_id_irq_thread(int irq, void *data) dev_dbg(&usb->pdev->dev, "starting usb HOST\n"); disable_irq(usb->vbus_det_irq); extcon_set_cable_state_(usb->extcon_dev, EXTCON_USB_HOST, 1); + extcon_set_cable_state_(usb->extcon_dev, EXTCON_USB_SPEED, 1); } return IRQ_HANDLED; } @@ -186,6 +188,14 @@ static int gpio_usbdetect_probe(struct platform_device *pdev) enable_irq_wake(usb->id_det_irq); dev_set_drvdata(&pdev->dev, usb); + if (usb->id_det_irq) { + gpio_usbdetect_id_irq(usb->id_det_irq, usb); + if (!usb->id_state) { + gpio_usbdetect_id_irq_thread(usb->id_det_irq, usb); + return 0; + } + } + /* Read and report initial VBUS state */ gpio_usbdetect_vbus_irq(usb->vbus_det_irq, usb); diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c index 1a6ba1a915a0..bc0263c371a1 100644 --- a/drivers/platform/msm/ipa/ipa_api.c +++ b/drivers/platform/msm/ipa/ipa_api.c @@ -2943,6 +2943,25 @@ struct device *ipa_get_pdev(void) } EXPORT_SYMBOL(ipa_get_pdev); +int ipa_ntn_uc_reg_rdyCB(void (*ipauc_ready_cb)(void *user_data), + void *user_data) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_ntn_uc_reg_rdyCB, + ipauc_ready_cb, user_data); + + return ret; +} +EXPORT_SYMBOL(ipa_ntn_uc_reg_rdyCB); + +void ipa_ntn_uc_dereg_rdyCB(void) +{ + IPA_API_DISPATCH(ipa_ntn_uc_dereg_rdyCB); +} +EXPORT_SYMBOL(ipa_ntn_uc_dereg_rdyCB); + + static const struct dev_pm_ops ipa_pm_ops = { .suspend_noirq = ipa_ap_suspend, .resume_noirq = ipa_ap_resume, diff --git a/drivers/platform/msm/ipa/ipa_api.h b/drivers/platform/msm/ipa/ipa_api.h index 69bc4ae1fa6a..1fb0e7122042 100644 --- a/drivers/platform/msm/ipa/ipa_api.h +++ b/drivers/platform/msm/ipa/ipa_api.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -374,6 +374,11 @@ struct ipa_api_controller { int ipa_ep_idx_dl); struct device *(*ipa_get_pdev)(void); + + int (*ipa_ntn_uc_reg_rdyCB)(void (*ipauc_ready_cb)(void *user_data), + void *user_data); + + void (*ipa_ntn_uc_dereg_rdyCB)(void); }; #ifdef CONFIG_IPA diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c index 51c930a81c8d..3a1e38f32321 100644 --- a/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c +++ b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c @@ -622,3 +622,41 @@ int ipa_uc_offload_cleanup(u32 clnt_hdl) return ret; } EXPORT_SYMBOL(ipa_uc_offload_cleanup); + +/** + * ipa_uc_offload_uc_rdyCB() - To register uC ready CB if uC not + * ready + * @inout: [in/out] input/output parameters + * from/to client + * + * Returns: 0 on success, negative on failure + * + */ +int ipa_uc_offload_reg_rdyCB(struct ipa_uc_ready_params *inp) +{ + int ret = 0; + + if (!inp) { + IPA_UC_OFFLOAD_ERR("Invalid input\n"); + return -EINVAL; + } + + if (inp->proto == IPA_UC_NTN) + ret = ipa_ntn_uc_reg_rdyCB(inp->notify, inp->priv); + + if (ret == -EEXIST) { + inp->is_uC_ready = true; + ret = 0; + } else + inp->is_uC_ready = false; + + return ret; +} +EXPORT_SYMBOL(ipa_uc_offload_reg_rdyCB); + +void ipa_uc_offload_dereg_rdyCB(enum ipa_uc_offload_proto proto) +{ + if (proto == IPA_UC_NTN) + ipa_ntn_uc_dereg_rdyCB(); +} +EXPORT_SYMBOL(ipa_uc_offload_dereg_rdyCB); diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c index 293371b88ab9..8142a5923855 100644 --- a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c +++ b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c @@ -902,7 +902,7 @@ int ipa_usb_init_teth_prot(enum ipa_usb_teth_prot teth_prot, mutex_lock(&ipa3_usb_ctx->general_mutex); IPA_USB_DBG_LOW("entry\n"); - if (teth_prot > IPA_USB_MAX_TETH_PROT_SIZE || + if (teth_prot < 0 || teth_prot >= IPA_USB_MAX_TETH_PROT_SIZE || ((teth_prot == IPA_USB_RNDIS || teth_prot == IPA_USB_ECM) && teth_params == NULL) || ipa_usb_notify_cb == NULL || user_data == NULL) { @@ -1105,7 +1105,8 @@ static bool ipa3_usb_check_chan_params(struct ipa_usb_xdci_chan_params *params) params->xfer_scratch.depcmd_hi_addr); if (params->client >= IPA_CLIENT_MAX || - params->teth_prot > IPA_USB_MAX_TETH_PROT_SIZE || + params->teth_prot < 0 || + params->teth_prot >= IPA_USB_MAX_TETH_PROT_SIZE || params->xfer_ring_len % GSI_CHAN_RE_SIZE_16B || params->xfer_scratch.const_buffer_size < 1 || params->xfer_scratch.const_buffer_size > 31) { @@ -1369,7 +1370,7 @@ static int ipa3_usb_release_xdci_channel(u32 clnt_hdl, int result = 0; IPA_USB_DBG_LOW("entry\n"); - if (ttype > IPA_USB_TRANSPORT_MAX) { + if (ttype < 0 || ttype >= IPA_USB_TRANSPORT_MAX) { IPA_USB_ERR("bad parameter.\n"); return -EINVAL; } @@ -1473,7 +1474,8 @@ static bool ipa3_usb_check_connect_params( (params->teth_prot != IPA_USB_DIAG && (params->usb_to_ipa_xferrscidx < 0 || params->usb_to_ipa_xferrscidx > 127)) || - params->teth_prot > IPA_USB_MAX_TETH_PROT_SIZE) { + params->teth_prot < 0 || + params->teth_prot >= IPA_USB_MAX_TETH_PROT_SIZE) { IPA_USB_ERR("Invalid params\n"); return false; } @@ -2177,7 +2179,7 @@ EXPORT_SYMBOL(ipa_usb_xdci_connect); static int ipa3_usb_check_disconnect_prot(enum ipa_usb_teth_prot teth_prot) { - if (teth_prot > IPA_USB_MAX_TETH_PROT_SIZE) { + if (teth_prot < 0 || teth_prot >= 
IPA_USB_MAX_TETH_PROT_SIZE) { IPA_USB_ERR("bad parameter.\n"); return -EFAULT; } @@ -2367,7 +2369,7 @@ int ipa_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot) mutex_lock(&ipa3_usb_ctx->general_mutex); IPA_USB_DBG_LOW("entry\n"); - if (teth_prot > IPA_USB_MAX_TETH_PROT_SIZE) { + if (teth_prot < 0 || teth_prot >= IPA_USB_MAX_TETH_PROT_SIZE) { IPA_USB_ERR("bad parameters.\n"); result = -EINVAL; goto bad_params; @@ -2553,7 +2555,7 @@ int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl, mutex_lock(&ipa3_usb_ctx->general_mutex); IPA_USB_DBG_LOW("entry\n"); - if (teth_prot > IPA_USB_MAX_TETH_PROT_SIZE) { + if (teth_prot < 0 || teth_prot >= IPA_USB_MAX_TETH_PROT_SIZE) { IPA_USB_ERR("bad parameters.\n"); result = -EINVAL; goto bad_params; @@ -2754,7 +2756,7 @@ int ipa_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl, mutex_lock(&ipa3_usb_ctx->general_mutex); IPA_USB_DBG_LOW("entry\n"); - if (teth_prot > IPA_USB_MAX_TETH_PROT_SIZE) { + if (teth_prot < 0 || teth_prot >= IPA_USB_MAX_TETH_PROT_SIZE) { IPA_USB_ERR("bad parameters.\n"); result = -EINVAL; goto bad_params; diff --git a/drivers/platform/msm/ipa/ipa_common_i.h b/drivers/platform/msm/ipa/ipa_common_i.h index d5f102eaaac6..911db0b19079 100644 --- a/drivers/platform/msm/ipa/ipa_common_i.h +++ b/drivers/platform/msm/ipa/ipa_common_i.h @@ -379,6 +379,9 @@ u8 *ipa_write_16(u16 hw, u8 *dest); u8 *ipa_write_8(u8 b, u8 *dest); u8 *ipa_pad_to_64(u8 *dest); u8 *ipa_pad_to_32(u8 *dest); +int ipa_ntn_uc_reg_rdyCB(void (*ipauc_ready_cb)(void *user_data), + void *user_data); +void ipa_ntn_uc_dereg_rdyCB(void); const char *ipa_get_version_string(enum ipa_hw_type ver); #endif /* _IPA_COMMON_I_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_uc_offload_common_i.h b/drivers/platform/msm/ipa/ipa_uc_offload_common_i.h index ae6cfc4fcd50..0bc4b768e847 100644 --- a/drivers/platform/msm/ipa/ipa_uc_offload_common_i.h +++ b/drivers/platform/msm/ipa/ipa_uc_offload_common_i.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -21,4 +21,7 @@ int ipa_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in, struct ipa_ntn_conn_out_params *outp); int ipa_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl); +int ipa_ntn_uc_reg_rdyCB(void (*ipauc_ready_cb)(void *user_data), + void *user_data); +void ipa_ntn_uc_dereg_rdyCB(void); #endif /* _IPA_UC_OFFLOAD_COMMON_I_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c index 85fa9da50779..df741c1c8e5f 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c @@ -531,7 +531,7 @@ static void ipa_wan_msg_free_cb(void *buff, u32 len, u32 type) kfree(buff); } -static int ipa_send_wan_msg(unsigned long usr_param, uint8_t msg_type) +static int ipa_send_wan_msg(unsigned long usr_param, uint8_t msg_type, bool is_cache) { int retval; struct ipa_wan_msg *wan_msg; @@ -559,6 +559,25 @@ static int ipa_send_wan_msg(unsigned long usr_param, uint8_t msg_type) return retval; } + if (is_cache) { + mutex_lock(&ipa_ctx->ipa_cne_evt_lock); + + /* cache the cne event */ + memcpy(&ipa_ctx->ipa_cne_evt_req_cache[ + ipa_ctx->num_ipa_cne_evt_req].wan_msg, + wan_msg, + sizeof(struct ipa_wan_msg)); + + memcpy(&ipa_ctx->ipa_cne_evt_req_cache[ + ipa_ctx->num_ipa_cne_evt_req].msg_meta, + &msg_meta, + sizeof(struct ipa_msg_meta)); + + ipa_ctx->num_ipa_cne_evt_req++; + ipa_ctx->num_ipa_cne_evt_req %= IPA_MAX_NUM_REQ_CACHE; + mutex_unlock(&ipa_ctx->ipa_cne_evt_lock); + } + return 0; } @@ -1328,21 +1347,21 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) } break; case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD: - retval = ipa_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_ADD); + retval = ipa_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_ADD, true); if (retval) { IPAERR("ipa_send_wan_msg failed: %d\n", retval); break; } break; case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL: - retval = ipa_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_DEL); + retval = ipa_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_DEL, true); if (retval) { IPAERR("ipa_send_wan_msg failed: %d\n", retval); break; } break; case IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED: - retval = ipa_send_wan_msg(arg, WAN_EMBMS_CONNECT); + retval = ipa_send_wan_msg(arg, WAN_EMBMS_CONNECT, false); if (retval) { IPAERR("ipa_send_wan_msg failed: %d\n", retval); break; @@ -4165,6 +4184,7 @@ static int ipa_init(const struct ipa_plat_drv_res *resource_p, mutex_init(&ipa_ctx->lock); mutex_init(&ipa_ctx->nat_mem.lock); + mutex_init(&ipa_ctx->ipa_cne_evt_lock); idr_init(&ipa_ctx->ipa_idr); spin_lock_init(&ipa_ctx->idr_lock); diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c index cb95f6e98956..ecbbe516266e 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c @@ -83,6 +83,12 @@ const char *ipa_event_name[] = { __stringify(IPA_QUOTA_REACH), __stringify(IPA_SSR_BEFORE_SHUTDOWN), __stringify(IPA_SSR_AFTER_POWERUP), + __stringify(ADD_VLAN_IFACE), + __stringify(DEL_VLAN_IFACE), + __stringify(ADD_L2TP_VLAN_MAPPING), + __stringify(DEL_L2TP_VLAN_MAPPING), + __stringify(IPA_PER_CLIENT_STATS_CONNECT_EVENT), + __stringify(IPA_PER_CLIENT_STATS_DISCONNECT_EVENT), }; const char *ipa_hdr_l2_type_name[] = { @@ -808,10 +814,11 @@ static ssize_t ipa_read_flt(struct file *file, char __user *ubuf, size_t count, eq = true; } else { rt_tbl = 
ipa_id_find(entry->rule.rt_tbl_hdl); - if (rt_tbl) - rt_tbl_idx = rt_tbl->idx; + if (rt_tbl == NULL || + rt_tbl->cookie != IPA_RT_TBL_COOKIE) + rt_tbl_idx = ~0; else - rt_tbl_idx = ~0; + rt_tbl_idx = rt_tbl->idx; bitmap = entry->rule.attrib.attrib_mask; eq = false; } @@ -838,10 +845,11 @@ static ssize_t ipa_read_flt(struct file *file, char __user *ubuf, size_t count, eq = true; } else { rt_tbl = ipa_id_find(entry->rule.rt_tbl_hdl); - if (rt_tbl) - rt_tbl_idx = rt_tbl->idx; - else + if (rt_tbl == NULL || + rt_tbl->cookie != IPA_RT_TBL_COOKIE) rt_tbl_idx = ~0; + else + rt_tbl_idx = rt_tbl->idx; bitmap = entry->rule.attrib.attrib_mask; eq = false; } diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h index bfb1ce56412c..28689eb83d4e 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h @@ -65,6 +65,8 @@ #define IPA_IPC_LOG_PAGES 50 +#define IPA_MAX_NUM_REQ_CACHE 10 + #define IPADBG(fmt, args...) \ do { \ pr_debug(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args);\ @@ -996,6 +998,11 @@ struct ipacm_client_info { bool uplink; }; +struct ipa_cne_evt { + struct ipa_wan_msg wan_msg; + struct ipa_msg_meta msg_meta; +}; + /** * struct ipa_context - IPA context * @class: pointer to the struct class @@ -1197,6 +1204,9 @@ struct ipa_context { u32 ipa_rx_max_timeout_usec; u32 ipa_polling_iteration; bool ipa_uc_monitor_holb; + struct ipa_cne_evt ipa_cne_evt_req_cache[IPA_MAX_NUM_REQ_CACHE]; + int num_ipa_cne_evt_req; + struct mutex ipa_cne_evt_lock; }; /** @@ -1566,6 +1576,8 @@ int ipa2_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *inp, ipa_notify_cb notify, void *priv, u8 hdr_len, struct ipa_ntn_conn_out_params *outp); int ipa2_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl); +int ipa2_ntn_uc_reg_rdyCB(void (*ipauc_ready_cb)(void *), void *priv); +void ipa2_ntn_uc_dereg_rdyCB(void); /* * To retrieve doorbell physical address of diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c index 0531919487d7..293a60a60881 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c @@ -53,7 +53,7 @@ int __ipa_generate_rt_hw_rule_v2(enum ipa_ip_type ip, int pipe_idx; if (buf == NULL) { - memset(tmp, 0, IPA_RT_FLT_HW_RULE_BUF_SIZE); + memset(tmp, 0, (IPA_RT_FLT_HW_RULE_BUF_SIZE/4)); buf = (u8 *)tmp; } @@ -75,8 +75,15 @@ int __ipa_generate_rt_hw_rule_v2(enum ipa_ip_type ip, rule_hdr->u.hdr.pipe_dest_idx = pipe_idx; rule_hdr->u.hdr.system = !ipa_ctx->hdr_tbl_lcl; if (entry->hdr) { - rule_hdr->u.hdr.hdr_offset = - entry->hdr->offset_entry->offset >> 2; + if (entry->hdr->cookie == IPA_HDR_COOKIE) { + rule_hdr->u.hdr.hdr_offset = + entry->hdr->offset_entry->offset >> 2; + } else { + IPAERR("Entry hdr deleted by user = %d cookie = %u\n", + entry->hdr->user_deleted, entry->hdr->cookie); + WARN_ON(1); + rule_hdr->u.hdr.hdr_offset = 0; + } } else { rule_hdr->u.hdr.hdr_offset = 0; } diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c index 0b46ab2a8439..2aedfe04b5a3 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -165,6 +165,17 @@ int ipa2_register_ipa_ready_cb(void (*ipa_ready_cb)(void *), void *user_data) return -EEXIST; } +int ipa2_ntn_uc_reg_rdyCB(void (*ipauc_ready_cb)(void *), void *priv) +{ + return ipa2_register_ipa_ready_cb(ipauc_ready_cb, priv); +} + +void ipa2_ntn_uc_dereg_rdyCB(void) +{ + ipa_ctx->uc_ntn_ctx.uc_ready_cb = NULL; + ipa_ctx->uc_ntn_ctx.priv = NULL; +} + static void ipa_uc_ntn_loaded_handler(void) { if (!ipa_ctx) { diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c index 50a8e46d3b12..a9bd0e11b330 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c @@ -5155,6 +5155,8 @@ int ipa2_bind_api_controller(enum ipa_hw_type ipa_hw_type, api_ctrl->ipa_tear_down_uc_offload_pipes = ipa2_tear_down_uc_offload_pipes; api_ctrl->ipa_get_pdev = ipa2_get_pdev; + api_ctrl->ipa_ntn_uc_reg_rdyCB = ipa2_ntn_uc_reg_rdyCB; + api_ctrl->ipa_ntn_uc_dereg_rdyCB = ipa2_ntn_uc_dereg_rdyCB; return 0; } diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c index 5dbd43b44540..12b43882ed5b 100644 --- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c +++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c @@ -411,12 +411,15 @@ int copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01 { int i, j; + /* prevent multi-threads accessing num_q6_rule */ + mutex_lock(&add_mux_channel_lock); if (rule_req->filter_spec_list_valid == true) { num_q6_rule = rule_req->filter_spec_list_len; IPAWANDBG("Received (%d) install_flt_req\n", num_q6_rule); } else { num_q6_rule = 0; IPAWANERR("got no UL rules from modem\n"); + mutex_unlock(&add_mux_channel_lock); return -EINVAL; } @@ -610,9 +613,11 @@ failure: num_q6_rule = 0; memset(ipa_qmi_ctx->q6_ul_filter_rule, 0, sizeof(ipa_qmi_ctx->q6_ul_filter_rule)); + mutex_unlock(&add_mux_channel_lock); return -EINVAL; success: + mutex_unlock(&add_mux_channel_lock); return 0; } @@ -1622,9 +1627,12 @@ static int ipa_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /* already got Q6 UL filter rules*/ if (ipa_qmi_ctx && ipa_qmi_ctx->modem_cfg_emb_pipe_flt - == false) + == false) { + /* protect num_q6_rule */ + mutex_lock(&add_mux_channel_lock); rc = wwan_add_ul_flt_rule_to_ipa(); - else + mutex_unlock(&add_mux_channel_lock); + } else rc = 0; egress_set = true; if (rc) @@ -2687,6 +2695,9 @@ int rmnet_ipa_set_data_quota(struct wan_ioctl_set_data_quota *data) enum ipa_upstream_type upstream_type; int rc = 0; + /* prevent string buffer overflows */ + data->interface_name[IFNAMSIZ-1] = '\0'; + /* get IPA backhaul type */ upstream_type = find_upstream_type(data->interface_name); @@ -2978,6 +2989,10 @@ int rmnet_ipa_query_tethering_stats(struct wan_ioctl_query_tether_stats *data, enum ipa_upstream_type upstream_type; int rc = 0; + /* prevent string buffer overflows */ + data->upstreamIface[IFNAMSIZ-1] = '\0'; + data->tetherIface[IFNAMSIZ-1] = '\0'; + /* get IPA backhaul type */ upstream_type = find_upstream_type(data->upstreamIface); @@ -3012,6 +3027,10 @@ int rmnet_ipa_query_tethering_stats_all( int rc = 0; memset(&tether_stats, 0, sizeof(struct wan_ioctl_query_tether_stats)); + + /* prevent string buffer overflows */ + data->upstreamIface[IFNAMSIZ-1] = '\0'; + /* get IPA backhaul type */ upstream_type = find_upstream_type(data->upstreamIface); @@ -3055,6 +3074,9 @@ int 
rmnet_ipa_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data) memset(&tether_stats, 0, sizeof(struct wan_ioctl_query_tether_stats)); + /* prevent string buffer overflows */ + data->upstreamIface[IFNAMSIZ-1] = '\0'; + /* get IPA backhaul type */ upstream_type = find_upstream_type(data->upstreamIface); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c index 2615db4e9755..73321df80ada 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c @@ -143,6 +143,9 @@ #define IPA_IOC_ALLOC_NAT_MEM32 _IOWR(IPA_IOC_MAGIC, \ IPA_IOCTL_ALLOC_NAT_MEM, \ compat_uptr_t) +#define IPA_IOC_ALLOC_NAT_TABLE32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_ALLOC_NAT_TABLE, \ + compat_uptr_t) #define IPA_IOC_V4_INIT_NAT32 _IOWR(IPA_IOC_MAGIC, \ IPA_IOCTL_V4_INIT_NAT, \ compat_uptr_t) @@ -152,6 +155,9 @@ #define IPA_IOC_V4_DEL_NAT32 _IOWR(IPA_IOC_MAGIC, \ IPA_IOCTL_V4_DEL_NAT, \ compat_uptr_t) +#define IPA_IOC_DEL_NAT_TABLE32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_DEL_NAT_TABLE, \ + compat_uptr_t) #define IPA_IOC_GET_NAT_OFFSET32 _IOWR(IPA_IOC_MAGIC, \ IPA_IOCTL_GET_NAT_OFFSET, \ compat_uptr_t) @@ -207,6 +213,18 @@ struct ipa3_ioc_nat_alloc_mem32 { compat_size_t size; compat_off_t offset; }; + +/** +* struct ipa_ioc_nat_ipv6ct_table_alloc32 - table memory allocation +* properties +* @size: input parameter, size of table in bytes +* @offset: output parameter, offset into page in case of system memory +*/ +struct ipa_ioc_nat_ipv6ct_table_alloc32 { + compat_size_t size; + compat_off_t offset; +}; + #endif #define IPA_TZ_UNLOCK_ATTRIBUTE 0x0C0311 @@ -580,7 +598,7 @@ static void ipa3_wan_msg_free_cb(void *buff, u32 len, u32 type) kfree(buff); } -static int ipa3_send_wan_msg(unsigned long usr_param, uint8_t msg_type) +static int ipa3_send_wan_msg(unsigned long usr_param, uint8_t msg_type, bool is_cache) { int retval; struct ipa_wan_msg *wan_msg; @@ -608,9 +626,112 @@ static int ipa3_send_wan_msg(unsigned long usr_param, uint8_t msg_type) return retval; } + if (is_cache) { + mutex_lock(&ipa3_ctx->ipa_cne_evt_lock); + + /* cache the cne event */ + memcpy(&ipa3_ctx->ipa_cne_evt_req_cache[ + ipa3_ctx->num_ipa_cne_evt_req].wan_msg, + wan_msg, + sizeof(struct ipa_wan_msg)); + + memcpy(&ipa3_ctx->ipa_cne_evt_req_cache[ + ipa3_ctx->num_ipa_cne_evt_req].msg_meta, + &msg_meta, + sizeof(struct ipa_msg_meta)); + + ipa3_ctx->num_ipa_cne_evt_req++; + ipa3_ctx->num_ipa_cne_evt_req %= IPA_MAX_NUM_REQ_CACHE; + mutex_unlock(&ipa3_ctx->ipa_cne_evt_lock); + } + return 0; } +static void ipa3_vlan_l2tp_msg_free_cb(void *buff, u32 len, u32 type) +{ + if (!buff) { + IPAERR("Null buffer\n"); + return; + } + + if (type != ADD_VLAN_IFACE && + type != DEL_VLAN_IFACE && + type != ADD_L2TP_VLAN_MAPPING && + type != DEL_L2TP_VLAN_MAPPING) { + IPAERR("Wrong type given. 
buff %pK type %d\n", buff, type); + return; + } + + kfree(buff); +} + +static int ipa3_send_vlan_l2tp_msg(unsigned long usr_param, uint8_t msg_type) +{ + int retval; + struct ipa_ioc_vlan_iface_info *vlan_info; + struct ipa_ioc_l2tp_vlan_mapping_info *mapping_info; + struct ipa_msg_meta msg_meta; + + if (msg_type == ADD_VLAN_IFACE || + msg_type == DEL_VLAN_IFACE) { + vlan_info = kzalloc(sizeof(struct ipa_ioc_vlan_iface_info), + GFP_KERNEL); + if (!vlan_info) { + IPAERR("no memory\n"); + return -ENOMEM; + } + + if (copy_from_user((u8 *)vlan_info, (void __user *)usr_param, + sizeof(struct ipa_ioc_vlan_iface_info))) { + kfree(vlan_info); + return -EFAULT; + } + + memset(&msg_meta, 0, sizeof(msg_meta)); + msg_meta.msg_type = msg_type; + msg_meta.msg_len = sizeof(struct ipa_ioc_vlan_iface_info); + retval = ipa3_send_msg(&msg_meta, vlan_info, + ipa3_vlan_l2tp_msg_free_cb); + if (retval) { + IPAERR("ipa3_send_msg failed: %d\n", retval); + kfree(vlan_info); + return retval; + } + } else if (msg_type == ADD_L2TP_VLAN_MAPPING || + msg_type == DEL_L2TP_VLAN_MAPPING) { + mapping_info = kzalloc(sizeof(struct + ipa_ioc_l2tp_vlan_mapping_info), GFP_KERNEL); + if (!mapping_info) { + IPAERR("no memory\n"); + return -ENOMEM; + } + + if (copy_from_user((u8 *)mapping_info, + (void __user *)usr_param, + sizeof(struct ipa_ioc_l2tp_vlan_mapping_info))) { + kfree(mapping_info); + return -EFAULT; + } + + memset(&msg_meta, 0, sizeof(msg_meta)); + msg_meta.msg_type = msg_type; + msg_meta.msg_len = sizeof(struct + ipa_ioc_l2tp_vlan_mapping_info); + retval = ipa3_send_msg(&msg_meta, mapping_info, + ipa3_vlan_l2tp_msg_free_cb); + if (retval) { + IPAERR("ipa3_send_msg failed: %d\n", retval); + kfree(mapping_info); + return retval; + } + } else { + IPAERR("Unexpected event\n"); + return -EFAULT; + } + + return 0; +} static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { @@ -619,8 +740,10 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) u8 header[128] = { 0 }; u8 *param = NULL; struct ipa_ioc_nat_alloc_mem nat_mem; + struct ipa_ioc_nat_ipv6ct_table_alloc table_alloc; struct ipa_ioc_v4_nat_init nat_init; struct ipa_ioc_v4_nat_del nat_del; + struct ipa_ioc_nat_ipv6ct_table_del table_del; struct ipa_ioc_rm_dependency rm_depend; size_t sz; int pre_entry; @@ -659,6 +782,26 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) break; } break; + + case IPA_IOC_ALLOC_NAT_TABLE: + if (copy_from_user(&table_alloc, (const void __user *)arg, + sizeof(struct ipa_ioc_nat_ipv6ct_table_alloc))) { + retval = -EFAULT; + break; + } + + if (ipa3_allocate_nat_table(&table_alloc)) { + retval = -EFAULT; + break; + } + if (table_alloc.offset && + copy_to_user((void __user *)arg, &table_alloc, sizeof( + struct ipa_ioc_nat_ipv6ct_table_alloc))) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_V4_INIT_NAT: if (copy_from_user((u8 *)&nat_init, (u8 *)arg, sizeof(struct ipa_ioc_v4_nat_init))) { @@ -719,6 +862,18 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) } break; + case IPA_IOC_DEL_NAT_TABLE: + if (copy_from_user(&table_del, (const void __user *)arg, + sizeof(struct ipa_ioc_nat_ipv6ct_table_del))) { + retval = -EFAULT; + break; + } + if (ipa3_del_nat_table(&table_del)) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_ADD_HDR: if (copy_from_user(header, (u8 *)arg, sizeof(struct ipa_ioc_add_hdr))) { @@ -837,8 +992,52 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) break; } 
break; + + case IPA_IOC_ADD_RT_RULE_EXT: + if (copy_from_user(header, + (const void __user *)arg, + sizeof(struct ipa_ioc_add_rt_rule_ext))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_add_rt_rule_ext *)header)->num_rules; + pyld_sz = + sizeof(struct ipa_ioc_add_rt_rule_ext) + + pre_entry * sizeof(struct ipa_rt_rule_add_ext); + param = kzalloc(pyld_sz, GFP_KERNEL); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (const void __user *)arg, + pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely( + ((struct ipa_ioc_add_rt_rule_ext *)param)->num_rules + != pre_entry)) { + IPAERR(" prevent memory corruption(%d not match %d)\n", + ((struct ipa_ioc_add_rt_rule_ext *)param)-> + num_rules, + pre_entry); + retval = -EINVAL; + break; + } + if (ipa3_add_rt_rule_ext( + (struct ipa_ioc_add_rt_rule_ext *)param)) { + retval = -EFAULT; + break; + } + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; case IPA_IOC_ADD_RT_RULE_AFTER: - if (copy_from_user(header, (u8 *)arg, + if (copy_from_user(header, (const void __user *)arg, sizeof(struct ipa_ioc_add_rt_rule_after))) { retval = -EFAULT; @@ -1467,21 +1666,21 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) } break; case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD: - retval = ipa3_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_ADD); + retval = ipa3_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_ADD, true); if (retval) { IPAERR("ipa3_send_wan_msg failed: %d\n", retval); break; } break; case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL: - retval = ipa3_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_DEL); + retval = ipa3_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_DEL, true); if (retval) { IPAERR("ipa3_send_wan_msg failed: %d\n", retval); break; } break; case IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED: - retval = ipa3_send_wan_msg(arg, WAN_EMBMS_CONNECT); + retval = ipa3_send_wan_msg(arg, WAN_EMBMS_CONNECT, false); if (retval) { IPAERR("ipa3_send_wan_msg failed: %d\n", retval); break; @@ -1582,6 +1781,34 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) } break; + case IPA_IOC_ADD_VLAN_IFACE: + if (ipa3_send_vlan_l2tp_msg(arg, ADD_VLAN_IFACE)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_DEL_VLAN_IFACE: + if (ipa3_send_vlan_l2tp_msg(arg, DEL_VLAN_IFACE)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_ADD_L2TP_VLAN_MAPPING: + if (ipa3_send_vlan_l2tp_msg(arg, ADD_L2TP_VLAN_MAPPING)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_DEL_L2TP_VLAN_MAPPING: + if (ipa3_send_vlan_l2tp_msg(arg, DEL_L2TP_VLAN_MAPPING)) { + retval = -EFAULT; + break; + } + break; + default: /* redundant, as cmd was checked against MAXNR */ IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); return -ENOTTY; @@ -2997,6 +3224,34 @@ static void ipa3_teardown_apps_pipes(void) } #ifdef CONFIG_COMPAT +static long compat_ipa3_nat_ipv6ct_alloc_table(unsigned long arg, + int (alloc_func)(struct ipa_ioc_nat_ipv6ct_table_alloc *)) +{ + long retval; + struct ipa_ioc_nat_ipv6ct_table_alloc32 table_alloc32; + struct ipa_ioc_nat_ipv6ct_table_alloc table_alloc; + + retval = copy_from_user(&table_alloc32, (const void __user *)arg, + sizeof(struct ipa_ioc_nat_ipv6ct_table_alloc32)); + if (retval) + return retval; + + table_alloc.size = (size_t)table_alloc32.size; + table_alloc.offset = (off_t)table_alloc32.offset; + + retval = alloc_func(&table_alloc); + if (retval) + return retval; + + if (table_alloc.offset) { + 
table_alloc32.offset = (compat_off_t)table_alloc.offset; + retval = copy_to_user((void __user *)arg, &table_alloc32, + sizeof(struct ipa_ioc_nat_ipv6ct_table_alloc32)); + } + + return retval; +} + long compat_ipa3_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int retval = 0; @@ -3068,6 +3323,9 @@ long compat_ipa3_ioctl(struct file *file, unsigned int cmd, unsigned long arg) } ret: return retval; + case IPA_IOC_ALLOC_NAT_TABLE32: + return compat_ipa3_nat_ipv6ct_alloc_table(arg, + ipa3_allocate_nat_table); case IPA_IOC_V4_INIT_NAT32: cmd = IPA_IOC_V4_INIT_NAT; break; @@ -3077,6 +3335,9 @@ ret: case IPA_IOC_V4_DEL_NAT32: cmd = IPA_IOC_V4_DEL_NAT; break; + case IPA_IOC_DEL_NAT_TABLE32: + cmd = IPA_IOC_DEL_NAT_TABLE; + break; case IPA_IOC_GET_NAT_OFFSET32: cmd = IPA_IOC_GET_NAT_OFFSET; break; @@ -4622,6 +4883,7 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p, mutex_init(&ipa3_ctx->lock); mutex_init(&ipa3_ctx->nat_mem.lock); mutex_init(&ipa3_ctx->q6_proxy_clk_vote_mutex); + mutex_init(&ipa3_ctx->ipa_cne_evt_lock); idr_init(&ipa3_ctx->ipa_idr); spin_lock_init(&ipa3_ctx->idr_lock); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c index fbf84ab7d2d4..71da7d28a451 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c @@ -64,6 +64,12 @@ const char *ipa3_event_name[] = { __stringify(IPA_QUOTA_REACH), __stringify(IPA_SSR_BEFORE_SHUTDOWN), __stringify(IPA_SSR_AFTER_POWERUP), + __stringify(ADD_VLAN_IFACE), + __stringify(DEL_VLAN_IFACE), + __stringify(ADD_L2TP_VLAN_MAPPING), + __stringify(DEL_L2TP_VLAN_MAPPING), + __stringify(IPA_PER_CLIENT_STATS_CONNECT_EVENT), + __stringify(IPA_PER_CLIENT_STATS_DISCONNECT_EVENT), }; const char *ipa3_hdr_l2_type_name[] = { @@ -863,10 +869,11 @@ static ssize_t ipa3_read_flt(struct file *file, char __user *ubuf, size_t count, eq = true; } else { rt_tbl = ipa3_id_find(entry->rule.rt_tbl_hdl); - if (rt_tbl) - rt_tbl_idx = rt_tbl->idx; + if (rt_tbl == NULL || + rt_tbl->cookie != IPA_RT_TBL_COOKIE) + rt_tbl_idx = ~0; else - rt_tbl_idx = ~0; + rt_tbl_idx = rt_tbl->idx; bitmap = entry->rule.attrib.attrib_mask; eq = false; } diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c index c2fb87ab757b..a03d8978c6c2 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c @@ -1157,6 +1157,13 @@ int ipa3_add_flt_rule_after(struct ipa_ioc_add_flt_rule_after *rules) goto bail; } + if (entry->cookie != IPA_FLT_COOKIE) { + IPAERR_RL("Invalid cookie value = %u flt hdl id = %d\n", + entry->cookie, rules->add_after_hdl); + result = -EINVAL; + goto bail; + } + if (entry->tbl != tbl) { IPAERR_RL("given entry does not match the table\n"); result = -EINVAL; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h index f12eb6ded556..89c7b66b98d6 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h @@ -38,7 +38,6 @@ #include "ipa_uc_offload_i.h" #define DRV_NAME "ipa" -#define NAT_DEV_NAME "ipaNatTable" #define IPA_COOKIE 0x57831603 #define IPA_RT_RULE_COOKIE 0x57831604 #define IPA_RT_TBL_COOKIE 0x57831605 @@ -65,6 +64,8 @@ #define IPA_IPC_LOG_PAGES 50 +#define IPA_MAX_NUM_REQ_CACHE 10 + #define IPADBG(fmt, args...) 
\ do { \ pr_debug(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args);\ @@ -434,6 +435,7 @@ struct ipa3_rt_entry { int id; u16 prio; u16 rule_id; + u16 rule_id_valid; }; /** @@ -1050,6 +1052,11 @@ struct ipa_dma_task_info { struct ipahal_imm_cmd_pyld *cmd_pyld; }; +struct ipa_cne_evt { + struct ipa_wan_msg wan_msg; + struct ipa_msg_meta msg_meta; +}; + /** * struct ipa3_context - IPA context * @class: pointer to the struct class @@ -1271,6 +1278,9 @@ struct ipa3_context { u32 ipa_tz_unlock_reg_num; struct ipa_tz_unlock_reg_info *ipa_tz_unlock_reg; struct ipa_dma_task_info dma_task_info; + struct ipa_cne_evt ipa_cne_evt_req_cache[IPA_MAX_NUM_REQ_CACHE]; + int num_ipa_cne_evt_req; + struct mutex ipa_cne_evt_lock; }; /** @@ -1606,6 +1616,8 @@ int ipa3_del_hdr_proc_ctx_by_user(struct ipa_ioc_del_hdr_proc_ctx *hdls, */ int ipa3_add_rt_rule(struct ipa_ioc_add_rt_rule *rules); +int ipa3_add_rt_rule_ext(struct ipa_ioc_add_rt_rule_ext *rules); + int ipa3_add_rt_rule_after(struct ipa_ioc_add_rt_rule_after *rules); int ipa3_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls); @@ -1641,12 +1653,15 @@ int ipa3_reset_flt(enum ipa_ip_type ip); * NAT */ int ipa3_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem); +int ipa3_allocate_nat_table( + struct ipa_ioc_nat_ipv6ct_table_alloc *table_alloc); int ipa3_nat_init_cmd(struct ipa_ioc_v4_nat_init *init); int ipa3_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma); int ipa3_nat_del_cmd(struct ipa_ioc_v4_nat_del *del); +int ipa3_del_nat_table(struct ipa_ioc_nat_ipv6ct_table_del *del); /* * Messaging @@ -1721,6 +1736,8 @@ int ipa3_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in, ipa_notify_cb notify, void *priv, u8 hdr_len, struct ipa_ntn_conn_out_params *outp); int ipa3_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl); +int ipa3_ntn_uc_reg_rdyCB(void (*ipauc_ready_cb)(void *), void *priv); +void ipa3_ntn_uc_dereg_rdyCB(void); /* * To retrieve doorbell physical address of diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c index 3267e0e83a82..0bf2be7f8463 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c @@ -630,6 +630,8 @@ int ipa3_mhi_destroy_channel(enum ipa_client_type client) } ep = &ipa3_ctx->ep[ipa_ep_idx]; + IPA_ACTIVE_CLIENTS_INC_EP(client); + IPA_MHI_DBG("reset event ring (hdl: %lu, ep: %d)\n", ep->gsi_evt_ring_hdl, ipa_ep_idx); @@ -651,8 +653,10 @@ int ipa3_mhi_destroy_channel(enum ipa_client_type client) goto fail; } + IPA_ACTIVE_CLIENTS_DEC_EP(client); return 0; fail: + IPA_ACTIVE_CLIENTS_DEC_EP(client); return res; } diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c index 0256ff89ae24..a78a0a608cb4 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c @@ -34,7 +34,6 @@ enum nat_table_type { #define NAT_TABLE_ENTRY_SIZE_BYTE 32 #define NAT_INTEX_TABLE_ENTRY_SIZE_BYTE 4 - static int ipa3_nat_vma_fault_remap( struct vm_area_struct *vma, struct vm_fault *vmf) { @@ -167,7 +166,7 @@ int ipa3_create_nat_device(void) IPADBG("\n"); mutex_lock(&nat_ctx->lock); - nat_ctx->class = class_create(THIS_MODULE, NAT_DEV_NAME); + nat_ctx->class = class_create(THIS_MODULE, IPA_NAT_DEV_NAME); if (IS_ERR(nat_ctx->class)) { IPAERR("unable to create the class\n"); result = -ENODEV; @@ -176,7 +175,7 @@ int ipa3_create_nat_device(void) result = alloc_chrdev_region(&nat_ctx->dev_num, 0, 1, - NAT_DEV_NAME); + IPA_NAT_DEV_NAME); if (result) { 
IPAERR("alloc_chrdev_region err.\n"); result = -ENODEV; @@ -185,7 +184,7 @@ int ipa3_create_nat_device(void) nat_ctx->dev = device_create(nat_ctx->class, NULL, nat_ctx->dev_num, nat_ctx, - "%s", NAT_DEV_NAME); + "%s", IPA_NAT_DEV_NAME); if (IS_ERR(nat_ctx->dev)) { IPAERR("device_create err:%ld\n", PTR_ERR(nat_ctx->dev)); @@ -253,9 +252,10 @@ int ipa3_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem) IPADBG("passed memory size %zu\n", mem->size); mutex_lock(&nat_ctx->lock); - if (strcmp(mem->dev_name, NAT_DEV_NAME)) { + if (strcmp(IPA_NAT_DEV_NAME, mem->dev_name)) { IPAERR_RL("Nat device name mismatch\n"); - IPAERR_RL("Expect: %s Recv: %s\n", NAT_DEV_NAME, mem->dev_name); + IPAERR_RL("Expect: %s Recv: %s\n", + IPA_NAT_DEV_NAME, mem->dev_name); result = -EPERM; goto bail; } @@ -306,6 +306,34 @@ bail: return result; } +/** +* ipa3_allocate_nat_table() - Allocates memory for the NAT table +* @table_alloc: [in/out] memory parameters +* +* Called by NAT client to allocate memory for the table entries. +* Based on the request size either shared or system memory will be used. +* +* Returns: 0 on success, negative on failure +*/ +int ipa3_allocate_nat_table(struct ipa_ioc_nat_ipv6ct_table_alloc *table_alloc) +{ + int result; + struct ipa_ioc_nat_alloc_mem tmp; + + strlcpy(tmp.dev_name, IPA_NAT_DEV_NAME, IPA_RESOURCE_NAME_MAX); + tmp.size = table_alloc->size; + tmp.offset = 0; + + result = ipa3_allocate_nat_device(&tmp); + if (result) + goto bail; + + table_alloc->offset = tmp.offset; + +bail: + return result; +} + /* IOCTL function handlers */ /** * ipa3_nat_init_cmd() - Post IP_V4_NAT_INIT command to IPA HW @@ -833,3 +861,22 @@ destroy_regwrt_imm_cmd: bail: return result; } + +/** +* ipa3_del_nat_table() - Delete the NAT table +* @del: [in] delete table parameters +* +* Called by NAT client to delete the table +* +* Returns: 0 on success, negative on failure +*/ +int ipa3_del_nat_table(struct ipa_ioc_nat_ipv6ct_table_del *del) +{ + struct ipa_ioc_v4_nat_del tmp; + + tmp.table_index = del->table_index; + tmp.public_ip_addr = ipa3_ctx->nat_mem.public_ip_addr; + + return ipa3_nat_del_cmd(&tmp); +} + diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c index 571852c076ea..4897c4dccf59 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c @@ -695,6 +695,57 @@ int ipa3_qmi_filter_request_ex_send( resp.resp.error, "ipa_install_filter"); } +/* sending ul-filter-install-request to modem*/ +int ipa3_qmi_ul_filter_request_send( + struct ipa_configure_ul_firewall_rules_req_msg_v01 *req) +{ + struct ipa_configure_ul_firewall_rules_resp_msg_v01 resp; + struct msg_desc req_desc, resp_desc; + int rc; + + IPAWANDBG("IPACM pass %u rules to Q6\n", + req->firewall_rules_list_len); + + mutex_lock(&ipa3_qmi_lock); + if (ipa3_qmi_ctx != NULL) { + /* cache the qmi_filter_request */ + memcpy( + &(ipa3_qmi_ctx->ipa_configure_ul_firewall_rules_req_msg_cache[ + ipa3_qmi_ctx->num_ipa_configure_ul_firewall_rules_req_msg]), + req, + sizeof(struct + ipa_configure_ul_firewall_rules_req_msg_v01)); + ipa3_qmi_ctx->num_ipa_configure_ul_firewall_rules_req_msg++; + ipa3_qmi_ctx->num_ipa_configure_ul_firewall_rules_req_msg %= + MAX_NUM_QMI_RULE_CACHE; + } + mutex_unlock(&ipa3_qmi_lock); + + req_desc.max_msg_len = + QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_MAX_MSG_LEN_V01; + req_desc.msg_id = QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_V01; + req_desc.ei_array = + 
ipa3_configure_ul_firewall_rules_req_msg_data_v01_ei; + + memset(&resp, 0, + sizeof(struct ipa_configure_ul_firewall_rules_resp_msg_v01)); + resp_desc.max_msg_len = + QMI_IPA_INSTALL_UL_FIREWALL_RULES_RESP_MAX_MSG_LEN_V01; + resp_desc.msg_id = QMI_IPA_INSTALL_UL_FIREWALL_RULES_RESP_V01; + resp_desc.ei_array = + ipa3_configure_ul_firewall_rules_resp_msg_data_v01_ei; + + rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, + req, + sizeof( + struct ipa_configure_ul_firewall_rules_req_msg_v01), + &resp_desc, &resp, sizeof(resp), + QMI_SEND_REQ_TIMEOUT_MS); + return ipa3_check_qmi_response(rc, + QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_V01, resp.resp.result, + resp.resp.error, "ipa_received_ul_firewall_filter"); +} + int ipa3_qmi_enable_force_clear_datapath_send( struct ipa_enable_force_clear_datapath_req_msg_v01 *req) { @@ -880,6 +931,7 @@ static void ipa3_q6_clnt_ind_cb(struct qmi_handle *handle, unsigned int msg_id, void *ind_cb_priv) { struct ipa_data_usage_quota_reached_ind_msg_v01 qmi_ind; + struct ipa_configure_ul_firewall_rules_ind_msg_v01 qmi_ul_firewall_ind; struct msg_desc qmi_ind_desc; int rc = 0; @@ -888,7 +940,7 @@ static void ipa3_q6_clnt_ind_cb(struct qmi_handle *handle, unsigned int msg_id, return; } - if (QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_V01 == msg_id) { + if (msg_id == QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_V01) { memset(&qmi_ind, 0, sizeof( struct ipa_data_usage_quota_reached_ind_msg_v01)); qmi_ind_desc.max_msg_len = @@ -908,6 +960,36 @@ static void ipa3_q6_clnt_ind_cb(struct qmi_handle *handle, unsigned int msg_id, ipa3_broadcast_quota_reach_ind(qmi_ind.apn.mux_id, IPA_UPSTEAM_MODEM); } + + if (msg_id == QMI_IPA_INSTALL_UL_FIREWALL_RULES_IND_V01) { + memset(&qmi_ul_firewall_ind, 0, sizeof( + struct ipa_configure_ul_firewall_rules_ind_msg_v01)); + qmi_ind_desc.max_msg_len = + QMI_IPA_INSTALL_UL_FIREWALL_RULES_IND_MAX_MSG_LEN_V01; + qmi_ind_desc.msg_id = QMI_IPA_INSTALL_UL_FIREWALL_RULES_IND_V01; + qmi_ind_desc.ei_array = + ipa3_configure_ul_firewall_rules_ind_msg_data_v01_ei; + + rc = qmi_kernel_decode( + &qmi_ind_desc, &qmi_ul_firewall_ind, msg, msg_len); + if (rc < 0) { + IPAWANERR("Error decoding msg_id %d\n", msg_id); + return; + } + + IPAWANDBG("UL firewall rules install indication on Q6"); + if (qmi_ul_firewall_ind.result.is_success == + QMI_IPA_UL_FIREWALL_STATUS_SUCCESS_V01) { + IPAWANDBG(" : Success\n"); + IPAWANDBG + ("Mux ID : %d\n", qmi_ul_firewall_ind.result.mux_id); + } else if (qmi_ul_firewall_ind.result.is_success == + QMI_IPA_UL_FIREWALL_STATUS_FAILURE_V01){ + IPAWANERR(": Failure\n"); + } else { + IPAWANERR(": Unexpected Result"); + } + } } static void ipa3_q6_clnt_svc_arrive(struct work_struct *work) @@ -1363,6 +1445,74 @@ int ipa3_qmi_stop_data_qouta(void) resp.resp.error, "ipa_stop_data_usage_quota_req_msg_v01"); } +int ipa3_qmi_enable_per_client_stats( + struct ipa_enable_per_client_stats_req_msg_v01 *req, + struct ipa_enable_per_client_stats_resp_msg_v01 *resp) +{ + struct msg_desc req_desc, resp_desc; + int rc; + + req_desc.max_msg_len = + QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_MAX_MSG_LEN_V01; + req_desc.msg_id = + QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_V01; + req_desc.ei_array = + ipa3_enable_per_client_stats_req_msg_data_v01_ei; + + resp_desc.max_msg_len = + QMI_IPA_ENABLE_PER_CLIENT_STATS_RESP_MAX_MSG_LEN_V01; + resp_desc.msg_id = + QMI_IPA_ENABLE_PER_CLIENT_STATS_RESP_V01; + resp_desc.ei_array = + ipa3_enable_per_client_stats_resp_msg_data_v01_ei; + + IPAWANDBG("Sending QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_V01\n"); + + rc = qmi_send_req_wait(ipa_q6_clnt, 
&req_desc, req, + sizeof(struct ipa_enable_per_client_stats_req_msg_v01), + &resp_desc, resp, + sizeof(struct ipa_enable_per_client_stats_resp_msg_v01), + QMI_SEND_STATS_REQ_TIMEOUT_MS); + + IPAWANDBG("QMI_IPA_ENABLE_PER_CLIENT_STATS_RESP_V01 received\n"); + + return ipa3_check_qmi_response(rc, + QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_V01, resp->resp.result, + resp->resp.error, "ipa3_qmi_enable_per_client_stats"); +} + +int ipa3_qmi_get_per_client_packet_stats( + struct ipa_get_stats_per_client_req_msg_v01 *req, + struct ipa_get_stats_per_client_resp_msg_v01 *resp) +{ + struct msg_desc req_desc, resp_desc; + int rc; + + req_desc.max_msg_len = QMI_IPA_GET_STATS_PER_CLIENT_REQ_MAX_MSG_LEN_V01; + req_desc.msg_id = QMI_IPA_GET_STATS_PER_CLIENT_REQ_V01; + req_desc.ei_array = ipa3_get_stats_per_client_req_msg_data_v01_ei; + + resp_desc.max_msg_len = + QMI_IPA_GET_STATS_PER_CLIENT_RESP_MAX_MSG_LEN_V01; + resp_desc.msg_id = QMI_IPA_GET_STATS_PER_CLIENT_RESP_V01; + resp_desc.ei_array = ipa3_get_stats_per_client_resp_msg_data_v01_ei; + + IPAWANDBG("Sending QMI_IPA_GET_STATS_PER_CLIENT_REQ_V01\n"); + + rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req, + sizeof(struct ipa_get_stats_per_client_req_msg_v01), + &resp_desc, resp, + sizeof(struct ipa_get_stats_per_client_resp_msg_v01), + QMI_SEND_STATS_REQ_TIMEOUT_MS); + + IPAWANDBG("QMI_IPA_GET_STATS_PER_CLIENT_RESP_V01 received\n"); + + return ipa3_check_qmi_response(rc, + QMI_IPA_GET_STATS_PER_CLIENT_REQ_V01, resp->resp.result, + resp->resp.error, + "struct ipa_get_stats_per_client_req_msg_v01"); +} + void ipa3_qmi_init(void) { mutex_init(&ipa3_qmi_lock); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h index e6f1e2ce0b75..297dca6b88cf 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h @@ -32,54 +32,62 @@ #define IPAWANDBG(fmt, args...) \ do { \ - pr_debug(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \ + pr_debug(DEV_NAME " %s:%d " fmt, __func__,\ + __LINE__, ## args); \ IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ - DEV_NAME " %s:%d " fmt, ## args); \ + DEV_NAME " %s:%d " fmt, ## args); \ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ - DEV_NAME " %s:%d " fmt, ## args); \ + DEV_NAME " %s:%d " fmt, ## args); \ } while (0) #define IPAWANDBG_LOW(fmt, args...) \ do { \ - pr_debug(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \ + pr_debug(DEV_NAME " %s:%d " fmt, __func__,\ + __LINE__, ## args); \ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ - DEV_NAME " %s:%d " fmt, ## args); \ + DEV_NAME " %s:%d " fmt, ## args); \ } while (0) #define IPAWANERR(fmt, args...) \ do { \ - pr_err(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \ + pr_err(DEV_NAME " %s:%d " fmt, __func__,\ + __LINE__, ## args); \ IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ - DEV_NAME " %s:%d " fmt, ## args); \ + DEV_NAME " %s:%d " fmt, ## args); \ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ - DEV_NAME " %s:%d " fmt, ## args); \ + DEV_NAME " %s:%d " fmt, ## args); \ } while (0) #define IPAWANINFO(fmt, args...) 
\ do { \ - pr_info(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \ + pr_info(DEV_NAME " %s:%d " fmt, __func__,\ + __LINE__, ## args); \ IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ - DEV_NAME " %s:%d " fmt, ## args); \ + DEV_NAME " %s:%d " fmt, ## args); \ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ - DEV_NAME " %s:%d " fmt, ## args); \ + DEV_NAME " %s:%d " fmt, ## args); \ } while (0) extern struct ipa3_qmi_context *ipa3_qmi_ctx; struct ipa3_qmi_context { -struct ipa_ioc_ext_intf_prop q6_ul_filter_rule[MAX_NUM_Q6_RULE]; -u32 q6_ul_filter_rule_hdl[MAX_NUM_Q6_RULE]; -int num_ipa_install_fltr_rule_req_msg; -struct ipa_install_fltr_rule_req_msg_v01 + struct ipa_ioc_ext_intf_prop q6_ul_filter_rule[MAX_NUM_Q6_RULE]; + u32 q6_ul_filter_rule_hdl[MAX_NUM_Q6_RULE]; + int num_ipa_install_fltr_rule_req_msg; + struct ipa_install_fltr_rule_req_msg_v01 ipa_install_fltr_rule_req_msg_cache[MAX_NUM_QMI_RULE_CACHE]; -int num_ipa_install_fltr_rule_req_ex_msg; -struct ipa_install_fltr_rule_req_ex_msg_v01 + int num_ipa_install_fltr_rule_req_ex_msg; + struct ipa_install_fltr_rule_req_ex_msg_v01 ipa_install_fltr_rule_req_ex_msg_cache[MAX_NUM_QMI_RULE_CACHE]; -int num_ipa_fltr_installed_notif_req_msg; -struct ipa_fltr_installed_notif_req_msg_v01 + int num_ipa_fltr_installed_notif_req_msg; + struct ipa_fltr_installed_notif_req_msg_v01 ipa_fltr_installed_notif_req_msg_cache[MAX_NUM_QMI_RULE_CACHE]; -bool modem_cfg_emb_pipe_flt; + int num_ipa_configure_ul_firewall_rules_req_msg; + struct ipa_configure_ul_firewall_rules_req_msg_v01 + ipa_configure_ul_firewall_rules_req_msg_cache + [MAX_NUM_QMI_RULE_CACHE]; + bool modem_cfg_emb_pipe_flt; }; struct ipa3_rmnet_mux_val { @@ -95,56 +103,69 @@ extern struct elem_info ipa3_init_modem_driver_req_msg_data_v01_ei[]; extern struct elem_info ipa3_init_modem_driver_resp_msg_data_v01_ei[]; extern struct elem_info ipa3_indication_reg_req_msg_data_v01_ei[]; extern struct elem_info ipa3_indication_reg_resp_msg_data_v01_ei[]; -extern struct elem_info ipa3_master_driver_init_complt_ind_msg_data_v01_ei[]; + +extern struct elem_info + ipa3_master_driver_init_complt_ind_msg_data_v01_ei[]; extern struct elem_info ipa3_install_fltr_rule_req_msg_data_v01_ei[]; extern struct elem_info ipa3_install_fltr_rule_resp_msg_data_v01_ei[]; extern struct elem_info ipa3_fltr_installed_notif_req_msg_data_v01_ei[]; extern struct elem_info ipa3_fltr_installed_notif_resp_msg_data_v01_ei[]; -extern struct elem_info ipa3_enable_force_clear_datapath_req_msg_data_v01_ei[]; -extern struct elem_info ipa3_enable_force_clear_datapath_resp_msg_data_v01_ei[]; -extern struct elem_info ipa3_disable_force_clear_datapath_req_msg_data_v01_ei[]; + +extern struct elem_info + ipa3_enable_force_clear_datapath_req_msg_data_v01_ei[]; +extern struct elem_info + ipa3_enable_force_clear_datapath_resp_msg_data_v01_ei[]; +extern struct elem_info + ipa3_disable_force_clear_datapath_req_msg_data_v01_ei[]; extern struct elem_info ipa3_disable_force_clear_datapath_resp_msg_data_v01_ei[]; extern struct elem_info ipa3_config_req_msg_data_v01_ei[]; extern struct elem_info ipa3_config_resp_msg_data_v01_ei[]; extern struct elem_info ipa3_get_data_stats_req_msg_data_v01_ei[]; extern struct elem_info ipa3_get_data_stats_resp_msg_data_v01_ei[]; -extern struct elem_info ipa3_get_apn_data_stats_req_msg_data_v01_ei[]; -extern struct elem_info ipa3_get_apn_data_stats_resp_msg_data_v01_ei[]; -extern struct elem_info ipa3_set_data_usage_quota_req_msg_data_v01_ei[]; -extern struct elem_info ipa3_set_data_usage_quota_resp_msg_data_v01_ei[]; -extern 
struct elem_info ipa3_data_usage_quota_reached_ind_msg_data_v01_ei[]; -extern struct elem_info ipa3_stop_data_usage_quota_req_msg_data_v01_ei[]; -extern struct elem_info ipa3_stop_data_usage_quota_resp_msg_data_v01_ei[]; -extern struct elem_info ipa3_init_modem_driver_cmplt_req_msg_data_v01_ei[]; -extern struct elem_info ipa3_init_modem_driver_cmplt_resp_msg_data_v01_ei[]; -extern struct elem_info ipa3_install_fltr_rule_req_ex_msg_data_v01_ei[]; -extern struct elem_info ipa3_install_fltr_rule_resp_ex_msg_data_v01_ei[]; - - extern struct elem_info - ipa3_install_fltr_rule_req_ex_msg_data_v01_ei[]; - extern struct elem_info - ipa3_install_fltr_rule_resp_ex_msg_data_v01_ei[]; - extern struct elem_info - ipa3_ul_firewall_rule_type_data_v01_ei[]; - extern struct elem_info - ipa3_ul_firewall_config_result_type_data_v01_ei[]; - extern struct elem_info - ipa3_per_client_stats_info_type_data_v01_ei[]; - extern struct elem_info - ipa3_enable_per_client_stats_req_msg_data_v01_ei[]; - extern struct elem_info - ipa3_enable_per_client_stats_resp_msg_data_v01_ei[]; - extern struct elem_info - ipa3_get_stats_per_client_req_msg_data_v01_ei[]; - extern struct elem_info - ipa3_get_stats_per_client_resp_msg_data_v01_ei[]; - extern struct elem_info - ipa3_configure_ul_firewall_rules_req_msg_data_v01_ei[]; - extern struct elem_info - ipa3_configure_ul_firewall_rules_resp_msg_data_v01_ei[]; - extern struct elem_info - ipa3_configure_ul_firewall_rules_ind_msg_data_v01_ei[]; + +extern struct elem_info + ipa3_get_apn_data_stats_req_msg_data_v01_ei[]; +extern struct elem_info + ipa3_get_apn_data_stats_resp_msg_data_v01_ei[]; +extern struct elem_info + ipa3_set_data_usage_quota_req_msg_data_v01_ei[]; +extern struct elem_info + ipa3_set_data_usage_quota_resp_msg_data_v01_ei[]; +extern struct elem_info + ipa3_data_usage_quota_reached_ind_msg_data_v01_ei[]; +extern struct elem_info + ipa3_stop_data_usage_quota_req_msg_data_v01_ei[]; +extern struct elem_info + ipa3_stop_data_usage_quota_resp_msg_data_v01_ei[]; +extern struct elem_info + ipa3_init_modem_driver_cmplt_req_msg_data_v01_ei[]; +extern struct elem_info + ipa3_init_modem_driver_cmplt_resp_msg_data_v01_ei[]; +extern struct elem_info + ipa3_install_fltr_rule_req_ex_msg_data_v01_ei[]; +extern struct elem_info + ipa3_install_fltr_rule_resp_ex_msg_data_v01_ei[]; +extern struct elem_info + ipa3_ul_firewall_rule_type_data_v01_ei[]; +extern struct elem_info + ipa3_ul_firewall_config_result_type_data_v01_ei[]; +extern struct elem_info + ipa3_per_client_stats_info_type_data_v01_ei[]; +extern struct elem_info + ipa3_enable_per_client_stats_req_msg_data_v01_ei[]; +extern struct elem_info + ipa3_enable_per_client_stats_resp_msg_data_v01_ei[]; +extern struct elem_info + ipa3_get_stats_per_client_req_msg_data_v01_ei[]; +extern struct elem_info + ipa3_get_stats_per_client_resp_msg_data_v01_ei[]; +extern struct elem_info + ipa3_configure_ul_firewall_rules_req_msg_data_v01_ei[]; +extern struct elem_info + ipa3_configure_ul_firewall_rules_resp_msg_data_v01_ei[]; +extern struct elem_info + ipa3_configure_ul_firewall_rules_ind_msg_data_v01_ei[]; /** * struct ipa3_rmnet_context - IPA rmnet context @@ -173,6 +194,9 @@ int ipa3_qmi_filter_request_send( int ipa3_qmi_filter_request_ex_send( struct ipa_install_fltr_rule_req_ex_msg_v01 *req); +int ipa3_qmi_ul_filter_request_send( + struct ipa_configure_ul_firewall_rules_req_msg_v01 *req); + /* sending filter-installed-notify-request to modem*/ int ipa3_qmi_filter_notify_send(struct ipa_fltr_installed_notif_req_msg_v01 *req); @@ -219,6 
+243,16 @@ int rmnet_ipa3_query_tethering_stats_all( struct wan_ioctl_query_tether_stats_all *data); int rmnet_ipa3_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data); +int rmnet_ipa3_set_lan_client_info(struct wan_ioctl_lan_client_info *data); + +int rmnet_ipa3_clear_lan_client_info(struct wan_ioctl_lan_client_info *data); + +int rmnet_ipa3_send_lan_client_msg(struct wan_ioctl_send_lan_client_msg *data); + +int rmnet_ipa3_enable_per_client_stats(bool *data); + +int rmnet_ipa3_query_per_client_stats( + struct wan_ioctl_query_per_client_stats *data); int ipa3_qmi_get_data_stats(struct ipa_get_data_stats_req_msg_v01 *req, struct ipa_get_data_stats_resp_msg_v01 *resp); @@ -232,6 +266,14 @@ int ipa3_qmi_stop_data_qouta(void); void ipa3_q6_handshake_complete(bool ssr_bootup); +int ipa3_qmi_enable_per_client_stats( + struct ipa_enable_per_client_stats_req_msg_v01 *req, + struct ipa_enable_per_client_stats_resp_msg_v01 *resp); + +int ipa3_qmi_get_per_client_packet_stats( + struct ipa_get_stats_per_client_req_msg_v01 *req, + struct ipa_get_stats_per_client_resp_msg_v01 *resp); + void ipa3_qmi_init(void); void ipa3_qmi_cleanup(void); @@ -252,6 +294,12 @@ static inline int ipa3_qmi_filter_request_send( return -EPERM; } +static inline int ipa3_qmi_ul_filter_request_send( + struct ipa_configure_ul_firewall_rules_req_msg_v01 *req) +{ + return -EPERM; +} + static inline int ipa3_qmi_filter_request_ex_send( struct ipa_install_fltr_rule_req_ex_msg_v01 *req) { @@ -348,12 +396,28 @@ static inline int ipa3_qmi_stop_data_qouta(void) static inline void ipa3_q6_handshake_complete(bool ssr_bootup) { } +static inline int ipa3_qmi_enable_per_client_stats( + struct ipa_enable_per_client_stats_req_msg_v01 *req, + struct ipa_enable_per_client_stats_resp_msg_v01 *resp) +{ + return -EPERM; +} + +static inline int ipa3_qmi_get_per_client_packet_stats( + struct ipa_get_stats_per_client_req_msg_v01 *req, + struct ipa_get_stats_per_client_resp_msg_v01 *resp) +{ + return -EPERM; +} + static inline void ipa3_qmi_init(void) { + } static inline void ipa3_qmi_cleanup(void) { + } #endif /* CONFIG_RMNET_IPA3 */ diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c index 8e790c89ed13..b9af782b4f6e 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c @@ -72,11 +72,18 @@ static int ipa_generate_rt_hw_rule(enum ipa_ip_type ip, if (entry->proc_ctx || (entry->hdr && entry->hdr->is_hdr_proc_ctx)) { struct ipa3_hdr_proc_ctx_entry *proc_ctx; proc_ctx = (entry->proc_ctx) ? 
: entry->hdr->proc_ctx; - gen_params.hdr_lcl = ipa3_ctx->hdr_proc_ctx_tbl_lcl; - gen_params.hdr_type = IPAHAL_RT_RULE_HDR_PROC_CTX; - gen_params.hdr_ofst = proc_ctx->offset_entry->offset + - ipa3_ctx->hdr_proc_ctx_tbl.start_offset; - } else if (entry->hdr) { + if ((proc_ctx == NULL) || + (proc_ctx->cookie != IPA_PROC_HDR_COOKIE)) { + gen_params.hdr_type = IPAHAL_RT_RULE_HDR_NONE; + gen_params.hdr_ofst = 0; + } else { + gen_params.hdr_lcl = ipa3_ctx->hdr_proc_ctx_tbl_lcl; + gen_params.hdr_type = IPAHAL_RT_RULE_HDR_PROC_CTX; + gen_params.hdr_ofst = proc_ctx->offset_entry->offset + + ipa3_ctx->hdr_proc_ctx_tbl.start_offset; + } + } else if ((entry->hdr != NULL) && + (entry->hdr->cookie == IPA_HDR_COOKIE)) { gen_params.hdr_lcl = ipa3_ctx->hdr_tbl_lcl; gen_params.hdr_type = IPAHAL_RT_RULE_HDR_RAW; gen_params.hdr_ofst = entry->hdr->offset_entry->offset; @@ -911,7 +918,8 @@ static int __ipa_rt_validate_hndls(const struct ipa_rt_rule *rule, static int __ipa_create_rt_entry(struct ipa3_rt_entry **entry, const struct ipa_rt_rule *rule, struct ipa3_rt_tbl *tbl, struct ipa3_hdr_entry *hdr, - struct ipa3_hdr_proc_ctx_entry *proc_ctx) + struct ipa3_hdr_proc_ctx_entry *proc_ctx, + u16 rule_id) { int id; @@ -926,11 +934,16 @@ static int __ipa_create_rt_entry(struct ipa3_rt_entry **entry, (*(entry))->tbl = tbl; (*(entry))->hdr = hdr; (*(entry))->proc_ctx = proc_ctx; - id = ipa3_alloc_rule_id(&tbl->rule_ids); - if (id < 0) { - IPAERR("failed to allocate rule id\n"); - WARN_ON(1); - goto alloc_rule_id_fail; + if (rule_id) { + id = rule_id; + (*(entry))->rule_id_valid = 1; + } else { + id = ipa3_alloc_rule_id(&tbl->rule_ids); + if (id < 0) { + IPAERR("failed to allocate rule id\n"); + WARN_ON(1); + goto alloc_rule_id_fail; + } } (*(entry))->rule_id = id; @@ -977,7 +990,8 @@ ipa_insert_failed: } static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name, - const struct ipa_rt_rule *rule, u8 at_rear, u32 *rule_hdl) + const struct ipa_rt_rule *rule, u8 at_rear, u32 *rule_hdl, + u16 rule_id) { struct ipa3_rt_tbl *tbl; struct ipa3_rt_entry *entry; @@ -1005,7 +1019,8 @@ static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name, goto error; } - if (__ipa_create_rt_entry(&entry, rule, tbl, hdr, proc_ctx)) + if (__ipa_create_rt_entry(&entry, rule, tbl, hdr, proc_ctx, + rule_id)) goto error; if (at_rear) @@ -1036,7 +1051,7 @@ static int __ipa_add_rt_rule_after(struct ipa3_rt_tbl *tbl, if (__ipa_rt_validate_hndls(rule, &hdr, &proc_ctx)) goto error; - if (__ipa_create_rt_entry(&entry, rule, tbl, hdr, proc_ctx)) + if (__ipa_create_rt_entry(&entry, rule, tbl, hdr, proc_ctx, 0)) goto error; list_add(&entry->link, &((*add_after_entry)->link)); @@ -1080,8 +1095,54 @@ int ipa3_add_rt_rule(struct ipa_ioc_add_rt_rule *rules) if (__ipa_add_rt_rule(rules->ip, rules->rt_tbl_name, &rules->rules[i].rule, rules->rules[i].at_rear, - &rules->rules[i].rt_rule_hdl)) { - IPAERR_RL("failed to add rt rule %d\n", i); + &rules->rules[i].rt_rule_hdl, + 0)) { + IPAERR("failed to add rt rule %d\n", i); + rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED; + } else { + rules->rules[i].status = 0; + } + } + + if (rules->commit) + if (ipa3_ctx->ctrl->ipa3_commit_rt(rules->ip)) { + ret = -EPERM; + goto bail; + } + + ret = 0; +bail: + mutex_unlock(&ipa3_ctx->lock); + return ret; +} + +/** + * ipa3_add_rt_rule_ext() - Add the specified routing rules to SW with rule id + * and optionally commit to IPA HW + * @rules: [inout] set of routing rules to add + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be 
called from atomic context + */ +int ipa3_add_rt_rule_ext(struct ipa_ioc_add_rt_rule_ext *rules) +{ + int i; + int ret; + + if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) { + IPAERR("bad parm\n"); + return -EINVAL; + } + + mutex_lock(&ipa3_ctx->lock); + for (i = 0; i < rules->num_rules; i++) { + if (__ipa_add_rt_rule(rules->ip, rules->rt_tbl_name, + &rules->rules[i].rule, + rules->rules[i].at_rear, + &rules->rules[i].rt_rule_hdl, + rules->rules[i].rule_id)) { + IPAERR("failed to add rt rule %d\n", i); rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED; } else { rules->rules[i].status = 0; @@ -1145,6 +1206,13 @@ int ipa3_add_rt_rule_after(struct ipa_ioc_add_rt_rule_after *rules) goto bail; } + if (entry->cookie != IPA_RT_RULE_COOKIE) { + IPAERR_RL("Invalid cookie value = %u rule %d in rt tbls\n", + entry->cookie, rules->add_after_hdl); + ret = -EINVAL; + goto bail; + } + if (entry->tbl != tbl) { IPAERR_RL("given rt rule does not match the table\n"); ret = -EINVAL; @@ -1222,7 +1290,9 @@ int __ipa3_del_rt_rule(u32 rule_hdl) IPADBG("del rt rule tbl_idx=%d rule_cnt=%d rule_id=%d\n ref_cnt=%u", entry->tbl->idx, entry->tbl->rule_cnt, entry->rule_id, entry->tbl->ref_cnt); - idr_remove(&entry->tbl->rule_ids, entry->rule_id); + /* if rule id was allocated from idr, remove it */ + if (!entry->rule_id_valid) + idr_remove(&entry->tbl->rule_ids, entry->rule_id); if (entry->tbl->rule_cnt == 0 && entry->tbl->ref_cnt == 0) { if (__ipa_del_rt_tbl(entry->tbl)) IPAERR_RL("fail to del RT tbl\n"); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c index fdb6d05f683d..d4ff9c6ff851 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -149,6 +149,49 @@ int ipa3_get_ntn_stats(struct Ipa3HwStatsNTNInfoData_t *stats) return 0; } + +int ipa3_ntn_uc_reg_rdyCB(void (*ipa_ready_cb)(void *), void *user_data) +{ + int ret; + + if (!ipa3_ctx) { + IPAERR("IPA ctx is null\n"); + return -ENXIO; + } + + ret = ipa3_uc_state_check(); + if (ret) { + ipa3_ctx->uc_ntn_ctx.uc_ready_cb = ipa_ready_cb; + ipa3_ctx->uc_ntn_ctx.priv = user_data; + return 0; + } + + return -EEXIST; +} + +void ipa3_ntn_uc_dereg_rdyCB(void) +{ + ipa3_ctx->uc_ntn_ctx.uc_ready_cb = NULL; + ipa3_ctx->uc_ntn_ctx.priv = NULL; +} + +static void ipa3_uc_ntn_loaded_handler(void) +{ + if (!ipa3_ctx) { + IPAERR("IPA ctx is null\n"); + return; + } + + if (ipa3_ctx->uc_ntn_ctx.uc_ready_cb) { + ipa3_ctx->uc_ntn_ctx.uc_ready_cb( + ipa3_ctx->uc_ntn_ctx.priv); + + ipa3_ctx->uc_ntn_ctx.uc_ready_cb = + NULL; + ipa3_ctx->uc_ntn_ctx.priv = NULL; + } +} + int ipa3_ntn_init(void) { struct ipa3_uc_hdlrs uc_ntn_cbs = { 0 }; @@ -156,6 +199,8 @@ int ipa3_ntn_init(void) uc_ntn_cbs.ipa_uc_event_hdlr = ipa3_uc_ntn_event_handler; uc_ntn_cbs.ipa_uc_event_log_info_hdlr = ipa3_uc_ntn_event_log_info_handler; + uc_ntn_cbs.ipa_uc_loaded_hdlr = + ipa3_uc_ntn_loaded_handler; ipa3_uc_register_handlers(IPA_HW_FEATURE_NTN, &uc_ntn_cbs); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c index 4979f62b928f..29f2046610c8 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c @@ -3287,6 +3287,8 @@ int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type, api_ctrl->ipa_tear_down_uc_offload_pipes = ipa3_tear_down_uc_offload_pipes; api_ctrl->ipa_get_pdev = ipa3_get_pdev; + api_ctrl->ipa_ntn_uc_reg_rdyCB = ipa3_ntn_uc_reg_rdyCB; + api_ctrl->ipa_ntn_uc_dereg_rdyCB = ipa3_ntn_uc_dereg_rdyCB; return 0; } diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c index 8fbde6675070..c810adc466b3 100644 --- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c +++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c @@ -142,6 +142,10 @@ struct rmnet_ipa3_context { u32 ipa3_to_apps_hdl; struct mutex pipe_handle_guard; struct mutex add_mux_channel_lock; + struct mutex per_client_stats_guard; + struct ipa_tether_device_info + tether_device + [IPACM_MAX_CLIENT_DEVICE_TYPES]; }; static struct rmnet_ipa3_context *rmnet_ipa3_ctx; @@ -423,6 +427,8 @@ int ipa3_copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01 { int i, j; + /* prevent multi-threads accessing rmnet_ipa3_ctx->num_q6_rules */ + mutex_lock(&rmnet_ipa3_ctx->add_mux_channel_lock); if (rule_req->filter_spec_ex_list_valid == true) { rmnet_ipa3_ctx->num_q6_rules = rule_req->filter_spec_ex_list_len; @@ -431,6 +437,8 @@ int ipa3_copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01 } else { rmnet_ipa3_ctx->num_q6_rules = 0; IPAWANERR("got no UL rules from modem\n"); + mutex_unlock(&rmnet_ipa3_ctx-> + add_mux_channel_lock); return -EINVAL; } @@ -633,9 +641,13 @@ failure: rmnet_ipa3_ctx->num_q6_rules = 0; memset(ipa3_qmi_ctx->q6_ul_filter_rule, 0, sizeof(ipa3_qmi_ctx->q6_ul_filter_rule)); + mutex_unlock(&rmnet_ipa3_ctx-> + add_mux_channel_lock); return -EINVAL; success: + mutex_unlock(&rmnet_ipa3_ctx-> + add_mux_channel_lock); return 0; } @@ -1437,8 +1449,13 @@ static int handle3_egress_format(struct net_device *dev, if (rmnet_ipa3_ctx->num_q6_rules != 0) { /* 
already got Q6 UL filter rules*/ - if (ipa3_qmi_ctx->modem_cfg_emb_pipe_flt == false) + if (ipa3_qmi_ctx->modem_cfg_emb_pipe_flt == false) { + /* prevent multi-threads accessing num_q6_rules */ + mutex_lock(&rmnet_ipa3_ctx->add_mux_channel_lock); rc = ipa3_wwan_add_ul_flt_rule_to_ipa(); + mutex_unlock(&rmnet_ipa3_ctx-> + add_mux_channel_lock); + } if (rc) IPAWANERR("install UL rules failed\n"); else @@ -2571,7 +2588,9 @@ static void rmnet_ipa_free_msg(void *buff, u32 len, u32 type) } if (type != IPA_TETHERING_STATS_UPDATE_STATS && - type != IPA_TETHERING_STATS_UPDATE_NETWORK_STATS) { + type != IPA_TETHERING_STATS_UPDATE_NETWORK_STATS && + type != IPA_PER_CLIENT_STATS_CONNECT_EVENT && + type != IPA_PER_CLIENT_STATS_DISCONNECT_EVENT) { IPAWANERR("Wrong type given. buff %p type %d\n", buff, type); } @@ -2819,6 +2838,9 @@ int rmnet_ipa3_set_data_quota(struct wan_ioctl_set_data_quota *data) enum ipa_upstream_type upstream_type; int rc = 0; + /* prevent string buffer overflows */ + data->interface_name[IFNAMSIZ-1] = '\0'; + /* get IPA backhaul type */ upstream_type = find_upstream_type(data->interface_name); @@ -3111,6 +3133,10 @@ int rmnet_ipa3_query_tethering_stats(struct wan_ioctl_query_tether_stats *data, enum ipa_upstream_type upstream_type; int rc = 0; + /* prevent string buffer overflows */ + data->upstreamIface[IFNAMSIZ-1] = '\0'; + data->tetherIface[IFNAMSIZ-1] = '\0'; + /* get IPA backhaul type */ upstream_type = find_upstream_type(data->upstreamIface); @@ -3145,6 +3171,10 @@ int rmnet_ipa3_query_tethering_stats_all( int rc = 0; memset(&tether_stats, 0, sizeof(struct wan_ioctl_query_tether_stats)); + + /* prevent string buffer overflows */ + data->upstreamIface[IFNAMSIZ-1] = '\0'; + /* get IPA backhaul type */ upstream_type = find_upstream_type(data->upstreamIface); @@ -3188,6 +3218,9 @@ int rmnet_ipa3_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data) memset(&tether_stats, 0, sizeof(struct wan_ioctl_query_tether_stats)); + /* prevent string buffer overflows */ + data->upstreamIface[IFNAMSIZ-1] = '\0'; + /* get IPA backhaul type */ upstream_type = find_upstream_type(data->upstreamIface); @@ -3317,8 +3350,488 @@ void ipa3_q6_handshake_complete(bool ssr_bootup) } } +static inline bool rmnet_ipa3_check_any_client_inited +( + enum ipacm_per_client_device_type device_type +) +{ + int i = 0; + + for (; i < IPA_MAX_NUM_HW_PATH_CLIENTS; i++) { + if (rmnet_ipa3_ctx->tether_device[device_type]. + lan_client[i].client_idx != -1 && + rmnet_ipa3_ctx->tether_device[device_type]. + lan_client[i].inited) { + IPAWANERR("Found client index: %d which is inited\n", + i); + return true; + } + } + + return false; +} + +static inline int rmnet_ipa3_get_lan_client_info +( + enum ipacm_per_client_device_type device_type, + uint8_t mac[] +) +{ + int i = 0; + + IPAWANDBG("Client MAC %02x:%02x:%02x:%02x:%02x:%02x\n", + mac[0], mac[1], mac[2], + mac[3], mac[4], mac[5]); + + for (; i < IPA_MAX_NUM_HW_PATH_CLIENTS; i++) { + if (memcmp( + rmnet_ipa3_ctx->tether_device[device_type]. + lan_client[i].mac, + mac, + IPA_MAC_ADDR_SIZE) == 0) { + IPAWANDBG("Matched client index: %d\n", i); + return i; + } + } + + return -EINVAL; +} + +static inline int rmnet_ipa3_delete_lan_client_info +( + enum ipacm_per_client_device_type device_type, + int lan_clnt_idx +) +{ + struct ipa_lan_client *lan_client = NULL; + int i; + + /* Check if the request is to clean up all clients. */ + if (lan_clnt_idx == 0xffffffff) { + /* Reset the complete device info. 
*/ + memset(&rmnet_ipa3_ctx->tether_device[device_type], 0, + sizeof(struct ipa_tether_device_info)); + rmnet_ipa3_ctx->tether_device[device_type].ul_src_pipe = -1; + for (i = 0; i < IPA_MAX_NUM_HW_PATH_CLIENTS; i++) + rmnet_ipa3_ctx->tether_device[device_type]. + lan_client[i].client_idx = -1; + } else { + lan_client = + &rmnet_ipa3_ctx->tether_device[device_type]. + lan_client[lan_clnt_idx]; + /* Reset the client info before sending the message. */ + memset(lan_client, 0, sizeof(struct ipa_lan_client)); + lan_client->client_idx = -1; + + } + return 0; +} + +/* rmnet_ipa3_set_lan_client_info() - + * @data - IOCTL data + * + * This function handles WAN_IOC_SET_LAN_CLIENT_INFO. + * It is used to store LAN client information which + * is used to fetch the packet stats for a client. + * + * Return codes: + * 0: Success + * -EINVAL: Invalid args provided + */ +int rmnet_ipa3_set_lan_client_info( + struct wan_ioctl_lan_client_info *data) +{ + + struct ipa_lan_client *lan_client = NULL; + + + IPAWANDBG("Client MAC %02x:%02x:%02x:%02x:%02x:%02x\n", + data->mac[0], data->mac[1], data->mac[2], + data->mac[3], data->mac[4], data->mac[5]); + + /* Check if Device type is valid. */ + if (data->device_type >= IPACM_MAX_CLIENT_DEVICE_TYPES || + data->device_type < 0) { + IPAWANERR("Invalid Device type: %d\n", data->device_type); + return -EINVAL; + } + + /* Check if Client index is valid. */ + if (data->client_idx >= IPA_MAX_NUM_HW_PATH_CLIENTS || + data->client_idx < 0) { + IPAWANERR("Invalid Client Index: %d\n", data->client_idx); + return -EINVAL; + } + + mutex_lock(&rmnet_ipa3_ctx->per_client_stats_guard); + if (data->client_init) { + /* check if the client is already inited. */ + if (rmnet_ipa3_ctx->tether_device[data->device_type] + .lan_client[data->client_idx].inited) { + IPAWANERR("Client already inited: %d:%d\n", + data->device_type, data->client_idx); + mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard); + return -EINVAL; + } + } + + lan_client = + &rmnet_ipa3_ctx->tether_device[data->device_type]. + lan_client[data->client_idx]; + + memcpy(lan_client->mac, data->mac, IPA_MAC_ADDR_SIZE); + + lan_client->client_idx = data->client_idx; + + /* Update the Source pipe. */ + rmnet_ipa3_ctx->tether_device[data->device_type].ul_src_pipe = + ipa3_get_ep_mapping(data->ul_src_pipe); + + /* Update the header length if not set. */ + if (!rmnet_ipa3_ctx->tether_device[data->device_type].hdr_len) + rmnet_ipa3_ctx->tether_device[data->device_type].hdr_len = + data->hdr_len; + + lan_client->inited = true; + + rmnet_ipa3_ctx->tether_device[data->device_type].num_clients++; + + IPAWANDBG("Set the lan client info: %d, %d, %d\n", + lan_client->client_idx, + rmnet_ipa3_ctx->tether_device[data->device_type].ul_src_pipe, + rmnet_ipa3_ctx->tether_device[data->device_type].num_clients); + + mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard); + + return 0; +} + +/* rmnet_ipa3_delete_lan_client_info() - + * @data - IOCTL data + * + * This function handles WAN_IOC_DELETE_LAN_CLIENT_INFO. + * It is used to delete LAN client information which + * is used to fetch the packet stats for a client. + * + * Return codes: + * 0: Success + * -EINVAL: Invalid args provided + */ +int rmnet_ipa3_clear_lan_client_info( + struct wan_ioctl_lan_client_info *data) +{ + + struct ipa_lan_client *lan_client = NULL; + + + IPAWANDBG("Client MAC %02x:%02x:%02x:%02x:%02x:%02x\n", + data->mac[0], data->mac[1], data->mac[2], + data->mac[3], data->mac[4], data->mac[5]); + + /* Check if Device type is valid. 
*/ + if (data->device_type >= IPACM_MAX_CLIENT_DEVICE_TYPES || + data->device_type < 0) { + IPAWANERR("Invalid Device type: %d\n", data->device_type); + return -EINVAL; + } + + /* Check if Client index is valid. */ + if (data->client_idx >= IPA_MAX_NUM_HW_PATH_CLIENTS || + data->client_idx < 0) { + IPAWANERR("Invalid Client Index: %d\n", data->client_idx); + return -EINVAL; + } + + mutex_lock(&rmnet_ipa3_ctx->per_client_stats_guard); + lan_client = + &rmnet_ipa3_ctx->tether_device[data->device_type]. + lan_client[data->client_idx]; + + if (!data->client_init) { + /* check if the client is already de-inited. */ + if (!lan_client->inited) { + IPAWANERR("Client already de-inited: %d:%d\n", + data->device_type, data->client_idx); + mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard); + return -EINVAL; + } + } + + lan_client->inited = false; + mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard); + + return 0; +} + + +/* rmnet_ipa3_send_lan_client_msg() - + * @data - IOCTL data + * + * This function handles WAN_IOC_SEND_LAN_CLIENT_MSG. + * It is used to send LAN client information to IPACM. + * + * Return codes: + * 0: Success + * -EINVAL: Invalid args provided + */ +int rmnet_ipa3_send_lan_client_msg( + struct wan_ioctl_send_lan_client_msg *data) +{ + struct ipa_msg_meta msg_meta; + int rc; + struct ipa_lan_client_msg *lan_client; + + /* Notify IPACM to reset the client index. */ + lan_client = kzalloc(sizeof(struct ipa_lan_client_msg), + GFP_KERNEL); + if (!lan_client) { + IPAWANERR("Can't allocate memory for tether_info\n"); + return -ENOMEM; + } + memset(&msg_meta, 0, sizeof(struct ipa_msg_meta)); + memcpy(lan_client, &data->lan_client, + sizeof(struct ipa_lan_client_msg)); + msg_meta.msg_type = data->client_event; + msg_meta.msg_len = sizeof(struct ipa_lan_client_msg); + + rc = ipa_send_msg(&msg_meta, lan_client, rmnet_ipa_free_msg); + if (rc) { + IPAWANERR("ipa_send_msg failed: %d\n", rc); + kfree(lan_client); + return rc; + } + return 0; +} + +/* rmnet_ipa3_enable_per_client_stats() - + * @data - IOCTL data + * + * This function handles WAN_IOC_ENABLE_PER_CLIENT_STATS. + * It is used to indicate Q6 to start capturing per client stats. 
+ * + * Return codes: + * 0: Success + * -EINVAL: Invalid args provided + */ +int rmnet_ipa3_enable_per_client_stats( + bool *data) +{ + struct ipa_enable_per_client_stats_req_msg_v01 *req; + struct ipa_enable_per_client_stats_resp_msg_v01 *resp; + int rc; + + req = + kzalloc(sizeof(struct ipa_enable_per_client_stats_req_msg_v01), + GFP_KERNEL); + if (!req) { + IPAWANERR("Can't allocate memory for stats message\n"); + return -ENOMEM; + } + resp = + kzalloc(sizeof(struct ipa_enable_per_client_stats_resp_msg_v01), + GFP_KERNEL); + if (!resp) { + IPAWANERR("Can't allocate memory for stats message\n"); + kfree(req); + return -ENOMEM; + } + memset(req, 0, + sizeof(struct ipa_enable_per_client_stats_req_msg_v01)); + memset(resp, 0, + sizeof(struct ipa_enable_per_client_stats_resp_msg_v01)); + + if (*data) + req->enable_per_client_stats = 1; + else + req->enable_per_client_stats = 0; + + rc = ipa3_qmi_enable_per_client_stats(req, resp); + if (rc) { + IPAWANERR("can't enable per client stats\n"); + kfree(req); + kfree(resp); + return rc; + } + + kfree(req); + kfree(resp); + return 0; +} + +int rmnet_ipa3_query_per_client_stats( + struct wan_ioctl_query_per_client_stats *data) +{ + struct ipa_get_stats_per_client_req_msg_v01 *req; + struct ipa_get_stats_per_client_resp_msg_v01 *resp; + int rc, lan_clnt_idx, lan_clnt_idx1, i; + struct ipa_lan_client *lan_client = NULL; + + + IPAWANDBG("Client MAC %02x:%02x:%02x:%02x:%02x:%02x\n", + data->client_info[0].mac[0], + data->client_info[0].mac[1], + data->client_info[0].mac[2], + data->client_info[0].mac[3], + data->client_info[0].mac[4], + data->client_info[0].mac[5]); + + /* Check if Device type is valid. */ + if (data->device_type >= IPACM_MAX_CLIENT_DEVICE_TYPES || + data->device_type < 0) { + IPAWANERR("Invalid Device type: %d\n", data->device_type); + return -EINVAL; + } + + /* Check if num_clients is valid. */ + if (data->num_clients != IPA_MAX_NUM_HW_PATH_CLIENTS && + data->num_clients != 1) { + IPAWANERR("Invalid number of clients: %d\n", data->num_clients); + return -EINVAL; + } + + mutex_lock(&rmnet_ipa3_ctx->per_client_stats_guard); + + if (data->num_clients == 1) { + /* Check if the client info is valid.*/ + lan_clnt_idx1 = rmnet_ipa3_get_lan_client_info( + data->device_type, + data->client_info[0].mac); + if (lan_clnt_idx1 < 0) { + IPAWANERR("Client info not available return.\n"); + mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard); + return -EINVAL; + } + lan_client = + &rmnet_ipa3_ctx->tether_device[data->device_type]. + lan_client[lan_clnt_idx1]; + /* + * Check if disconnect flag is set and + * see if all the clients info are cleared. + */ + if (data->disconnect_clnt && + lan_client->inited) { + IPAWANERR("Client not inited. Try again.\n"); + mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard); + return -EAGAIN; + } + + } else { + /* Max number of clients. */ + /* Check if disconnect flag is set and + * see if all the clients info are cleared. + */ + if (data->disconnect_clnt && + rmnet_ipa3_check_any_client_inited(data->device_type)) { + IPAWANERR("CLient not inited. 
Try again.\n"); + mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard); + return -EAGAIN; + } + lan_clnt_idx1 = 0xffffffff; + } + + req = kzalloc(sizeof(struct ipa_get_stats_per_client_req_msg_v01), + GFP_KERNEL); + if (!req) { + IPAWANERR("Can't allocate memory for stats message\n"); + mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard); + return -ENOMEM; + } + resp = kzalloc(sizeof(struct ipa_get_stats_per_client_resp_msg_v01), + GFP_KERNEL); + if (!resp) { + IPAWANERR("Can't allocate memory for stats message\n"); + mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard); + kfree(req); + return -ENOMEM; + } + memset(req, 0, sizeof(struct ipa_get_stats_per_client_req_msg_v01)); + memset(resp, 0, sizeof(struct ipa_get_stats_per_client_resp_msg_v01)); + + if (data->reset_stats) { + req->reset_stats_valid = true; + req->reset_stats = true; + IPAWANDBG("fetch and reset the client stats\n"); + } + + req->client_id = lan_clnt_idx1; + req->src_pipe_id = + rmnet_ipa3_ctx->tether_device[data->device_type].ul_src_pipe; + + IPAWANDBG("fetch the client stats for %d, %d\n", req->client_id, + req->src_pipe_id); + + rc = ipa3_qmi_get_per_client_packet_stats(req, resp); + if (rc) { + IPAWANERR("can't get per client stats\n"); + mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard); + kfree(req); + kfree(resp); + return rc; + } + + if (resp->per_client_stats_list_valid) { + for (i = 0; i < resp->per_client_stats_list_len + && i < IPA_MAX_NUM_HW_PATH_CLIENTS; i++) { + /* Subtract the header bytes from the DL bytes. */ + data->client_info[i].ipv4_rx_bytes = + (resp->per_client_stats_list[i].num_dl_ipv4_bytes) - + (rmnet_ipa3_ctx-> + tether_device[data->device_type].hdr_len * + resp->per_client_stats_list[i].num_dl_ipv4_pkts); + /* UL header bytes are subtracted by Q6. */ + data->client_info[i].ipv4_tx_bytes = + resp->per_client_stats_list[i].num_ul_ipv4_bytes; + /* Subtract the header bytes from the DL bytes. */ + data->client_info[i].ipv6_rx_bytes = + (resp->per_client_stats_list[i].num_dl_ipv6_bytes) - + (rmnet_ipa3_ctx-> + tether_device[data->device_type].hdr_len * + resp->per_client_stats_list[i].num_dl_ipv6_pkts); + /* UL header bytes are subtracted by Q6. */ + data->client_info[i].ipv6_tx_bytes = + resp->per_client_stats_list[i].num_ul_ipv6_bytes; + + IPAWANDBG("tx_b_v4(%lu)v6(%lu)rx_b_v4(%lu) v6(%lu)\n", + (unsigned long int) data->client_info[i].ipv4_tx_bytes, + (unsigned long int) data->client_info[i].ipv6_tx_bytes, + (unsigned long int) data->client_info[i].ipv4_rx_bytes, + (unsigned long int) data->client_info[i].ipv6_rx_bytes); + + /* Get the lan client index. */ + lan_clnt_idx = resp->per_client_stats_list[i].client_id; + /* Check if lan_clnt_idx is valid. */ + if (lan_clnt_idx < 0 || + lan_clnt_idx >= IPA_MAX_NUM_HW_PATH_CLIENTS) { + IPAWANERR("Lan client index not valid.\n"); + mutex_unlock( + &rmnet_ipa3_ctx->per_client_stats_guard); + kfree(req); + kfree(resp); + ipa_assert(); + return -EINVAL; + } + memcpy(data->client_info[i].mac, + rmnet_ipa3_ctx-> + tether_device[data->device_type]. 
+ lan_client[lan_clnt_idx].mac, + IPA_MAC_ADDR_SIZE); + } + } + + if (data->disconnect_clnt) { + rmnet_ipa3_delete_lan_client_info(data->device_type, + lan_clnt_idx1); + } + + mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard); + kfree(req); + kfree(resp); + return 0; +} + static int __init ipa3_wwan_init(void) { + int i, j; rmnet_ipa3_ctx = kzalloc(sizeof(*rmnet_ipa3_ctx), GFP_KERNEL); if (!rmnet_ipa3_ctx) { IPAWANERR("no memory\n"); @@ -3330,6 +3843,14 @@ static int __init ipa3_wwan_init(void) mutex_init(&rmnet_ipa3_ctx->pipe_handle_guard); mutex_init(&rmnet_ipa3_ctx->add_mux_channel_lock); + mutex_init(&rmnet_ipa3_ctx->per_client_stats_guard); + /* Reset the Lan Stats. */ + for (i = 0; i < IPACM_MAX_CLIENT_DEVICE_TYPES; i++) { + rmnet_ipa3_ctx->tether_device[i].ul_src_pipe = -1; + for (j = 0; j < IPA_MAX_NUM_HW_PATH_CLIENTS; j++) + rmnet_ipa3_ctx->tether_device[i]. + lan_client[j].client_idx = -1; + } rmnet_ipa3_ctx->ipa3_to_apps_hdl = -1; rmnet_ipa3_ctx->apps_to_ipa3_hdl = -1; @@ -3352,6 +3873,7 @@ static void __exit ipa3_wwan_cleanup(void) ipa3_qmi_cleanup(); mutex_destroy(&rmnet_ipa3_ctx->pipe_handle_guard); mutex_destroy(&rmnet_ipa3_ctx->add_mux_channel_lock); + mutex_destroy(&rmnet_ipa3_ctx->per_client_stats_guard); ret = subsys_notif_unregister_notifier( rmnet_ipa3_ctx->subsys_notify_handle, &ipa3_ssr_notifier); if (ret) diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c index 51bbec464e4d..dc1e5ce511a6 100644 --- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c +++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c @@ -50,6 +50,15 @@ #define WAN_IOC_QUERY_TETHER_STATS_ALL32 _IOWR(WAN_IOC_MAGIC, \ WAN_IOCTL_QUERY_TETHER_STATS_ALL, \ compat_uptr_t) +#define WAN_IOCTL_ENABLE_PER_CLIENT_STATS32 _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_ENABLE_PER_CLIENT_STATS, \ + compat_uptr_t) +#define WAN_IOCTL_QUERY_PER_CLIENT_STATS32 _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_QUERY_PER_CLIENT_STATS, \ + compat_uptr_t) +#define WAN_IOCTL_SET_LAN_CLIENT_INFO32 _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_SET_LAN_CLIENT_INFO, \ + compat_uptr_t) #endif static unsigned int dev_num = 1; @@ -125,6 +134,34 @@ static long ipa3_wan_ioctl(struct file *filp, } break; + case WAN_IOC_ADD_UL_FLT_RULE: + IPAWANDBG("device %s got WAN_IOC_UL_ADD_FLT_RULE :>>>\n", + DRIVER_NAME); + pyld_sz = + sizeof(struct ipa_configure_ul_firewall_rules_req_msg_v01); + param = kzalloc(pyld_sz, GFP_KERNEL); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (const void __user *)arg, + pyld_sz)) { + retval = -EFAULT; + break; + } + if (ipa3_qmi_ul_filter_request_send( + (struct ipa_configure_ul_firewall_rules_req_msg_v01 *) + param)) { + IPAWANDBG("IPACM->Q6 add ul filter rule failed\n"); + retval = -EFAULT; + break; + } + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + case WAN_IOC_ADD_FLT_RULE_INDEX: IPAWANDBG("device %s got WAN_IOC_ADD_FLT_RULE_INDEX :>>>\n", DRIVER_NAME); @@ -316,6 +353,122 @@ static long ipa3_wan_ioctl(struct file *filp, } break; + case WAN_IOC_ENABLE_PER_CLIENT_STATS: + IPAWANDBG_LOW("got WAN_IOC_ENABLE_PER_CLIENT_STATS :>>>\n"); + pyld_sz = sizeof(bool); + param = kzalloc(pyld_sz, GFP_KERNEL); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (const void __user *)arg, + pyld_sz)) { + retval = -EFAULT; + break; + } + + if (rmnet_ipa3_enable_per_client_stats( + (bool *)param)) { + IPAWANERR("WAN_IOC_ENABLE_PER_CLIENT_STATS failed\n"); + retval = 
-EFAULT; + break; + } + + break; + + case WAN_IOC_QUERY_PER_CLIENT_STATS: + IPAWANDBG_LOW("got WAN_IOC_QUERY_PER_CLIENT_STATS :>>>\n"); + pyld_sz = sizeof(struct wan_ioctl_query_per_client_stats); + param = kzalloc(pyld_sz, GFP_KERNEL); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (const void __user *)arg, + pyld_sz)) { + retval = -EFAULT; + break; + } + + retval = rmnet_ipa3_query_per_client_stats( + (struct wan_ioctl_query_per_client_stats *)param); + if (retval) { + IPAWANERR("WAN_IOC_QUERY_PER_CLIENT_STATS failed\n"); + break; + } + + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case WAN_IOC_SET_LAN_CLIENT_INFO: + IPAWANDBG_LOW("got WAN_IOC_SET_LAN_CLIENT_INFO :>>>\n"); + pyld_sz = sizeof(struct wan_ioctl_lan_client_info); + param = kzalloc(pyld_sz, GFP_KERNEL); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (const void __user *)arg, + pyld_sz)) { + retval = -EFAULT; + break; + } + if (rmnet_ipa3_set_lan_client_info( + (struct wan_ioctl_lan_client_info *)param)) { + IPAWANERR("WAN_IOC_SET_LAN_CLIENT_INFO failed\n"); + retval = -EFAULT; + break; + } + break; + + case WAN_IOC_CLEAR_LAN_CLIENT_INFO: + IPAWANDBG_LOW("got WAN_IOC_CLEAR_LAN_CLIENT_INFO :>>>\n"); + pyld_sz = sizeof(struct wan_ioctl_lan_client_info); + param = kzalloc(pyld_sz, GFP_KERNEL); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (const void __user *)arg, + pyld_sz)) { + retval = -EFAULT; + break; + } + if (rmnet_ipa3_clear_lan_client_info( + (struct wan_ioctl_lan_client_info *)param)) { + IPAWANERR("WAN_IOC_CLEAR_LAN_CLIENT_INFO failed\n"); + retval = -EFAULT; + break; + } + break; + + + case WAN_IOC_SEND_LAN_CLIENT_MSG: + IPAWANDBG_LOW("got WAN_IOC_SEND_LAN_CLIENT_MSG :>>>\n"); + pyld_sz = sizeof(struct wan_ioctl_send_lan_client_msg); + param = kzalloc(pyld_sz, GFP_KERNEL); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (const void __user *)arg, + pyld_sz)) { + retval = -EFAULT; + break; + } + if (rmnet_ipa3_send_lan_client_msg( + (struct wan_ioctl_send_lan_client_msg *) + param)) { + IPAWANERR("IOC_SEND_LAN_CLIENT_MSG failed\n"); + retval = -EFAULT; + break; + } + break; + + default: retval = -ENOTTY; } diff --git a/drivers/platform/msm/mhi/mhi_states.c b/drivers/platform/msm/mhi/mhi_states.c index ea2a91bd2d06..8391dce4b5f0 100644 --- a/drivers/platform/msm/mhi/mhi_states.c +++ b/drivers/platform/msm/mhi/mhi_states.c @@ -18,26 +18,25 @@ #include <linux/platform_device.h> #include <linux/pm_runtime.h> +static const char * const mhi_states_transition_str[STATE_TRANSITION_MAX] = { + [STATE_TRANSITION_RESET] = "RESET", + [STATE_TRANSITION_READY] = "READY", + [STATE_TRANSITION_M0] = "M0", + [STATE_TRANSITION_M1] = "M1", + [STATE_TRANSITION_M2] = "M2", + [STATE_TRANSITION_M3] = "M3", + [STATE_TRANSITION_BHI] = "BHI", + [STATE_TRANSITION_SBL] = "SBL", + [STATE_TRANSITION_AMSS] = "AMSS", + [STATE_TRANSITION_LINK_DOWN] = "LINK_DOWN", + [STATE_TRANSITION_WAKE] = "WAKE", + [STATE_TRANSITION_BHIE] = "BHIE", + [STATE_TRANSITION_RDDM] = "RDDM", + [STATE_TRANSITION_SYS_ERR] = "SYS_ERR", +}; + const char *state_transition_str(enum STATE_TRANSITION state) { - static const char * const - mhi_states_transition_str[STATE_TRANSITION_MAX] = { - [STATE_TRANSITION_RESET] = "RESET", - [STATE_TRANSITION_READY] = "READY", - [STATE_TRANSITION_M0] = "M0", - [STATE_TRANSITION_M1] = "M1", - [STATE_TRANSITION_M2] = "M2", - [STATE_TRANSITION_M3] = "M3", - 
[STATE_TRANSITION_BHI] = "BHI", - [STATE_TRANSITION_SBL] = "SBL", - [STATE_TRANSITION_AMSS] = "AMSS", - [STATE_TRANSITION_LINK_DOWN] = "LINK_DOWN", - [STATE_TRANSITION_WAKE] = "WAKE", - [STATE_TRANSITION_BHIE] = "BHIE", - [STATE_TRANSITION_RDDM] = "RDDM", - [STATE_TRANSITION_SYS_ERR] = "SYS_ERR", - }; - return (state < STATE_TRANSITION_MAX) ? mhi_states_transition_str[state] : "Invalid"; } diff --git a/drivers/platform/msm/msm_ext_display.c b/drivers/platform/msm/msm_ext_display.c index 4f7d27db8dee..bc15ef2a5b2b 100644 --- a/drivers/platform/msm/msm_ext_display.c +++ b/drivers/platform/msm/msm_ext_display.c @@ -39,6 +39,8 @@ struct msm_ext_disp { struct list_head display_list; struct mutex lock; struct completion hpd_comp; + bool update_audio; + u32 flags; }; static int msm_ext_disp_get_intf_data(struct msm_ext_disp *ext_disp, @@ -340,6 +342,8 @@ static int msm_ext_disp_hpd(struct platform_device *pdev, goto end; } + ext_disp->flags = flags; + if (state == EXT_DISPLAY_CABLE_CONNECT) { if (!msm_ext_disp_validate_connect(ext_disp, type, flags)) { pr_err("Display interface (%s) already connected\n", @@ -570,6 +574,7 @@ static int msm_ext_disp_update_audio_ops(struct msm_ext_disp *ext_disp, { int ret = 0; struct msm_ext_disp_audio_codec_ops *ops = ext_disp->ops; + ext_disp->update_audio = false; if (!(flags & MSM_EXT_DISP_HPD_AUDIO)) { pr_debug("skipping audio ops setup for display (%s)\n", @@ -579,6 +584,10 @@ static int msm_ext_disp_update_audio_ops(struct msm_ext_disp *ext_disp, if (!ops) { pr_err("Invalid audio ops\n"); + if (state == EXT_DISPLAY_CABLE_CONNECT) { + /* update audio ops once audio codec gets registered */ + ext_disp->update_audio = true; + } ret = -EINVAL; goto end; } @@ -682,6 +691,18 @@ int msm_ext_disp_register_audio_codec(struct platform_device *pdev, pr_debug("audio codec registered\n"); + mutex_lock(&ext_disp->lock); + if (ext_disp->update_audio) { + msm_ext_disp_update_audio_ops(ext_disp, ext_disp->current_disp, + EXT_DISPLAY_CABLE_CONNECT, ext_disp->flags); + + msm_ext_disp_process_audio(ext_disp, ext_disp->current_disp, + EXT_DISPLAY_CABLE_CONNECT, ext_disp->flags); + + ext_disp->update_audio = false; + } + mutex_unlock(&ext_disp->lock); + return ret; } @@ -803,6 +824,8 @@ static int msm_ext_disp_probe(struct platform_device *pdev) INIT_LIST_HEAD(&ext_disp->display_list); init_completion(&ext_disp->hpd_comp); ext_disp->current_disp = EXT_DISPLAY_TYPE_MAX; + ext_disp->flags = 0; + ext_disp->update_audio = false; return ret; diff --git a/drivers/platform/msm/usb_bam.c b/drivers/platform/msm/usb_bam.c index 49e0666d0c23..a9bcabfdb009 100644 --- a/drivers/platform/msm/usb_bam.c +++ b/drivers/platform/msm/usb_bam.c @@ -126,13 +126,13 @@ static char *bam_enable_strings[MAX_BAMS] = { * CI_CTRL & DWC3_CTRL shouldn't be used simultaneously * since both share the same prod & cons rm resourses */ -static enum ipa_client_type ipa_rm_resource_prod[MAX_BAMS] = { +static enum ipa_rm_resource_name ipa_rm_resource_prod[MAX_BAMS] = { [CI_CTRL] = IPA_RM_RESOURCE_USB_PROD, [HSIC_CTRL] = IPA_RM_RESOURCE_HSIC_PROD, [DWC3_CTRL] = IPA_RM_RESOURCE_USB_PROD, }; -static enum ipa_client_type ipa_rm_resource_cons[MAX_BAMS] = { +static enum ipa_rm_resource_name ipa_rm_resource_cons[MAX_BAMS] = { [CI_CTRL] = IPA_RM_RESOURCE_USB_CONS, [HSIC_CTRL] = IPA_RM_RESOURCE_HSIC_CONS, [DWC3_CTRL] = IPA_RM_RESOURCE_USB_CONS, @@ -1625,6 +1625,22 @@ static void usb_bam_ipa_create_resources(enum usb_ctrl cur_bam) } } +static void usb_bam_ipa_delete_resources(enum usb_ctrl cur_bam) +{ + int ret; + + ret = 
ipa_rm_delete_resource(ipa_rm_resource_prod[cur_bam]); + if (ret) + log_event_err("%s: Failed to delete USB_PROD resource\n", + __func__); + + ret = ipa_rm_delete_resource(ipa_rm_resource_cons[cur_bam]); + if (ret) + log_event_err("%s: Failed to delete USB_CONS resource\n", + __func__); + +} + static void wait_for_prod_granted(enum usb_ctrl cur_bam) { int ret; @@ -3401,6 +3417,7 @@ static int usb_bam_remove(struct platform_device *pdev) { struct usb_bam_ctx_type *ctx = dev_get_drvdata(&pdev->dev); + usb_bam_ipa_delete_resources(ctx->usb_bam_data->bam_type); usb_bam_unregister_panic_hdlr(); sps_deregister_bam_device(ctx->h_bam); destroy_workqueue(ctx->usb_bam_wq); diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c index a45a51490817..2e9ff2afcba2 100644 --- a/drivers/power/power_supply_sysfs.c +++ b/drivers/power/power_supply_sysfs.c @@ -247,6 +247,8 @@ static struct device_attribute power_supply_attrs[] = { POWER_SUPPLY_ATTR(low_power), POWER_SUPPLY_ATTR(temp_cool), POWER_SUPPLY_ATTR(temp_warm), + POWER_SUPPLY_ATTR(temp_cold), + POWER_SUPPLY_ATTR(temp_hot), POWER_SUPPLY_ATTR(system_temp_level), POWER_SUPPLY_ATTR(resistance), POWER_SUPPLY_ATTR(resistance_capacitive), diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c index 5e8cc84fbfbf..cb26658e564e 100644 --- a/drivers/power/supply/qcom/battery.c +++ b/drivers/power/supply/qcom/battery.c @@ -291,7 +291,7 @@ static struct class_attribute pl_attributes[] = { * TAPER * ************/ #define MINIMUM_PARALLEL_FCC_UA 500000 -#define PL_TAPER_WORK_DELAY_MS 100 +#define PL_TAPER_WORK_DELAY_MS 500 #define TAPER_RESIDUAL_PCT 75 static void pl_taper_work(struct work_struct *work) { @@ -349,7 +349,7 @@ done: * FCC * **********/ #define EFFICIENCY_PCT 80 -static void split_fcc(struct pl_data *chip, int total_ua, +static void get_fcc_split(struct pl_data *chip, int total_ua, int *master_ua, int *slave_ua) { int rc, effective_total_ua, slave_limited_ua, hw_cc_delta_ua = 0, @@ -388,7 +388,6 @@ static void split_fcc(struct pl_data *chip, int total_ua, effective_total_ua = max(0, total_ua + hw_cc_delta_ua); slave_limited_ua = min(effective_total_ua, bcl_ua); *slave_ua = (slave_limited_ua * chip->slave_pct) / 100; - *slave_ua = (*slave_ua * chip->taper_pct) / 100; /* * In USBIN_USBIN configuration with internal rsense parallel * charger's current goes through main charger's BATFET, keep @@ -398,6 +397,8 @@ static void split_fcc(struct pl_data *chip, int total_ua, *master_ua = max(0, total_ua); else *master_ua = max(0, total_ua - *slave_ua); + + *slave_ua = (*slave_ua * chip->taper_pct) / 100; } static int pl_fcc_vote_callback(struct votable *votable, void *data, @@ -425,7 +426,8 @@ static int pl_fcc_vote_callback(struct votable *votable, void *data, } if (chip->pl_mode != POWER_SUPPLY_PL_NONE) { - split_fcc(chip, total_fcc_ua, &master_fcc_ua, &slave_fcc_ua); + get_fcc_split(chip, total_fcc_ua, + &master_fcc_ua, &slave_fcc_ua); pval.intval = slave_fcc_ua; rc = power_supply_set_property(chip->pl_psy, diff --git a/drivers/power/supply/qcom/fg-core.h b/drivers/power/supply/qcom/fg-core.h index 88dcdd8fd7be..720a6c1945f6 100644 --- a/drivers/power/supply/qcom/fg-core.h +++ b/drivers/power/supply/qcom/fg-core.h @@ -104,7 +104,7 @@ enum sram_access_flags { }; /* JEITA */ -enum { +enum jeita_levels { JEITA_COLD = 0, JEITA_COOL, JEITA_WARM, @@ -239,6 +239,7 @@ enum ttf_mode { struct fg_dt_props { bool force_load_profile; bool hold_soc_while_full; + bool linearize_soc; bool auto_recharge_soc; int 
cutoff_volt_mv; int empty_volt_mv; @@ -272,6 +273,7 @@ struct fg_dt_props { int slope_limit_temp; int esr_pulse_thresh_ma; int esr_meas_curr_ma; + int ki_coeff_full_soc_dischg; int jeita_thresholds[NUM_JEITA_LEVELS]; int ki_coeff_soc[KI_COEFF_SOC_LEVELS]; int ki_coeff_med_dischg[KI_COEFF_SOC_LEVELS]; @@ -401,6 +403,7 @@ struct fg_chip { struct mutex bus_lock; struct mutex sram_rw_lock; struct mutex charge_full_lock; + struct mutex qnovo_esr_ctrl_lock; u32 batt_soc_base; u32 batt_info_base; u32 mem_if_base; @@ -409,7 +412,6 @@ struct fg_chip { int batt_id_ohms; int ki_coeff_full_soc; int charge_status; - int prev_charge_status; int charge_done; int charge_type; int online_status; @@ -434,6 +436,7 @@ struct fg_chip { bool esr_flt_cold_temp_en; bool slope_limit_en; bool use_ima_single_mode; + bool qnovo_enable; struct completion soc_update; struct completion soc_ready; struct delayed_work profile_load_work; diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c index 361efd4fbbbd..79a80b6e6c7c 100644 --- a/drivers/power/supply/qcom/qpnp-fg-gen3.c +++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c @@ -576,6 +576,41 @@ static int fg_get_charge_counter(struct fg_chip *chip, int *val) return 0; } +static int fg_get_jeita_threshold(struct fg_chip *chip, + enum jeita_levels level, int *temp_decidegC) +{ + int rc; + u8 val; + u16 reg; + + switch (level) { + case JEITA_COLD: + reg = BATT_INFO_JEITA_TOO_COLD(chip); + break; + case JEITA_COOL: + reg = BATT_INFO_JEITA_COLD(chip); + break; + case JEITA_WARM: + reg = BATT_INFO_JEITA_HOT(chip); + break; + case JEITA_HOT: + reg = BATT_INFO_JEITA_TOO_HOT(chip); + break; + default: + return -EINVAL; + } + + rc = fg_read(chip, reg, &val, 1); + if (rc < 0) { + pr_err("Error in reading jeita level %d, rc=%d\n", level, rc); + return rc; + } + + /* Resolution is 0.5C. Base is -30C. */ + *temp_decidegC = (((5 * val) / 10) - 30) * 10; + return 0; +} + #define BATT_TEMP_NUMR 1 #define BATT_TEMP_DENR 1 static int fg_get_battery_temp(struct fg_chip *chip, int *val) @@ -838,7 +873,7 @@ static int fg_get_prop_capacity(struct fg_chip *chip, int *val) if (rc < 0) return rc; - if (chip->delta_soc > 0) + if (chip->dt.linearize_soc && chip->delta_soc > 0) *val = chip->maint_soc; else *val = msoc; @@ -978,12 +1013,6 @@ static int fg_get_batt_profile(struct fg_chip *chip) return 0; } -static inline void get_temp_setpoint(int threshold, u8 *val) -{ - /* Resolution is 0.5C. Base is -30C. */ - *val = DIV_ROUND_CLOSEST((threshold + 30) * 10, 5); -} - static inline void get_batt_temp_delta(int delta, u8 *val) { switch (delta) { @@ -1602,6 +1631,8 @@ static int fg_adjust_ki_coeff_full_soc(struct fg_chip *chip, int batt_temp) if (batt_temp < 0) ki_coeff_full_soc = 0; + else if (chip->charge_status == POWER_SUPPLY_STATUS_DISCHARGING) + ki_coeff_full_soc = chip->dt.ki_coeff_full_soc_dischg; else ki_coeff_full_soc = KI_COEFF_FULL_SOC_DEFAULT; @@ -1658,12 +1689,38 @@ static int fg_set_recharge_voltage(struct fg_chip *chip, int voltage_mv) return 0; } +static int fg_configure_full_soc(struct fg_chip *chip, int bsoc) +{ + int rc; + u8 full_soc[2] = {0xFF, 0xFF}; + + /* + * Once SOC masking condition is cleared, FULL_SOC and MONOTONIC_SOC + * needs to be updated to reflect the same. Write battery SOC to + * FULL_SOC and write a full value to MONOTONIC_SOC. 
+ */ + rc = fg_sram_write(chip, FULL_SOC_WORD, FULL_SOC_OFFSET, + (u8 *)&bsoc, 2, FG_IMA_ATOMIC); + if (rc < 0) { + pr_err("failed to write full_soc rc=%d\n", rc); + return rc; + } + + rc = fg_sram_write(chip, MONOTONIC_SOC_WORD, MONOTONIC_SOC_OFFSET, + full_soc, 2, FG_IMA_ATOMIC); + if (rc < 0) { + pr_err("failed to write monotonic_soc rc=%d\n", rc); + return rc; + } + + return 0; +} + #define AUTO_RECHG_VOLT_LOW_LIMIT_MV 3700 static int fg_charge_full_update(struct fg_chip *chip) { union power_supply_propval prop = {0, }; int rc, msoc, bsoc, recharge_soc, msoc_raw; - u8 full_soc[2] = {0xFF, 0xFF}; if (!chip->dt.hold_soc_while_full) return 0; @@ -1693,12 +1750,12 @@ static int fg_charge_full_update(struct fg_chip *chip) /* We need 2 most significant bytes here */ bsoc = (u32)bsoc >> 16; - rc = fg_get_msoc(chip, &msoc); + rc = fg_get_msoc_raw(chip, &msoc_raw); if (rc < 0) { - pr_err("Error in getting msoc, rc=%d\n", rc); + pr_err("Error in getting msoc_raw, rc=%d\n", rc); goto out; } - msoc_raw = DIV_ROUND_CLOSEST(msoc * FULL_SOC_RAW, FULL_CAPACITY); + msoc = DIV_ROUND_CLOSEST(msoc_raw * FULL_CAPACITY, FULL_SOC_RAW); fg_dbg(chip, FG_STATUS, "msoc: %d bsoc: %x health: %d status: %d full: %d\n", msoc, bsoc, chip->health, chip->charge_status, @@ -1722,24 +1779,24 @@ static int fg_charge_full_update(struct fg_chip *chip) fg_dbg(chip, FG_STATUS, "Terminated charging @ SOC%d\n", msoc); } - } else if (msoc_raw < recharge_soc && chip->charge_full) { - chip->delta_soc = FULL_CAPACITY - msoc; + } else if (msoc_raw <= recharge_soc && chip->charge_full) { + if (chip->dt.linearize_soc) { + chip->delta_soc = FULL_CAPACITY - msoc; - /* - * We're spreading out the delta SOC over every 10% change - * in monotonic SOC. We cannot spread more than 9% in the - * range of 0-100 skipping the first 10%. - */ - if (chip->delta_soc > 9) { - chip->delta_soc = 0; - chip->maint_soc = 0; - } else { - chip->maint_soc = FULL_CAPACITY; - chip->last_msoc = msoc; + /* + * We're spreading out the delta SOC over every 10% + * change in monotonic SOC. We cannot spread more than + * 9% in the range of 0-100 skipping the first 10%. + */ + if (chip->delta_soc > 9) { + chip->delta_soc = 0; + chip->maint_soc = 0; + } else { + chip->maint_soc = FULL_CAPACITY; + chip->last_msoc = msoc; + } } - chip->charge_full = false; - /* * Raise the recharge voltage so that VBAT_LT_RECHG signal * will be asserted soon as battery SOC had dropped below @@ -1752,35 +1809,23 @@ static int fg_charge_full_update(struct fg_chip *chip) rc); goto out; } - fg_dbg(chip, FG_STATUS, "msoc_raw = %d bsoc: %d recharge_soc: %d delta_soc: %d\n", - msoc_raw, bsoc >> 8, recharge_soc, chip->delta_soc); - } else { - goto out; - } - if (!chip->charge_full) - goto out; + /* + * If charge_done is still set, wait for recharging or + * discharging to happen. + */ + if (chip->charge_done) + goto out; - /* - * During JEITA conditions, charge_full can happen early. FULL_SOC - * and MONOTONIC_SOC needs to be updated to reflect the same. Write - * battery SOC to FULL_SOC and write a full value to MONOTONIC_SOC. 
- */ - rc = fg_sram_write(chip, FULL_SOC_WORD, FULL_SOC_OFFSET, (u8 *)&bsoc, 2, - FG_IMA_ATOMIC); - if (rc < 0) { - pr_err("failed to write full_soc rc=%d\n", rc); - goto out; - } + rc = fg_configure_full_soc(chip, bsoc); + if (rc < 0) + goto out; - rc = fg_sram_write(chip, MONOTONIC_SOC_WORD, MONOTONIC_SOC_OFFSET, - full_soc, 2, FG_IMA_ATOMIC); - if (rc < 0) { - pr_err("failed to write monotonic_soc rc=%d\n", rc); - goto out; + chip->charge_full = false; + fg_dbg(chip, FG_STATUS, "msoc_raw = %d bsoc: %d recharge_soc: %d delta_soc: %d\n", + msoc_raw, bsoc >> 8, recharge_soc, chip->delta_soc); } - fg_dbg(chip, FG_STATUS, "Set charge_full to true @ soc %d\n", msoc); out: mutex_unlock(&chip->charge_full_lock); return rc; @@ -1863,6 +1908,44 @@ static int fg_rconn_config(struct fg_chip *chip) return 0; } +static int fg_set_jeita_threshold(struct fg_chip *chip, + enum jeita_levels level, int temp_decidegC) +{ + int rc; + u8 val; + u16 reg; + + if (temp_decidegC < -300 || temp_decidegC > 970) + return -EINVAL; + + /* Resolution is 0.5C. Base is -30C. */ + val = DIV_ROUND_CLOSEST(((temp_decidegC / 10) + 30) * 10, 5); + switch (level) { + case JEITA_COLD: + reg = BATT_INFO_JEITA_TOO_COLD(chip); + break; + case JEITA_COOL: + reg = BATT_INFO_JEITA_COLD(chip); + break; + case JEITA_WARM: + reg = BATT_INFO_JEITA_HOT(chip); + break; + case JEITA_HOT: + reg = BATT_INFO_JEITA_TOO_HOT(chip); + break; + default: + return -EINVAL; + } + + rc = fg_write(chip, reg, &val, 1); + if (rc < 0) { + pr_err("Error in setting jeita level %d, rc=%d\n", level, rc); + return rc; + } + + return 0; +} + static int fg_set_constant_chg_voltage(struct fg_chip *chip, int volt_uv) { u8 buf[2]; @@ -2426,7 +2509,6 @@ static void status_change_work(struct work_struct *work) goto out; } - chip->prev_charge_status = chip->charge_status; chip->charge_status = prop.intval; rc = power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_CHARGE_TYPE, &prop); @@ -3137,6 +3219,9 @@ static int fg_update_maint_soc(struct fg_chip *chip) { int rc = 0, msoc; + if (!chip->dt.linearize_soc) + return 0; + mutex_lock(&chip->charge_full_lock); if (chip->delta_soc <= 0) goto out; @@ -3213,20 +3298,21 @@ static int fg_force_esr_meas(struct fg_chip *chip) int rc; int esr_uohms; + mutex_lock(&chip->qnovo_esr_ctrl_lock); /* force esr extraction enable */ rc = fg_sram_masked_write(chip, ESR_EXTRACTION_ENABLE_WORD, ESR_EXTRACTION_ENABLE_OFFSET, BIT(0), BIT(0), FG_IMA_DEFAULT); if (rc < 0) { pr_err("failed to enable esr extn rc=%d\n", rc); - return rc; + goto out; } rc = fg_masked_write(chip, BATT_INFO_QNOVO_CFG(chip), LD_REG_CTRL_BIT, 0); if (rc < 0) { pr_err("Error in configuring qnovo_cfg rc=%d\n", rc); - return rc; + goto out; } rc = fg_masked_write(chip, BATT_INFO_TM_MISC1(chip), @@ -3234,24 +3320,36 @@ static int fg_force_esr_meas(struct fg_chip *chip) ESR_REQ_CTL_BIT | ESR_REQ_CTL_EN_BIT); if (rc < 0) { pr_err("Error in configuring force ESR rc=%d\n", rc); - return rc; + goto out; } + /* + * Release and grab the lock again after 1.5 seconds so that prepare + * callback can succeed if the request comes in between. 
+ */ + mutex_unlock(&chip->qnovo_esr_ctrl_lock); + /* wait 1.5 seconds for hw to measure ESR */ msleep(1500); + + mutex_lock(&chip->qnovo_esr_ctrl_lock); rc = fg_masked_write(chip, BATT_INFO_TM_MISC1(chip), ESR_REQ_CTL_BIT | ESR_REQ_CTL_EN_BIT, 0); if (rc < 0) { pr_err("Error in restoring force ESR rc=%d\n", rc); - return rc; + goto out; } + /* If qnovo is disabled, then leave ESR extraction enabled */ + if (!chip->qnovo_enable) + goto done; + rc = fg_masked_write(chip, BATT_INFO_QNOVO_CFG(chip), LD_REG_CTRL_BIT, LD_REG_CTRL_BIT); if (rc < 0) { pr_err("Error in restoring qnovo_cfg rc=%d\n", rc); - return rc; + goto out; } /* force esr extraction disable */ @@ -3260,36 +3358,46 @@ static int fg_force_esr_meas(struct fg_chip *chip) FG_IMA_DEFAULT); if (rc < 0) { pr_err("failed to disable esr extn rc=%d\n", rc); - return rc; + goto out; } +done: fg_get_battery_resistance(chip, &esr_uohms); fg_dbg(chip, FG_STATUS, "ESR uohms = %d\n", esr_uohms); - +out: + mutex_unlock(&chip->qnovo_esr_ctrl_lock); return rc; } static int fg_prepare_for_qnovo(struct fg_chip *chip, int qnovo_enable) { - int rc; + int rc = 0; + mutex_lock(&chip->qnovo_esr_ctrl_lock); /* force esr extraction disable when qnovo enables */ rc = fg_sram_masked_write(chip, ESR_EXTRACTION_ENABLE_WORD, ESR_EXTRACTION_ENABLE_OFFSET, BIT(0), qnovo_enable ? 0 : BIT(0), FG_IMA_DEFAULT); - if (rc < 0) + if (rc < 0) { pr_err("Error in configuring esr extraction rc=%d\n", rc); + goto out; + } rc = fg_masked_write(chip, BATT_INFO_QNOVO_CFG(chip), LD_REG_CTRL_BIT, qnovo_enable ? LD_REG_CTRL_BIT : 0); if (rc < 0) { pr_err("Error in configuring qnovo_cfg rc=%d\n", rc); - return rc; + goto out; } - fg_dbg(chip, FG_STATUS, "Prepared for Qnovo\n"); - return 0; + + fg_dbg(chip, FG_STATUS, "%s for Qnovo\n", + qnovo_enable ? 
"Prepared" : "Unprepared"); + chip->qnovo_enable = qnovo_enable; +out: + mutex_unlock(&chip->qnovo_esr_ctrl_lock); + return rc; } static void ttf_work(struct work_struct *work) @@ -3365,6 +3473,9 @@ static int fg_psy_get_property(struct power_supply *psy, case POWER_SUPPLY_PROP_CAPACITY: rc = fg_get_prop_capacity(chip, &pval->intval); break; + case POWER_SUPPLY_PROP_CAPACITY_RAW: + rc = fg_get_msoc_raw(chip, &pval->intval); + break; case POWER_SUPPLY_PROP_VOLTAGE_NOW: if (chip->battery_missing) pval->intval = 3700000; @@ -3377,6 +3488,34 @@ static int fg_psy_get_property(struct power_supply *psy, case POWER_SUPPLY_PROP_TEMP: rc = fg_get_battery_temp(chip, &pval->intval); break; + case POWER_SUPPLY_PROP_COLD_TEMP: + rc = fg_get_jeita_threshold(chip, JEITA_COLD, &pval->intval); + if (rc < 0) { + pr_err("Error in reading jeita_cold, rc=%d\n", rc); + return rc; + } + break; + case POWER_SUPPLY_PROP_COOL_TEMP: + rc = fg_get_jeita_threshold(chip, JEITA_COOL, &pval->intval); + if (rc < 0) { + pr_err("Error in reading jeita_cool, rc=%d\n", rc); + return rc; + } + break; + case POWER_SUPPLY_PROP_WARM_TEMP: + rc = fg_get_jeita_threshold(chip, JEITA_WARM, &pval->intval); + if (rc < 0) { + pr_err("Error in reading jeita_warm, rc=%d\n", rc); + return rc; + } + break; + case POWER_SUPPLY_PROP_HOT_TEMP: + rc = fg_get_jeita_threshold(chip, JEITA_HOT, &pval->intval); + if (rc < 0) { + pr_err("Error in reading jeita_hot, rc=%d\n", rc); + return rc; + } + break; case POWER_SUPPLY_PROP_RESISTANCE: rc = fg_get_battery_resistance(chip, &pval->intval); break; @@ -3500,6 +3639,48 @@ static int fg_psy_set_property(struct power_supply *psy, return -EINVAL; } break; + case POWER_SUPPLY_PROP_CHARGE_FULL: + if (chip->cl.active) { + pr_warn("Capacity learning active!\n"); + return 0; + } + if (pval->intval <= 0 || pval->intval > chip->cl.nom_cap_uah) { + pr_err("charge_full is out of bounds\n"); + return -EINVAL; + } + chip->cl.learned_cc_uah = pval->intval; + rc = fg_save_learned_cap_to_sram(chip); + if (rc < 0) + pr_err("Error in saving learned_cc_uah, rc=%d\n", rc); + break; + case POWER_SUPPLY_PROP_COLD_TEMP: + rc = fg_set_jeita_threshold(chip, JEITA_COLD, pval->intval); + if (rc < 0) { + pr_err("Error in writing jeita_cold, rc=%d\n", rc); + return rc; + } + break; + case POWER_SUPPLY_PROP_COOL_TEMP: + rc = fg_set_jeita_threshold(chip, JEITA_COOL, pval->intval); + if (rc < 0) { + pr_err("Error in writing jeita_cool, rc=%d\n", rc); + return rc; + } + break; + case POWER_SUPPLY_PROP_WARM_TEMP: + rc = fg_set_jeita_threshold(chip, JEITA_WARM, pval->intval); + if (rc < 0) { + pr_err("Error in writing jeita_warm, rc=%d\n", rc); + return rc; + } + break; + case POWER_SUPPLY_PROP_HOT_TEMP: + rc = fg_set_jeita_threshold(chip, JEITA_HOT, pval->intval); + if (rc < 0) { + pr_err("Error in writing jeita_hot, rc=%d\n", rc); + return rc; + } + break; default: break; } @@ -3515,6 +3696,11 @@ static int fg_property_is_writeable(struct power_supply *psy, case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE: case POWER_SUPPLY_PROP_CC_STEP: case POWER_SUPPLY_PROP_CC_STEP_SEL: + case POWER_SUPPLY_PROP_CHARGE_FULL: + case POWER_SUPPLY_PROP_COLD_TEMP: + case POWER_SUPPLY_PROP_COOL_TEMP: + case POWER_SUPPLY_PROP_WARM_TEMP: + case POWER_SUPPLY_PROP_HOT_TEMP: return 1; default: break; @@ -3541,6 +3727,7 @@ static int fg_notifier_cb(struct notifier_block *nb, return NOTIFY_OK; if ((strcmp(psy->desc->name, "battery") == 0) + || (strcmp(psy->desc->name, "parallel") == 0) || (strcmp(psy->desc->name, "usb") == 0)) { /* * We cannot vote for awake 
votable here as that takes @@ -3555,7 +3742,12 @@ static int fg_notifier_cb(struct notifier_block *nb, static enum power_supply_property fg_psy_props[] = { POWER_SUPPLY_PROP_CAPACITY, + POWER_SUPPLY_PROP_CAPACITY_RAW, POWER_SUPPLY_PROP_TEMP, + POWER_SUPPLY_PROP_COLD_TEMP, + POWER_SUPPLY_PROP_COOL_TEMP, + POWER_SUPPLY_PROP_WARM_TEMP, + POWER_SUPPLY_PROP_HOT_TEMP, POWER_SUPPLY_PROP_VOLTAGE_NOW, POWER_SUPPLY_PROP_VOLTAGE_OCV, POWER_SUPPLY_PROP_CURRENT_NOW, @@ -3734,29 +3926,29 @@ static int fg_hw_init(struct fg_chip *chip) } } - get_temp_setpoint(chip->dt.jeita_thresholds[JEITA_COLD], &val); - rc = fg_write(chip, BATT_INFO_JEITA_TOO_COLD(chip), &val, 1); + rc = fg_set_jeita_threshold(chip, JEITA_COLD, + chip->dt.jeita_thresholds[JEITA_COLD] * 10); if (rc < 0) { pr_err("Error in writing jeita_cold, rc=%d\n", rc); return rc; } - get_temp_setpoint(chip->dt.jeita_thresholds[JEITA_COOL], &val); - rc = fg_write(chip, BATT_INFO_JEITA_COLD(chip), &val, 1); + rc = fg_set_jeita_threshold(chip, JEITA_COOL, + chip->dt.jeita_thresholds[JEITA_COOL] * 10); if (rc < 0) { pr_err("Error in writing jeita_cool, rc=%d\n", rc); return rc; } - get_temp_setpoint(chip->dt.jeita_thresholds[JEITA_WARM], &val); - rc = fg_write(chip, BATT_INFO_JEITA_HOT(chip), &val, 1); + rc = fg_set_jeita_threshold(chip, JEITA_WARM, + chip->dt.jeita_thresholds[JEITA_WARM] * 10); if (rc < 0) { pr_err("Error in writing jeita_warm, rc=%d\n", rc); return rc; } - get_temp_setpoint(chip->dt.jeita_thresholds[JEITA_HOT], &val); - rc = fg_write(chip, BATT_INFO_JEITA_TOO_HOT(chip), &val, 1); + rc = fg_set_jeita_threshold(chip, JEITA_HOT, + chip->dt.jeita_thresholds[JEITA_HOT] * 10); if (rc < 0) { pr_err("Error in writing jeita_hot, rc=%d\n", rc); return rc; @@ -4309,7 +4501,11 @@ static int fg_parse_slope_limit_coefficients(struct fg_chip *chip) static int fg_parse_ki_coefficients(struct fg_chip *chip) { struct device_node *node = chip->dev->of_node; - int rc, i; + int rc, i, temp; + + rc = of_property_read_u32(node, "qcom,ki-coeff-full-dischg", &temp); + if (!rc) + chip->dt.ki_coeff_full_soc_dischg = temp; rc = fg_parse_dt_property_u32_array(node, "qcom,ki-coeff-soc-dischg", chip->dt.ki_coeff_soc, KI_COEFF_SOC_LEVELS); @@ -4654,6 +4850,9 @@ static int fg_parse_dt(struct fg_chip *chip) chip->dt.hold_soc_while_full = of_property_read_bool(node, "qcom,hold-soc-while-full"); + chip->dt.linearize_soc = of_property_read_bool(node, + "qcom,linearize-soc"); + rc = fg_parse_ki_coefficients(chip); if (rc < 0) pr_err("Error in parsing Ki coefficients, rc=%d\n", rc); @@ -4759,7 +4958,6 @@ static int fg_gen3_probe(struct platform_device *pdev) chip->debug_mask = &fg_gen3_debug_mask; chip->irqs = fg_irqs; chip->charge_status = -EINVAL; - chip->prev_charge_status = -EINVAL; chip->ki_coeff_full_soc = -EINVAL; chip->online_status = -EINVAL; chip->regmap = dev_get_regmap(chip->dev->parent, NULL); @@ -4832,6 +5030,7 @@ static int fg_gen3_probe(struct platform_device *pdev) mutex_init(&chip->cl.lock); mutex_init(&chip->ttf.lock); mutex_init(&chip->charge_full_lock); + mutex_init(&chip->qnovo_esr_ctrl_lock); init_completion(&chip->soc_update); init_completion(&chip->soc_ready); INIT_DELAYED_WORK(&chip->profile_load_work, profile_load_work); @@ -4968,6 +5167,29 @@ static int fg_gen3_remove(struct platform_device *pdev) return 0; } +static void fg_gen3_shutdown(struct platform_device *pdev) +{ + struct fg_chip *chip = dev_get_drvdata(&pdev->dev); + int rc, bsoc; + + if (chip->charge_full) { + rc = fg_get_sram_prop(chip, FG_SRAM_BATT_SOC, &bsoc); + if (rc < 0) { + 
pr_err("Error in getting BATT_SOC, rc=%d\n", rc); + return; + } + + /* We need 2 most significant bytes here */ + bsoc = (u32)bsoc >> 16; + + rc = fg_configure_full_soc(chip, bsoc); + if (rc < 0) { + pr_err("Error in configuring full_soc, rc=%d\n", rc); + return; + } + } +} + static const struct of_device_id fg_gen3_match_table[] = { {.compatible = FG_GEN3_DEV_NAME}, {}, @@ -4982,6 +5204,7 @@ static struct platform_driver fg_gen3_driver = { }, .probe = fg_gen3_probe, .remove = fg_gen3_remove, + .shutdown = fg_gen3_shutdown, }; static int __init fg_gen3_init(void) diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c index c085256a794a..ea205100644d 100644 --- a/drivers/power/supply/qcom/qpnp-smb2.c +++ b/drivers/power/supply/qcom/qpnp-smb2.c @@ -795,6 +795,7 @@ static int smb2_init_usb_main_psy(struct smb2 *chip) *************************/ static enum power_supply_property smb2_dc_props[] = { + POWER_SUPPLY_PROP_INPUT_SUSPEND, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_ONLINE, POWER_SUPPLY_PROP_CURRENT_MAX, @@ -810,6 +811,9 @@ static int smb2_dc_get_prop(struct power_supply *psy, int rc = 0; switch (psp) { + case POWER_SUPPLY_PROP_INPUT_SUSPEND: + val->intval = get_effective_result(chg->dc_suspend_votable); + break; case POWER_SUPPLY_PROP_PRESENT: rc = smblib_get_prop_dc_present(chg, val); break; @@ -841,6 +845,10 @@ static int smb2_dc_set_prop(struct power_supply *psy, int rc = 0; switch (psp) { + case POWER_SUPPLY_PROP_INPUT_SUSPEND: + rc = vote(chg->dc_suspend_votable, WBC_VOTER, + (bool)val->intval, 0); + break; case POWER_SUPPLY_PROP_CURRENT_MAX: rc = smblib_set_prop_dc_current_max(chg, val); break; @@ -1191,7 +1199,7 @@ static int smb2_init_batt_psy(struct smb2 *chip) * VBUS REGULATOR REGISTRATION * ******************************/ -struct regulator_ops smb2_vbus_reg_ops = { +static struct regulator_ops smb2_vbus_reg_ops = { .enable = smblib_vbus_regulator_enable, .disable = smblib_vbus_regulator_disable, .is_enabled = smblib_vbus_regulator_is_enabled, @@ -1233,7 +1241,7 @@ static int smb2_init_vbus_regulator(struct smb2 *chip) * VCONN REGULATOR REGISTRATION * ******************************/ -struct regulator_ops smb2_vconn_reg_ops = { +static struct regulator_ops smb2_vconn_reg_ops = { .enable = smblib_vconn_regulator_enable, .disable = smblib_vconn_regulator_disable, .is_enabled = smblib_vconn_regulator_is_enabled, diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c index f9d35ea7775b..df7aabfd7e2e 100644 --- a/drivers/power/supply/qcom/smb-lib.c +++ b/drivers/power/supply/qcom/smb-lib.c @@ -2224,12 +2224,6 @@ int smblib_get_prop_usb_voltage_max(struct smb_charger *chg, int smblib_get_prop_usb_voltage_now(struct smb_charger *chg, union power_supply_propval *val) { - int rc = 0; - - rc = smblib_get_prop_usb_present(chg, val); - if (rc < 0 || !val->intval) - return rc; - if (!chg->iio.usbin_v_chan || PTR_ERR(chg->iio.usbin_v_chan) == -EPROBE_DEFER) chg->iio.usbin_v_chan = iio_channel_get(chg->dev, "usbin_v"); @@ -2772,7 +2766,7 @@ int smblib_set_prop_pd_active(struct smb_charger *chg, hvdcp = stat & QC_CHARGER_BIT; vote(chg->apsd_disable_votable, PD_VOTER, false, 0); vote(chg->pd_allowed_votable, PD_VOTER, true, 0); - vote(chg->usb_irq_enable_votable, PD_VOTER, true, 0); + vote(chg->usb_irq_enable_votable, PD_VOTER, false, 0); vote(chg->hvdcp_disable_votable_indirect, PD_INACTIVE_VOTER, false, 0); @@ -4063,6 +4057,14 @@ irqreturn_t smblib_handle_high_duty_cycle(int irq, void *data) struct smb_charger *chg = 
irq_data->parent_data; chg->is_hdc = true; + /* + * Disable usb IRQs after the flag set and re-enable IRQs after + * the flag cleared in the delayed work queue, to avoid any IRQ + * storming during the delays + */ + if (chg->irq_info[HIGH_DUTY_CYCLE_IRQ].irq) + disable_irq_nosync(chg->irq_info[HIGH_DUTY_CYCLE_IRQ].irq); + schedule_delayed_work(&chg->clear_hdc_work, msecs_to_jiffies(60)); return IRQ_HANDLED; @@ -4240,6 +4242,8 @@ static void clear_hdc_work(struct work_struct *work) clear_hdc_work.work); chg->is_hdc = 0; + if (chg->irq_info[HIGH_DUTY_CYCLE_IRQ].irq) + enable_irq(chg->irq_info[HIGH_DUTY_CYCLE_IRQ].irq); } static void rdstd_cc2_detach_work(struct work_struct *work) diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h index a89d09711ec8..19c0d19106d6 100644 --- a/drivers/power/supply/qcom/smb-lib.h +++ b/drivers/power/supply/qcom/smb-lib.h @@ -65,6 +65,7 @@ enum print_reason { #define OTG_DELAY_VOTER "OTG_DELAY_VOTER" #define USBIN_I_VOTER "USBIN_I_VOTER" #define WEAK_CHARGER_VOTER "WEAK_CHARGER_VOTER" +#define WBC_VOTER "WBC_VOTER" #define VCONN_MAX_ATTEMPTS 3 #define OTG_MAX_ATTEMPTS 3 diff --git a/drivers/power/supply/qcom/smb1351-charger.c b/drivers/power/supply/qcom/smb1351-charger.c index f5c8252b5e41..2dc16aadc4e8 100644 --- a/drivers/power/supply/qcom/smb1351-charger.c +++ b/drivers/power/supply/qcom/smb1351-charger.c @@ -860,7 +860,7 @@ static int smb1351_chg_otg_regulator_is_enable(struct regulator_dev *rdev) return (reg & CMD_OTG_EN_BIT) ? 1 : 0; } -struct regulator_ops smb1351_chg_otg_reg_ops = { +static struct regulator_ops smb1351_chg_otg_reg_ops = { .enable = smb1351_chg_otg_regulator_enable, .disable = smb1351_chg_otg_regulator_disable, .is_enabled = smb1351_chg_otg_regulator_is_enable, diff --git a/drivers/power/supply/qcom/smb135x-charger.c b/drivers/power/supply/qcom/smb135x-charger.c index 08af01544590..3c1b8787ae21 100644 --- a/drivers/power/supply/qcom/smb135x-charger.c +++ b/drivers/power/supply/qcom/smb135x-charger.c @@ -2219,7 +2219,7 @@ static int smb135x_chg_otg_regulator_is_enable(struct regulator_dev *rdev) return (reg & OTG_EN) ? 
1 : 0; } -struct regulator_ops smb135x_chg_otg_reg_ops = { +static struct regulator_ops smb135x_chg_otg_reg_ops = { .enable = smb135x_chg_otg_regulator_enable, .disable = smb135x_chg_otg_regulator_disable, .is_enabled = smb135x_chg_otg_regulator_is_enable, diff --git a/drivers/power/supply/qcom/smb138x-charger.c b/drivers/power/supply/qcom/smb138x-charger.c index e7ca1e3fb108..a7e7f0be9afc 100644 --- a/drivers/power/supply/qcom/smb138x-charger.c +++ b/drivers/power/supply/qcom/smb138x-charger.c @@ -111,7 +111,7 @@ module_param_named( debug_mask, __debug_mask, int, S_IRUSR | S_IWUSR ); -irqreturn_t smb138x_handle_slave_chg_state_change(int irq, void *data) +static irqreturn_t smb138x_handle_slave_chg_state_change(int irq, void *data) { struct smb_irq_data *irq_data = data; struct smb138x *chip = irq_data->parent_data; @@ -727,7 +727,7 @@ static int smb138x_init_parallel_psy(struct smb138x *chip) * VBUS REGULATOR REGISTRATION * ******************************/ -struct regulator_ops smb138x_vbus_reg_ops = { +static struct regulator_ops smb138x_vbus_reg_ops = { .enable = smblib_vbus_regulator_enable, .disable = smblib_vbus_regulator_disable, .is_enabled = smblib_vbus_regulator_is_enabled, @@ -769,7 +769,7 @@ static int smb138x_init_vbus_regulator(struct smb138x *chip) * VCONN REGULATOR REGISTRATION * ******************************/ -struct regulator_ops smb138x_vconn_reg_ops = { +static struct regulator_ops smb138x_vconn_reg_ops = { .enable = smblib_vconn_regulator_enable, .disable = smblib_vconn_regulator_disable, .is_enabled = smblib_vconn_regulator_is_enabled, diff --git a/drivers/power/supply/qcom/step-chg-jeita.c b/drivers/power/supply/qcom/step-chg-jeita.c index 06ecc7ea6e8a..acc0d772d44d 100644 --- a/drivers/power/supply/qcom/step-chg-jeita.c +++ b/drivers/power/supply/qcom/step-chg-jeita.c @@ -356,11 +356,21 @@ static void status_change_work(struct work_struct *work) int reschedule_us; int reschedule_jeita_work_us = 0; int reschedule_step_work_us = 0; + union power_supply_propval pval = {0, }; + + if (!is_batt_available(chip)) { + __pm_relax(chip->step_chg_ws); + return; + } - if (!is_batt_available(chip)) + /* skip jeita and step if not charging */ + rc = power_supply_get_property(chip->batt_psy, + POWER_SUPPLY_PROP_STATUS, &pval); + if (pval.intval != POWER_SUPPLY_STATUS_CHARGING) { + __pm_relax(chip->step_chg_ws); return; + } - /* skip elapsed_us debounce for handling battery temperature */ rc = handle_jeita(chip); if (rc > 0) reschedule_jeita_work_us = rc; diff --git a/drivers/pwm/pwm-qpnp.c b/drivers/pwm/pwm-qpnp.c index 2531b74b4588..5e808150a3dd 100644 --- a/drivers/pwm/pwm-qpnp.c +++ b/drivers/pwm/pwm-qpnp.c @@ -1475,7 +1475,7 @@ static void qpnp_pwm_disable(struct pwm_chip *pwm_chip, */ int pwm_change_mode(struct pwm_device *pwm, enum pm_pwm_mode mode) { - int rc; + int rc = 0; unsigned long flags; struct qpnp_pwm_chip *chip; diff --git a/drivers/regulator/cprh-kbss-regulator.c b/drivers/regulator/cprh-kbss-regulator.c index ecf7885a4bff..4d6d63e6d887 100644 --- a/drivers/regulator/cprh-kbss-regulator.c +++ b/drivers/regulator/cprh-kbss-regulator.c @@ -80,7 +80,7 @@ struct cprh_kbss_fuses { * Fuse combos 24 - 31 map to CPR fusing revision 0 - 7 with speed bin fuse = 3. 
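The comment above spells out a fixed combo layout of eight CPR fusing revisions per speed-bin value. As an illustrative sketch only (the arithmetic is inferred from that comment, not introduced by this patch; cprh_fuse_combo is a hypothetical helper):

	/* Hypothetical helper: combo layout implied by the comment above --
	 * 8 fusing revisions per speed bin, e.g. bin 3, rev 0-7 -> combos 24-31.
	 */
	static inline int cprh_fuse_combo(int speed_bin, int fusing_rev)
	{
		return speed_bin * 8 + fusing_rev;
	}

Under that layout, bumping CPRH_SDM660_KBSS_FUSE_COMBO_COUNT from 16 to 32 in the hunk below makes room for speed bins 2 and 3 (combos 16-31), which lines up with the new speed-bin-3 handling added further down in this file.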
*/ #define CPRH_MSM8998_KBSS_FUSE_COMBO_COUNT 32 -#define CPRH_SDM660_KBSS_FUSE_COMBO_COUNT 16 +#define CPRH_SDM660_KBSS_FUSE_COMBO_COUNT 32 #define CPRH_SDM630_KBSS_FUSE_COMBO_COUNT 24 /* @@ -1069,6 +1069,12 @@ static int cprh_kbss_calculate_open_loop_voltages(struct cpr3_regulator *vreg) CPRH_KBSS_FUSE_STEP_VOLT, fuse->init_voltage[i], CPRH_KBSS_VOLTAGE_FUSE_SIZE); + /* SDM660 speed bin #3 does not support TURBO_L1/L2 */ + if (soc_revision == SDM660_SOC_ID && vreg->speed_bin_fuse == 3 + && (id == CPRH_KBSS_PERFORMANCE_CLUSTER_ID) + && (i == CPRH_SDM660_PERF_KBSS_FUSE_CORNER_TURBO_L2)) + continue; + /* Log fused open-loop voltage values for debugging purposes. */ cpr3_info(vreg, "fused %8s: open-loop=%7d uV\n", corner_name[i], fuse_volt[i]); @@ -1615,6 +1621,11 @@ static int cprh_kbss_calculate_target_quotients(struct cpr3_regulator *vreg) CPRH_SDM660_PERF_KBSS_FUSE_CORNER_SVS; highest_fuse_corner = CPRH_SDM660_PERF_KBSS_FUSE_CORNER_TURBO_L2; + + /* speed-bin 3 does not have Turbo_L2 fuse */ + if (vreg->speed_bin_fuse == 3) + highest_fuse_corner = + CPRH_SDM660_PERF_KBSS_FUSE_CORNER_TURBO; } break; case SDM630_SOC_ID: diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 6b942d9e5b74..1ed85dfc008d 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -329,12 +329,15 @@ qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj, struct qla_hw_data *ha = vha->hw; ssize_t rval = 0; + mutex_lock(&ha->optrom_mutex); + if (ha->optrom_state != QLA_SREADING) - return 0; + goto out; - mutex_lock(&ha->optrom_mutex); rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer, ha->optrom_region_size); + +out: mutex_unlock(&ha->optrom_mutex); return rval; @@ -349,14 +352,19 @@ qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj, struct device, kobj))); struct qla_hw_data *ha = vha->hw; - if (ha->optrom_state != QLA_SWRITING) + mutex_lock(&ha->optrom_mutex); + + if (ha->optrom_state != QLA_SWRITING) { + mutex_unlock(&ha->optrom_mutex); return -EINVAL; - if (off > ha->optrom_region_size) + } + if (off > ha->optrom_region_size) { + mutex_unlock(&ha->optrom_mutex); return -ERANGE; + } if (off + count > ha->optrom_region_size) count = ha->optrom_region_size - off; - mutex_lock(&ha->optrom_mutex); memcpy(&ha->optrom_buffer[off], buf, count); mutex_unlock(&ha->optrom_mutex); diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h index 460213536e14..9d532691f001 100644 --- a/drivers/scsi/ufs/ufs-qcom.h +++ b/drivers/scsi/ufs/ufs-qcom.h @@ -21,6 +21,7 @@ #define MAX_UFS_QCOM_HOSTS 1 #define MAX_U32 (~(u32)0) #define MPHY_TX_FSM_STATE 0x41 +#define MPHY_RX_FSM_STATE 0xC1 #define TX_FSM_HIBERN8 0x1 #define HBRN8_POLL_TOUT_MS 100 #define DEFAULT_CLK_RATE_HZ 1000000 diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index c23023f43d30..35575c071760 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -173,6 +173,9 @@ void ufshcd_update_query_stats(struct ufs_hba *hba, } #endif +#define PWR_INFO_MASK 0xF +#define PWR_RX_OFFSET 4 + #define UFSHCD_REQ_SENSE_SIZE 18 #define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\ @@ -839,6 +842,24 @@ static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap) } } +static void ufshcd_print_fsm_state(struct ufs_hba *hba) +{ + int err = 0, tx_fsm_val = 0, rx_fsm_val = 0; + + err = ufshcd_dme_get(hba, + UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, + UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)), + &tx_fsm_val); + dev_err(hba->dev, "%s: 
TX_FSM_STATE = %u, err = %d\n", __func__, + tx_fsm_val, err); + err = ufshcd_dme_get(hba, + UIC_ARG_MIB_SEL(MPHY_RX_FSM_STATE, + UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)), + &rx_fsm_val); + dev_err(hba->dev, "%s: RX_FSM_STATE = %u, err = %d\n", __func__, + rx_fsm_val, err); +} + static void ufshcd_print_host_state(struct ufs_hba *hba) { if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_HOST_STATE_EN)) @@ -863,6 +884,7 @@ static void ufshcd_print_host_state(struct ufs_hba *hba) hba->capabilities, hba->caps); dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks, hba->dev_quirks); + ufshcd_print_fsm_state(hba); } /** @@ -4634,8 +4656,9 @@ int ufshcd_change_power_mode(struct ufs_hba *hba, int ret = 0; /* if already configured to the requested pwr_mode */ - if (pwr_mode->gear_rx == hba->pwr_info.gear_rx && - pwr_mode->gear_tx == hba->pwr_info.gear_tx && + if (!hba->restore_needed && + pwr_mode->gear_rx == hba->pwr_info.gear_rx && + pwr_mode->gear_tx == hba->pwr_info.gear_tx && pwr_mode->lane_rx == hba->pwr_info.lane_rx && pwr_mode->lane_tx == hba->pwr_info.lane_tx && pwr_mode->pwr_rx == hba->pwr_info.pwr_rx && @@ -6256,6 +6279,52 @@ static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist, reg_hist->pos = (reg_hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH; } +static void ufshcd_rls_handler(struct work_struct *work) +{ + struct ufs_hba *hba; + int ret = 0; + u32 mode; + + hba = container_of(work, struct ufs_hba, rls_work); + ufshcd_scsi_block_requests(hba); + pm_runtime_get_sync(hba->dev); + ret = ufshcd_wait_for_doorbell_clr(hba, U64_MAX); + if (ret) { + dev_err(hba->dev, + "Timed out (%d) waiting for DB to clear\n", + ret); + goto out; + } + + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode); + if (hba->pwr_info.pwr_rx != ((mode >> PWR_RX_OFFSET) & PWR_INFO_MASK)) + hba->restore_needed = true; + + if (hba->pwr_info.pwr_tx != (mode & PWR_INFO_MASK)) + hba->restore_needed = true; + + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_RXGEAR), &mode); + if (hba->pwr_info.gear_rx != mode) + hba->restore_needed = true; + + ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TXGEAR), &mode); + if (hba->pwr_info.gear_tx != mode) + hba->restore_needed = true; + + if (hba->restore_needed) + ret = ufshcd_config_pwr_mode(hba, &(hba->pwr_info)); + + if (ret) + dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n", + __func__, ret); + else + hba->restore_needed = false; + +out: + ufshcd_scsi_unblock_requests(hba); + pm_runtime_put_sync(hba->dev); +} + /** * ufshcd_update_uic_error - check and set fatal UIC error flags. 
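ufshcd_rls_handler() above decides whether a power-mode restore is needed by comparing the live PA_PWRMODE and gear attributes against hba->pwr_info. A minimal sketch of the PA_PWRMODE decode, using the PWR_INFO_MASK and PWR_RX_OFFSET definitions added earlier in this patch (pa_pwrmode_matches is a hypothetical helper and assumes the ufs_pa_layer_attr layout used by hba->pwr_info):

	/* PA_PWRMODE packs the RX power mode in the upper nibble and the TX
	 * power mode in the lower nibble; restore_needed is set when either
	 * differs from the previously negotiated values.
	 */
	static bool pa_pwrmode_matches(u32 mode, const struct ufs_pa_layer_attr *info)
	{
		u32 pwr_rx = (mode >> PWR_RX_OFFSET) & PWR_INFO_MASK;
		u32 pwr_tx = mode & PWR_INFO_MASK;

		return pwr_rx == info->pwr_rx && pwr_tx == info->pwr_tx;
	}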
* @hba: per-adapter instance @@ -6295,6 +6364,8 @@ static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba) hba->full_init_linereset = true; } } + if (!hba->full_init_linereset) + schedule_work(&hba->rls_work); } retval |= IRQ_HANDLED; } @@ -8689,6 +8760,7 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) goto enable_gating; } + flush_work(&hba->eeh_work); ret = ufshcd_link_state_transition(hba, req_link_state, 1); if (ret) goto set_dev_active; @@ -9902,6 +9974,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) /* Initialize work queues */ INIT_WORK(&hba->eh_work, ufshcd_err_handler); INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler); + INIT_WORK(&hba->rls_work, ufshcd_rls_handler); /* Initialize UIC command mutex */ mutex_init(&hba->uic_cmd_mutex); diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index da3ad78d3405..dbc80848ed8b 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -854,6 +854,7 @@ struct ufs_hba { /* Work Queues */ struct work_struct eh_work; struct work_struct eeh_work; + struct work_struct rls_work; /* HBA Errors */ u32 errors; @@ -950,9 +951,10 @@ struct ufs_hba { bool full_init_linereset; struct pinctrl *pctrl; - + int latency_hist_enabled; struct io_latency_state io_lat_s; + bool restore_needed; }; static inline void ufshcd_mark_shutdown_ongoing(struct ufs_hba *hba) diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index 907960cfa9d5..829876226689 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -1,6 +1,8 @@ # # QCOM Soc drivers # +source "drivers/soc/qcom/hab/Kconfig" + config MSM_INRUSH_CURRENT_MITIGATION bool "Inrush-current mitigation Driver" help diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile index 0bf54bedd6ea..229b13a04819 100644 --- a/drivers/soc/qcom/Makefile +++ b/drivers/soc/qcom/Makefile @@ -107,3 +107,4 @@ obj-$(CONFIG_QCOM_SMCINVOKE) += smcinvoke.o obj-$(CONFIG_QCOM_EARLY_RANDOM) += early_random.o obj-$(CONFIG_QCOM_CX_IPEAK) += cx_ipeak.o obj-$(CONFIG_MSM_CACHE_M4M_ERP64) += cache_m4m_erp64.o +obj-$(CONFIG_MSM_HAB) += hab/ diff --git a/drivers/soc/qcom/common_log.c b/drivers/soc/qcom/common_log.c index 1e8744b41e4c..11ca86a4ba41 100644 --- a/drivers/soc/qcom/common_log.c +++ b/drivers/soc/qcom/common_log.c @@ -18,6 +18,7 @@ #include <linux/slab.h> #include <linux/kmemleak.h> #include <linux/async.h> +#include <linux/thread_info.h> #include <soc/qcom/memory_dump.h> #include <soc/qcom/minidump.h> #include <asm/sections.h> @@ -256,6 +257,32 @@ static void __init register_kernel_sections(void) } } +#ifdef CONFIG_QCOM_MINIDUMP +void dump_stack_minidump(u64 sp) +{ + struct md_region ksp_entry, ktsk_entry; + u32 cpu = smp_processor_id(); + + if (sp < KIMAGE_VADDR || sp > -256UL) + sp = current_stack_pointer; + + sp &= ~(THREAD_SIZE - 1); + scnprintf(ksp_entry.name, sizeof(ksp_entry.name), "KSTACK%d", cpu); + ksp_entry.virt_addr = sp; + ksp_entry.phys_addr = virt_to_phys((uintptr_t *)sp); + ksp_entry.size = THREAD_SIZE; + if (msm_minidump_add_region(&ksp_entry)) + pr_err("Failed to add stack of cpu %d in Minidump\n", cpu); + + scnprintf(ktsk_entry.name, sizeof(ktsk_entry.name), "KTASK%d", cpu); + ktsk_entry.virt_addr = (u64)current; + ktsk_entry.phys_addr = virt_to_phys((uintptr_t *)current); + ktsk_entry.size = sizeof(struct task_struct); + if (msm_minidump_add_region(&ktsk_entry)) + pr_err("Failed to add current task %d in Minidump\n", cpu); +} +#endif + static void __init 
async_common_log_init(void *data, async_cookie_t cookie) { register_kernel_sections(); diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c index e0d9f68ceef9..ad9bf3a2232d 100644 --- a/drivers/soc/qcom/glink.c +++ b/drivers/soc/qcom/glink.c @@ -1667,6 +1667,8 @@ void ch_purge_intent_lists(struct channel_ctx *ctx) &ctx->local_rx_intent_list, list) { ctx->notify_rx_abort(ctx, ctx->user_priv, ptr_intent->pkt_priv); + ctx->transport_ptr->ops->deallocate_rx_intent( + ctx->transport_ptr->ops, ptr_intent); list_del(&ptr_intent->list); kfree(ptr_intent); } @@ -2372,6 +2374,35 @@ static void dummy_tx_cmd_ch_remote_close_ack(struct glink_transport_if *if_ptr, } /** + * dummy_tx_cmd_ch_open() - dummy channel open cmd sending function + * @if_ptr: The transport to transmit on. + * @lcid: The local channel id to encode. + * @name: The channel name to encode. + * @req_xprt: The transport the core would like to migrate this channel to. + * + * Return: 0 on success or standard Linux error code. + */ +static int dummy_tx_cmd_ch_open(struct glink_transport_if *if_ptr, + uint32_t lcid, const char *name, + uint16_t req_xprt) +{ + return -EOPNOTSUPP; +} + +/** + * dummy_tx_cmd_ch_remote_open_ack() - convert a channel open ack cmd to wire + * format and transmit + * @if_ptr: The transport to transmit on. + * @rcid: The remote channel id to encode. + * @xprt_resp: The response to a transport migration request. + */ +static void dummy_tx_cmd_ch_remote_open_ack(struct glink_transport_if *if_ptr, + uint32_t rcid, uint16_t xprt_resp) +{ + /* intentionally left blank */ +} + +/** * dummy_get_power_vote_ramp_time() - Dummy Power vote ramp time * @if_ptr: The transport to transmit on. * @state: The power state being requested from the transport. @@ -3736,6 +3767,8 @@ static void glink_dummy_xprt_ctx_release(struct rwref_lock *xprt_st_lock) GLINK_INFO("%s: freeing transport [%s->%s]context\n", __func__, xprt_ctx->name, xprt_ctx->edge); + kfree(xprt_ctx->ops); + xprt_ctx->ops = NULL; kfree(xprt_ctx); } @@ -4184,8 +4217,14 @@ static struct glink_core_xprt_ctx *glink_create_dummy_xprt_ctx( if_ptr->tx_cmd_remote_rx_intent_req_ack = dummy_tx_cmd_remote_rx_intent_req_ack; if_ptr->tx_cmd_set_sigs = dummy_tx_cmd_set_sigs; + if_ptr->tx_cmd_ch_open = dummy_tx_cmd_ch_open; + if_ptr->tx_cmd_ch_remote_open_ack = dummy_tx_cmd_ch_remote_open_ack; if_ptr->tx_cmd_ch_close = dummy_tx_cmd_ch_close; if_ptr->tx_cmd_ch_remote_close_ack = dummy_tx_cmd_ch_remote_close_ack; + if_ptr->tx_cmd_tracer_pkt = dummy_tx_cmd_tracer_pkt; + if_ptr->get_power_vote_ramp_time = dummy_get_power_vote_ramp_time; + if_ptr->power_vote = dummy_power_vote; + if_ptr->power_unvote = dummy_power_unvote; xprt_ptr->ops = if_ptr; xprt_ptr->log_ctx = log_ctx; diff --git a/drivers/soc/qcom/hab/Kconfig b/drivers/soc/qcom/hab/Kconfig new file mode 100644 index 000000000000..2e4f5114e29f --- /dev/null +++ b/drivers/soc/qcom/hab/Kconfig @@ -0,0 +1,7 @@ +config MSM_HAB + bool "Enable Multimedia driver Hypervisor Abstraction Layer" + help + Multimedia driver hypervisor abstraction layer. + Required for drivers to use the HAB API to communicate with the host + OS. 
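MSM_HAB is a bool symbol, so the abstraction layer is always built into the image rather than as a loadable module. A minimal sketch of enabling it (the defconfig path is hypothetical):

	# arch/arm64/configs/<target>_defconfig
	CONFIG_MSM_HAB=y

With the symbol set, the obj-$(CONFIG_MSM_HAB) += hab/ rule added above pulls in the new directory, and the hab/Makefile that follows links the listed objects into a single msm_hab.o.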
+ diff --git a/drivers/soc/qcom/hab/Makefile b/drivers/soc/qcom/hab/Makefile new file mode 100644 index 000000000000..83fc54d42202 --- /dev/null +++ b/drivers/soc/qcom/hab/Makefile @@ -0,0 +1,14 @@ +msm_hab-objs = \ + khab.o \ + hab.o \ + hab_msg.o \ + hab_vchan.o \ + hab_pchan.o \ + hab_open.o \ + hab_mimex.o \ + hab_mem_linux.o \ + hab_pipe.o \ + qvm_comm.o \ + hab_qvm.o + +obj-$(CONFIG_MSM_HAB) += msm_hab.o diff --git a/drivers/soc/qcom/hab/hab.c b/drivers/soc/qcom/hab/hab.c new file mode 100644 index 000000000000..c6df36f5c0a2 --- /dev/null +++ b/drivers/soc/qcom/hab/hab.c @@ -0,0 +1,726 @@ +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include "hab.h" + +#define HAB_DEVICE_CNSTR(__name__, __id__, __num__) { \ + .name = __name__,\ + .id = __id__,\ + .pchannels = LIST_HEAD_INIT(hab_devices[__num__].pchannels),\ + .pchan_lock = __MUTEX_INITIALIZER(hab_devices[__num__].pchan_lock),\ + .openq_list = LIST_HEAD_INIT(hab_devices[__num__].openq_list),\ + .openlock = __SPIN_LOCK_UNLOCKED(&hab_devices[__num__].openlock)\ + } + +/* the following has to match habmm definitions, order does not matter */ +static struct hab_device hab_devices[] = { + HAB_DEVICE_CNSTR(DEVICE_AUD1_NAME, MM_AUD_1, 0), + HAB_DEVICE_CNSTR(DEVICE_AUD2_NAME, MM_AUD_2, 1), + HAB_DEVICE_CNSTR(DEVICE_AUD3_NAME, MM_AUD_3, 2), + HAB_DEVICE_CNSTR(DEVICE_AUD4_NAME, MM_AUD_4, 3), + HAB_DEVICE_CNSTR(DEVICE_CAM_NAME, MM_CAM, 4), + HAB_DEVICE_CNSTR(DEVICE_DISP1_NAME, MM_DISP_1, 5), + HAB_DEVICE_CNSTR(DEVICE_DISP2_NAME, MM_DISP_2, 6), + HAB_DEVICE_CNSTR(DEVICE_DISP3_NAME, MM_DISP_3, 7), + HAB_DEVICE_CNSTR(DEVICE_DISP4_NAME, MM_DISP_4, 8), + HAB_DEVICE_CNSTR(DEVICE_DISP5_NAME, MM_DISP_5, 9), + HAB_DEVICE_CNSTR(DEVICE_GFX_NAME, MM_GFX, 10), + HAB_DEVICE_CNSTR(DEVICE_VID_NAME, MM_VID, 11), + HAB_DEVICE_CNSTR(DEVICE_MISC_NAME, MM_MISC, 12), + HAB_DEVICE_CNSTR(DEVICE_QCPE1_NAME, MM_QCPE_VM1, 13), + HAB_DEVICE_CNSTR(DEVICE_QCPE2_NAME, MM_QCPE_VM2, 14), + HAB_DEVICE_CNSTR(DEVICE_QCPE3_NAME, MM_QCPE_VM3, 15), + HAB_DEVICE_CNSTR(DEVICE_QCPE4_NAME, MM_QCPE_VM4, 16) +}; + +struct hab_driver hab_driver = { + .ndevices = ARRAY_SIZE(hab_devices), + .devp = hab_devices, +}; + +struct uhab_context *hab_ctx_alloc(int kernel) +{ + struct uhab_context *ctx; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return NULL; + + ctx->closing = 0; + INIT_LIST_HEAD(&ctx->vchannels); + INIT_LIST_HEAD(&ctx->exp_whse); + INIT_LIST_HEAD(&ctx->imp_whse); + + INIT_LIST_HEAD(&ctx->exp_rxq); + init_waitqueue_head(&ctx->exp_wq); + spin_lock_init(&ctx->expq_lock); + + spin_lock_init(&ctx->imp_lock); + rwlock_init(&ctx->exp_lock); + rwlock_init(&ctx->ctx_lock); + + kref_init(&ctx->refcount); + ctx->import_ctx = habmem_imp_hyp_open(); + if (!ctx->import_ctx) { + kfree(ctx); + return NULL; + } + ctx->kernel = kernel; + + return ctx; +} + +void hab_ctx_free(struct kref *ref) +{ + struct uhab_context *ctx = + container_of(ref, struct uhab_context, refcount); + struct hab_export_ack_recvd *ack_recvd, *tmp; + + habmem_imp_hyp_close(ctx->import_ctx, ctx->kernel); + + 
list_for_each_entry_safe(ack_recvd, tmp, &ctx->exp_rxq, node) { + list_del(&ack_recvd->node); + kfree(ack_recvd); + } + + kfree(ctx); +} + +struct virtual_channel *hab_get_vchan_fromvcid(int32_t vcid, + struct uhab_context *ctx) +{ + struct virtual_channel *vchan; + + read_lock(&ctx->ctx_lock); + list_for_each_entry(vchan, &ctx->vchannels, node) { + if (vcid == vchan->id) { + kref_get(&vchan->refcount); + read_unlock(&ctx->ctx_lock); + return vchan; + } + } + read_unlock(&ctx->ctx_lock); + return NULL; +} + +static struct hab_device *find_hab_device(unsigned int mm_id) +{ + int i; + + for (i = 0; i < hab_driver.ndevices; i++) { + if (hab_driver.devp[i].id == HAB_MMID_GET_MAJOR(mm_id)) + return &hab_driver.devp[i]; + } + + pr_err("find_hab_device failed: id=%d\n", mm_id); + return NULL; +} +/* + * open handshake in FE and BE + + * frontend backend + * send(INIT) wait(INIT) + * wait(INIT_ACK) send(INIT_ACK) + * send(ACK) wait(ACK) + + */ +struct virtual_channel *frontend_open(struct uhab_context *ctx, + unsigned int mm_id, + int dom_id) +{ + int ret, open_id = 0; + struct physical_channel *pchan = NULL; + struct hab_device *dev; + struct virtual_channel *vchan = NULL; + static atomic_t open_id_counter = ATOMIC_INIT(0); + struct hab_open_request request; + struct hab_open_request *recv_request; + int sub_id = HAB_MMID_GET_MINOR(mm_id); + + dev = find_hab_device(mm_id); + if (dev == NULL) { + ret = -EINVAL; + goto err; + } + + pchan = hab_pchan_find_domid(dev, dom_id); + if (!pchan) { + pr_err("hab_pchan_find_domid failed: dom_id=%d\n", dom_id); + ret = -EINVAL; + goto err; + } + + vchan = hab_vchan_alloc(ctx, pchan); + if (!vchan) { + ret = -ENOMEM; + goto err; + } + + /* Send Init sequence */ + open_id = atomic_inc_return(&open_id_counter); + hab_open_request_init(&request, HAB_PAYLOAD_TYPE_INIT, pchan, + vchan->id, sub_id, open_id); + ret = hab_open_request_send(&request); + if (ret) { + pr_err("hab_open_request_send failed: %d\n", ret); + goto err; + } + + /* Wait for Init-Ack sequence */ + hab_open_request_init(&request, HAB_PAYLOAD_TYPE_INIT_ACK, pchan, + 0, sub_id, open_id); + ret = hab_open_listen(ctx, dev, &request, &recv_request, 0); + if (ret || !recv_request) { + pr_err("hab_open_listen failed: %d\n", ret); + goto err; + } + + vchan->otherend_id = recv_request->vchan_id; + hab_open_request_free(recv_request); + + /* Send Ack sequence */ + hab_open_request_init(&request, HAB_PAYLOAD_TYPE_ACK, pchan, + 0, sub_id, open_id); + ret = hab_open_request_send(&request); + if (ret) + goto err; + + hab_pchan_put(pchan); + + return vchan; +err: + if (vchan) + hab_vchan_put(vchan); + if (pchan) + hab_pchan_put(pchan); + + return ERR_PTR(ret); +} + +struct virtual_channel *backend_listen(struct uhab_context *ctx, + unsigned int mm_id) +{ + int ret; + int open_id; + int sub_id = HAB_MMID_GET_MINOR(mm_id); + struct physical_channel *pchan = NULL; + struct hab_device *dev; + struct virtual_channel *vchan = NULL; + struct hab_open_request request; + struct hab_open_request *recv_request; + uint32_t otherend_vchan_id; + + dev = find_hab_device(mm_id); + if (dev == NULL) { + ret = -EINVAL; + goto err; + } + + while (1) { + /* Wait for Init sequence */ + hab_open_request_init(&request, HAB_PAYLOAD_TYPE_INIT, + NULL, 0, sub_id, 0); + ret = hab_open_listen(ctx, dev, &request, &recv_request, 0); + if (ret || !recv_request) { + pr_err("hab_open_listen failed: %d\n", ret); + goto err; + } + + otherend_vchan_id = recv_request->vchan_id; + open_id = recv_request->open_id; + pchan = recv_request->pchan; + 
hab_pchan_get(pchan); + hab_open_request_free(recv_request); + + vchan = hab_vchan_alloc(ctx, pchan); + if (!vchan) { + ret = -ENOMEM; + goto err; + } + + vchan->otherend_id = otherend_vchan_id; + + /* Send Init-Ack sequence */ + hab_open_request_init(&request, HAB_PAYLOAD_TYPE_INIT_ACK, + pchan, vchan->id, sub_id, open_id); + ret = hab_open_request_send(&request); + if (ret) + goto err; + + /* Wait for Ack sequence */ + hab_open_request_init(&request, HAB_PAYLOAD_TYPE_ACK, + pchan, 0, sub_id, open_id); + ret = hab_open_listen(ctx, dev, &request, &recv_request, HZ); + + if (ret != -EAGAIN) + break; + + hab_vchan_put(vchan); + vchan = NULL; + hab_pchan_put(pchan); + pchan = NULL; + } + + if (ret || !recv_request) { + pr_err("backend_listen failed: %d\n", ret); + ret = -EINVAL; + goto err; + } + + hab_open_request_free(recv_request); + hab_pchan_put(pchan); + return vchan; +err: + if (vchan) + hab_vchan_put(vchan); + if (pchan) + hab_pchan_put(pchan); + return ERR_PTR(ret); +} + +long hab_vchan_send(struct uhab_context *ctx, + int vcid, + size_t sizebytes, + void *data, + unsigned int flags) +{ + struct virtual_channel *vchan; + int ret; + struct hab_header header = HAB_HEADER_INITIALIZER; + int nonblocking_flag = flags & HABMM_SOCKET_SEND_FLAGS_NON_BLOCKING; + + if (sizebytes > HAB_MAX_MSG_SIZEBYTES) { + pr_err("Message too large, %lu bytes\n", sizebytes); + return -EINVAL; + } + + vchan = hab_get_vchan_fromvcid(vcid, ctx); + if (!vchan || vchan->otherend_closed) + return -ENODEV; + + HAB_HEADER_SET_SIZE(header, sizebytes); + HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_MSG); + HAB_HEADER_SET_ID(header, vchan->otherend_id); + + while (1) { + ret = physical_channel_send(vchan->pchan, &header, data); + + if (vchan->otherend_closed || nonblocking_flag || + ret != -EAGAIN) + break; + + schedule(); + } + + hab_vchan_put(vchan); + return ret; +} + +struct hab_message *hab_vchan_recv(struct uhab_context *ctx, + int vcid, + unsigned int flags) +{ + struct virtual_channel *vchan; + struct hab_message *message; + int ret = 0; + int nonblocking_flag = flags & HABMM_SOCKET_RECV_FLAGS_NON_BLOCKING; + + vchan = hab_get_vchan_fromvcid(vcid, ctx); + if (!vchan || vchan->otherend_closed) + return ERR_PTR(-ENODEV); + + if (nonblocking_flag) { + /* + * Try to pull data from the ring in this context instead of + * IRQ handler. Any available messages will be copied and queued + * internally, then fetched by hab_msg_dequeue() + */ + physical_channel_rx_dispatch((unsigned long) vchan->pchan); + } + + message = hab_msg_dequeue(vchan, !nonblocking_flag); + if (!message) { + if (nonblocking_flag) + ret = -EAGAIN; + else + ret = -EPIPE; + } + + hab_vchan_put(vchan); + return ret ? 
ERR_PTR(ret) : message; +} + +bool hab_is_loopback(void) +{ + return hab_driver.b_loopback; +} + +int hab_vchan_open(struct uhab_context *ctx, + unsigned int mmid, + int32_t *vcid, + uint32_t flags) +{ + struct virtual_channel *vchan; + + if (!vcid) + return -EINVAL; + + if (hab_is_loopback()) { + if (!hab_driver.loopback_num) { + hab_driver.loopback_num = 1; + vchan = backend_listen(ctx, mmid); + } else { + hab_driver.loopback_num = 0; + vchan = frontend_open(ctx, mmid, LOOPBACK_DOM); + } + } else { + if (hab_driver.b_server_dom) + vchan = backend_listen(ctx, mmid); + else + vchan = frontend_open(ctx, mmid, 0); + } + + if (IS_ERR(vchan)) + return PTR_ERR(vchan); + + write_lock(&ctx->ctx_lock); + list_add_tail(&vchan->node, &ctx->vchannels); + write_unlock(&ctx->ctx_lock); + + *vcid = vchan->id; + + return 0; +} + +void hab_send_close_msg(struct virtual_channel *vchan) +{ + struct hab_header header; + + if (vchan && !vchan->otherend_closed) { + HAB_HEADER_SET_SIZE(header, 0); + HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_CLOSE); + HAB_HEADER_SET_ID(header, vchan->otherend_id); + physical_channel_send(vchan->pchan, &header, NULL); + } +} + +static void hab_vchan_close_impl(struct kref *ref) +{ + struct virtual_channel *vchan = + container_of(ref, struct virtual_channel, usagecnt); + + list_del(&vchan->node); + hab_vchan_stop_notify(vchan); + hab_vchan_put(vchan); +} + + +void hab_vchan_close(struct uhab_context *ctx, int32_t vcid) +{ + struct virtual_channel *vchan, *tmp; + + if (!ctx) + return; + + write_lock(&ctx->ctx_lock); + list_for_each_entry_safe(vchan, tmp, &ctx->vchannels, node) { + if (vchan->id == vcid) { + kref_put(&vchan->usagecnt, hab_vchan_close_impl); + break; + } + } + + write_unlock(&ctx->ctx_lock); +} + +static int hab_open(struct inode *inodep, struct file *filep) +{ + int result = 0; + struct uhab_context *ctx; + + ctx = hab_ctx_alloc(0); + + if (!ctx) { + pr_err("hab_ctx_alloc failed\n"); + filep->private_data = NULL; + return -ENOMEM; + } + + filep->private_data = ctx; + + return result; +} + +static int hab_release(struct inode *inodep, struct file *filep) +{ + struct uhab_context *ctx = filep->private_data; + struct virtual_channel *vchan, *tmp; + + if (!ctx) + return 0; + + write_lock(&ctx->ctx_lock); + + list_for_each_entry_safe(vchan, tmp, &ctx->vchannels, node) { + list_del(&vchan->node); + hab_vchan_stop_notify(vchan); + hab_vchan_put(vchan); + } + + write_unlock(&ctx->ctx_lock); + + hab_ctx_put(ctx); + filep->private_data = NULL; + + return 0; +} + +static long hab_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) +{ + struct uhab_context *ctx = (struct uhab_context *)filep->private_data; + struct hab_open *open_param; + struct hab_close *close_param; + struct hab_recv *recv_param; + struct hab_send *send_param; + struct hab_message *msg; + void *send_data; + unsigned char data[256] = { 0 }; + long ret = 0; + + if (_IOC_SIZE(cmd) && (cmd & IOC_IN)) { + if (_IOC_SIZE(cmd) > sizeof(data)) + return -EINVAL; + + if (copy_from_user(data, (void __user *)arg, _IOC_SIZE(cmd))) { + pr_err("copy_from_user failed cmd=%x size=%d\n", + cmd, _IOC_SIZE(cmd)); + return -EFAULT; + } + } + + switch (cmd) { + case IOCTL_HAB_VC_OPEN: + open_param = (struct hab_open *)data; + ret = hab_vchan_open(ctx, open_param->mmid, + &open_param->vcid, open_param->flags); + break; + case IOCTL_HAB_VC_CLOSE: + close_param = (struct hab_close *)data; + hab_vchan_close(ctx, close_param->vcid); + break; + case IOCTL_HAB_SEND: + send_param = (struct hab_send *)data; + if 
(send_param->sizebytes > HAB_MAX_MSG_SIZEBYTES) { + ret = -EINVAL; + break; + } + + send_data = kzalloc(send_param->sizebytes, GFP_TEMPORARY); + if (!send_data) { + ret = -ENOMEM; + break; + } + + if (copy_from_user(send_data, (void __user *)send_param->data, + send_param->sizebytes)) { + ret = -EFAULT; + } else { + ret = hab_vchan_send(ctx, send_param->vcid, + send_param->sizebytes, + send_data, + send_param->flags); + } + kfree(send_data); + break; + case IOCTL_HAB_RECV: + recv_param = (struct hab_recv *)data; + if (!recv_param->data) { + ret = -EINVAL; + break; + } + + msg = hab_vchan_recv(ctx, recv_param->vcid, recv_param->flags); + + if (IS_ERR(msg)) { + recv_param->sizebytes = 0; + ret = PTR_ERR(msg); + break; + } + + if (recv_param->sizebytes < msg->sizebytes) { + recv_param->sizebytes = 0; + ret = -EINVAL; + } else if (copy_to_user((void __user *)recv_param->data, + msg->data, + msg->sizebytes)) { + pr_err("copy_to_user failed: vc=%x size=%d\n", + recv_param->vcid, (int)msg->sizebytes); + recv_param->sizebytes = 0; + ret = -EFAULT; + } else { + recv_param->sizebytes = msg->sizebytes; + } + + hab_msg_free(msg); + break; + case IOCTL_HAB_VC_EXPORT: + ret = hab_mem_export(ctx, (struct hab_export *)data, 0); + break; + case IOCTL_HAB_VC_IMPORT: + ret = hab_mem_import(ctx, (struct hab_import *)data, 0); + break; + case IOCTL_HAB_VC_UNEXPORT: + ret = hab_mem_unexport(ctx, (struct hab_unexport *)data, 0); + break; + case IOCTL_HAB_VC_UNIMPORT: + ret = hab_mem_unimport(ctx, (struct hab_unimport *)data, 0); + break; + default: + ret = -ENOIOCTLCMD; + } + + if (ret == 0 && _IOC_SIZE(cmd) && (cmd & IOC_OUT)) + if (copy_to_user((void __user *) arg, data, _IOC_SIZE(cmd))) { + pr_err("copy_to_user failed: cmd=%x\n", cmd); + ret = -EFAULT; + } + + return ret; +} + +static const struct file_operations hab_fops = { + .owner = THIS_MODULE, + .open = hab_open, + .release = hab_release, + .mmap = habmem_imp_hyp_mmap, + .unlocked_ioctl = hab_ioctl +}; + +/* + * These map sg functions are pass through because the memory backing the + * sg list is already accessible to the kernel as they come from a the + * dedicated shared vm pool + */ + +static int hab_map_sg(struct device *dev, struct scatterlist *sgl, + int nelems, enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + /* return nelems directly */ + return nelems; +} + +static void hab_unmap_sg(struct device *dev, + struct scatterlist *sgl, int nelems, + enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + /*Do nothing */ +} + +static const struct dma_map_ops hab_dma_ops = { + .map_sg = hab_map_sg, + .unmap_sg = hab_unmap_sg, +}; + +static int __init hab_init(void) +{ + int result; + int i; + dev_t dev; + struct hab_device *device; + + result = alloc_chrdev_region(&hab_driver.major, 0, 1, "hab"); + + if (result < 0) { + pr_err("alloc_chrdev_region failed: %d\n", result); + return result; + } + + cdev_init(&hab_driver.cdev, &hab_fops); + hab_driver.cdev.owner = THIS_MODULE; + hab_driver.cdev.ops = &hab_fops; + dev = MKDEV(MAJOR(hab_driver.major), 0); + + result = cdev_add(&hab_driver.cdev, dev, 1); + + if (result < 0) { + unregister_chrdev_region(dev, 1); + pr_err("cdev_add failed: %d\n", result); + return result; + } + + hab_driver.class = class_create(THIS_MODULE, "hab"); + + if (IS_ERR(hab_driver.class)) { + result = PTR_ERR(hab_driver.class); + pr_err("class_create failed: %d\n", result); + goto err; + } + + hab_driver.dev = device_create(hab_driver.class, NULL, + dev, &hab_driver, "hab"); + + if (IS_ERR(hab_driver.dev)) { + result = 
PTR_ERR(hab_driver.dev); + pr_err("device_create failed: %d\n", result); + goto err; + } + + for (i = 0; i < hab_driver.ndevices; i++) { + device = &hab_driver.devp[i]; + init_waitqueue_head(&device->openq); + } + + hab_hypervisor_register(); + + hab_driver.kctx = hab_ctx_alloc(1); + if (!hab_driver.kctx) { + pr_err("hab_ctx_alloc failed"); + result = -ENOMEM; + hab_hypervisor_unregister(); + goto err; + } + + set_dma_ops(hab_driver.dev, &hab_dma_ops); + + return result; + +err: + if (!IS_ERR_OR_NULL(hab_driver.dev)) + device_destroy(hab_driver.class, dev); + if (!IS_ERR_OR_NULL(hab_driver.class)) + class_destroy(hab_driver.class); + cdev_del(&hab_driver.cdev); + unregister_chrdev_region(dev, 1); + + return result; +} + +static void __exit hab_exit(void) +{ + dev_t dev; + + hab_hypervisor_unregister(); + hab_ctx_put(hab_driver.kctx); + dev = MKDEV(MAJOR(hab_driver.major), 0); + device_destroy(hab_driver.class, dev); + class_destroy(hab_driver.class); + cdev_del(&hab_driver.cdev); + unregister_chrdev_region(dev, 1); +} + +subsys_initcall(hab_init); +module_exit(hab_exit); + +MODULE_DESCRIPTION("Hypervisor abstraction layer"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/qcom/hab/hab.h b/drivers/soc/qcom/hab/hab.h new file mode 100644 index 000000000000..805e5b4a7008 --- /dev/null +++ b/drivers/soc/qcom/hab/hab.h @@ -0,0 +1,415 @@ +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef __HAB_H +#define __HAB_H + +#define pr_fmt(fmt) "hab: " fmt + +#include <linux/types.h> + +#include <linux/habmm.h> + +#include <linux/kernel.h> +#include <linux/interrupt.h> +#include <linux/device.h> +#include <linux/fs.h> +#include <linux/mm.h> +#include <linux/vmalloc.h> +#include <linux/slab.h> +#include <linux/kthread.h> +#include <linux/sched.h> +#include <linux/cdev.h> +#include <linux/list.h> +#include <linux/spinlock.h> +#include <linux/rbtree.h> +#include <linux/idr.h> +#include <linux/module.h> +#include <linux/uaccess.h> +#include <linux/dma-direction.h> +#include <linux/dma-mapping.h> + +enum hab_payload_type { + HAB_PAYLOAD_TYPE_MSG = 0x0, + HAB_PAYLOAD_TYPE_INIT, + HAB_PAYLOAD_TYPE_INIT_ACK, + HAB_PAYLOAD_TYPE_ACK, + HAB_PAYLOAD_TYPE_EXPORT, + HAB_PAYLOAD_TYPE_EXPORT_ACK, + HAB_PAYLOAD_TYPE_PROFILE, + HAB_PAYLOAD_TYPE_CLOSE, +}; +#define LOOPBACK_DOM 0xFF + +/* + * Tuning required. 
If there are multiple clients, the aging of previous + * "request" might be discarded + */ +#define Q_AGE_THRESHOLD 1000000 + +/* match the name to dtsi if for real HYP framework */ +#define DEVICE_AUD1_NAME "hab_aud1" +#define DEVICE_AUD2_NAME "hab_aud2" +#define DEVICE_AUD3_NAME "hab_aud3" +#define DEVICE_AUD4_NAME "hab_aud4" +#define DEVICE_CAM_NAME "hab_cam" +#define DEVICE_DISP1_NAME "hab_disp1" +#define DEVICE_DISP2_NAME "hab_disp2" +#define DEVICE_DISP3_NAME "hab_disp3" +#define DEVICE_DISP4_NAME "hab_disp4" +#define DEVICE_DISP5_NAME "hab_disp5" +#define DEVICE_GFX_NAME "hab_ogles" +#define DEVICE_VID_NAME "hab_vid" +#define DEVICE_MISC_NAME "hab_misc" +#define DEVICE_QCPE1_NAME "hab_qcpe_vm1" +#define DEVICE_QCPE2_NAME "hab_qcpe_vm2" +#define DEVICE_QCPE3_NAME "hab_qcpe_vm3" +#define DEVICE_QCPE4_NAME "hab_qcpe_vm4" + +/* "Size" of the HAB_HEADER_ID and HAB_VCID_ID must match */ +#define HAB_HEADER_SIZE_SHIFT 0 +#define HAB_HEADER_TYPE_SHIFT 16 +#define HAB_HEADER_ID_SHIFT 24 +#define HAB_HEADER_SIZE_MASK 0x0000FFFF +#define HAB_HEADER_TYPE_MASK 0x00FF0000 +#define HAB_HEADER_ID_MASK 0xFF000000 +#define HAB_HEADER_INITIALIZER {0} + +#define HAB_MMID_GET_MAJOR(mmid) (mmid & 0xFFFF) +#define HAB_MMID_GET_MINOR(mmid) ((mmid>>16) & 0xFF) + +#define HAB_VCID_ID_SHIFT 0 +#define HAB_VCID_DOMID_SHIFT 8 +#define HAB_VCID_MMID_SHIFT 16 +#define HAB_VCID_ID_MASK 0x000000FF +#define HAB_VCID_DOMID_MASK 0x0000FF00 +#define HAB_VCID_MMID_MASK 0xFFFF0000 +#define HAB_VCID_GET_ID(vcid) \ + (((vcid) & HAB_VCID_ID_MASK) >> HAB_VCID_ID_SHIFT) + +#define HAB_HEADER_SET_SIZE(header, size) \ + ((header).info = (((header).info) & (~HAB_HEADER_SIZE_MASK)) | \ + (((size) << HAB_HEADER_SIZE_SHIFT) & HAB_HEADER_SIZE_MASK)) + +#define HAB_HEADER_SET_TYPE(header, type) \ + ((header).info = (((header).info) & (~HAB_HEADER_TYPE_MASK)) | \ + (((type) << HAB_HEADER_TYPE_SHIFT) & HAB_HEADER_TYPE_MASK)) + +#define HAB_HEADER_SET_ID(header, id) \ + ((header).info = (((header).info) & (~HAB_HEADER_ID_MASK)) | \ + ((HAB_VCID_GET_ID(id) << HAB_HEADER_ID_SHIFT) \ + & HAB_HEADER_ID_MASK)) + +#define HAB_HEADER_GET_SIZE(header) \ + ((((header).info) & HAB_HEADER_SIZE_MASK) >> HAB_HEADER_SIZE_SHIFT) + +#define HAB_HEADER_GET_TYPE(header) \ + ((((header).info) & HAB_HEADER_TYPE_MASK) >> HAB_HEADER_TYPE_SHIFT) + +#define HAB_HEADER_GET_ID(header) \ + (((((header).info) & HAB_HEADER_ID_MASK) >> \ + (HAB_HEADER_ID_SHIFT - HAB_VCID_ID_SHIFT)) & HAB_VCID_ID_MASK) + +struct hab_header { + uint32_t info; +}; + +struct physical_channel { + struct kref refcount; + struct hab_device *habdev; + struct list_head node; + struct idr vchan_idr; + spinlock_t vid_lock; + + struct idr expid_idr; + spinlock_t expid_lock; + + void *hyp_data; + int dom_id; + int closed; + + spinlock_t rxbuf_lock; +}; + +struct hab_open_send_data { + int vchan_id; + int sub_id; + int open_id; +}; + +struct hab_open_request { + int type; + struct physical_channel *pchan; + int vchan_id; + int sub_id; + int open_id; +}; + +struct hab_open_node { + struct hab_open_request request; + struct list_head node; + int age; +}; + +struct hab_export_ack { + uint32_t export_id; + int32_t vcid_local; + int32_t vcid_remote; +}; + +struct hab_export_ack_recvd { + struct hab_export_ack ack; + struct list_head node; + int age; +}; + +struct hab_message { + size_t sizebytes; + struct list_head node; + uint32_t data[]; +}; + +struct hab_device { + const char *name; + unsigned int id; + struct list_head pchannels; + struct mutex pchan_lock; + struct list_head openq_list; + 
spinlock_t openlock; + wait_queue_head_t openq; +}; + +struct uhab_context { + struct kref refcount; + struct list_head vchannels; + + struct list_head exp_whse; + uint32_t export_total; + + wait_queue_head_t exp_wq; + struct list_head exp_rxq; + rwlock_t exp_lock; + spinlock_t expq_lock; + + struct list_head imp_whse; + spinlock_t imp_lock; + uint32_t import_total; + + void *import_ctx; + + rwlock_t ctx_lock; + int closing; + int kernel; +}; + +struct hab_driver { + struct device *dev; + struct cdev cdev; + dev_t major; + struct class *class; + int irq; + + int ndevices; + struct hab_device *devp; + struct uhab_context *kctx; + int b_server_dom; + int loopback_num; + int b_loopback; +}; + +struct virtual_channel { + struct work_struct work; + /* + * refcount is used to track the references from hab core to the virtual + * channel such as references from physical channels, + * i.e. references from the "other" side + */ + struct kref refcount; + /* + * usagecnt is used to track the clients who are using this virtual + * channel such as local clients, client sowftware etc, + * i.e. references from "this" side + */ + struct kref usagecnt; + struct physical_channel *pchan; + struct uhab_context *ctx; + struct list_head node; + struct list_head rx_list; + wait_queue_head_t rx_queue; + spinlock_t rx_lock; + int id; + int otherend_id; + int otherend_closed; +}; + +/* + * Struct shared between local and remote, contents are composed by exporter, + * the importer only writes to pdata and local (exporter) domID + */ +struct export_desc { + uint32_t export_id; + int readonly; + uint64_t import_index; + + struct virtual_channel *vchan; + + int32_t vcid_local; + int32_t vcid_remote; + int domid_local; + int domid_remote; + + struct list_head node; + void *kva; + int payload_count; + unsigned char payload[1]; +}; + +int hab_vchan_open(struct uhab_context *ctx, + unsigned int mmid, int32_t *vcid, uint32_t flags); +void hab_vchan_close(struct uhab_context *ctx, + int32_t vcid); +long hab_vchan_send(struct uhab_context *ctx, + int vcid, + size_t sizebytes, + void *data, + unsigned int flags); +struct hab_message *hab_vchan_recv(struct uhab_context *ctx, + int vcid, + unsigned int flags); +void hab_vchan_stop(struct virtual_channel *vchan); +void hab_vchan_stop_notify(struct virtual_channel *vchan); + +int hab_mem_export(struct uhab_context *ctx, + struct hab_export *param, int kernel); +int hab_mem_import(struct uhab_context *ctx, + struct hab_import *param, int kernel); +int hab_mem_unexport(struct uhab_context *ctx, + struct hab_unexport *param, int kernel); +int hab_mem_unimport(struct uhab_context *ctx, + struct hab_unimport *param, int kernel); + +void habmem_remove_export(struct export_desc *exp); + +/* memory hypervisor framework plugin I/F */ +void *habmm_hyp_allocate_grantable(int page_count, + uint32_t *sizebytes); + +int habmem_hyp_grant_user(unsigned long address, + int page_count, + int flags, + int remotedom, + void *ppdata); + +int habmem_hyp_grant(unsigned long address, + int page_count, + int flags, + int remotedom, + void *ppdata); + +int habmem_hyp_revoke(void *expdata, uint32_t count); + +void *habmem_imp_hyp_open(void); +void habmem_imp_hyp_close(void *priv, int kernel); + +long habmem_imp_hyp_map(void *priv, void *impdata, uint32_t count, + uint32_t remotedom, + uint64_t *index, + void **pkva, + int kernel, + uint32_t userflags); + +long habmm_imp_hyp_unmap(void *priv, uint64_t index, + uint32_t count, + int kernel); + +int habmem_imp_hyp_mmap(struct file *flip, struct vm_area_struct 
*vma); + + + +void hab_msg_free(struct hab_message *message); +struct hab_message *hab_msg_dequeue(struct virtual_channel *vchan, + int wait_flag); + +void hab_msg_recv(struct physical_channel *pchan, + struct hab_header *header); + +void hab_open_request_init(struct hab_open_request *request, + int type, + struct physical_channel *pchan, + int vchan_id, + int sub_id, + int open_id); +int hab_open_request_send(struct hab_open_request *request); +int hab_open_request_add(struct physical_channel *pchan, + struct hab_header *header); +void hab_open_request_free(struct hab_open_request *request); +int hab_open_listen(struct uhab_context *ctx, + struct hab_device *dev, + struct hab_open_request *listen, + struct hab_open_request **recv_request, + int ms_timeout); + +struct virtual_channel *hab_vchan_alloc(struct uhab_context *ctx, + struct physical_channel *pchan); +struct virtual_channel *hab_vchan_get(struct physical_channel *pchan, + uint32_t vchan_id); +void hab_vchan_put(struct virtual_channel *vchan); + +struct virtual_channel *hab_get_vchan_fromvcid(int32_t vcid, + struct uhab_context *ctx); +struct physical_channel *hab_pchan_alloc(struct hab_device *habdev, + int otherend_id); +struct physical_channel *hab_pchan_find_domid(struct hab_device *dev, + int dom_id); +int hab_vchan_find_domid(struct virtual_channel *vchan); + +void hab_pchan_get(struct physical_channel *pchan); +void hab_pchan_put(struct physical_channel *pchan); + +struct uhab_context *hab_ctx_alloc(int kernel); + +void hab_ctx_free(struct kref *ref); + +static inline void hab_ctx_get(struct uhab_context *ctx) +{ + if (ctx) + kref_get(&ctx->refcount); +} + +static inline void hab_ctx_put(struct uhab_context *ctx) +{ + if (ctx) + kref_put(&ctx->refcount, hab_ctx_free); +} + +void hab_send_close_msg(struct virtual_channel *vchan); +int hab_hypervisor_register(void); +void hab_hypervisor_unregister(void); + +int physical_channel_read(struct physical_channel *pchan, + void *payload, + size_t read_size); + +int physical_channel_send(struct physical_channel *pchan, + struct hab_header *header, + void *payload); + +void physical_channel_rx_dispatch(unsigned long physical_channel); + +int loopback_pchan_create(char *dev_name); + +bool hab_is_loopback(void); + +/* Global singleton HAB instance */ +extern struct hab_driver hab_driver; + +#endif /* __HAB_H */ diff --git a/arch/arm/boot/dts/qcom/msm8998-9x55-cdp.dts b/drivers/soc/qcom/hab/hab_grantable.h index cf167897bb89..8a3f9721a89a 100644 --- a/arch/arm/boot/dts/qcom/msm8998-9x55-cdp.dts +++ b/drivers/soc/qcom/hab/hab_grantable.h @@ -1,5 +1,4 @@ -/* - * Copyright (c) 2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -9,16 +8,22 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. + * */ +#ifndef __HAB_GRANTABLE_H +#define __HAB_GRANTABLE_H -/dts-v1/; - -#include "msm8998-9x55.dtsi" -#include "msm8998-mdss-panels.dtsi" -#include "msm8998-cdp.dtsi" +/* Grantable should be common between exporter and importer */ +struct grantable { + unsigned long pfn; +}; -/ { - model = "Qualcomm Technologies, Inc. 
MSM8998-9x55 CDP"; - compatible = "qcom,msm8998-9x55-cdp", "qcom,msm8998-9x55", "qcom,cdp"; - qcom,board-id= <1 2>; +struct compressed_pfns { + unsigned long first_pfn; + int nregions; + struct region { + int size; + int space; + } region[]; }; +#endif /* __HAB_GRANTABLE_H */ diff --git a/drivers/soc/qcom/hab/hab_mem_linux.c b/drivers/soc/qcom/hab/hab_mem_linux.c new file mode 100644 index 000000000000..ab4b9d0885cb --- /dev/null +++ b/drivers/soc/qcom/hab/hab_mem_linux.c @@ -0,0 +1,451 @@ +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include "hab.h" +#include <linux/fdtable.h> +#include <linux/dma-buf.h> +#include "hab_grantable.h" + + +struct pages_list { + struct list_head list; + struct page **pages; + long npages; + uint64_t index; /* for mmap first call */ + int kernel; + void *kva; + void *uva; + int refcntk; + int refcntu; + uint32_t userflags; + struct file *filp_owner; + struct file *filp_mapper; +}; + +struct importer_context { + int cnt; /* pages allocated for local file */ + struct list_head imp_list; + struct file *filp; +}; + +void *habmm_hyp_allocate_grantable(int page_count, + uint32_t *sizebytes) +{ + if (!sizebytes || !page_count) + return NULL; + + *sizebytes = page_count * sizeof(struct grantable); + return vmalloc(*sizebytes); +} + +static int match_file(const void *p, struct file *file, unsigned int fd) +{ + /* + * We must return fd + 1 because iterate_fd stops searching on + * non-zero return, but 0 is a valid fd. + */ + return (p == file) ? 
(fd + 1) : 0; +} + + +static int habmem_get_dma_pages(unsigned long address, + int page_count, + struct page **pages) +{ + struct vm_area_struct *vma; + struct dma_buf *dmabuf = NULL; + unsigned long offset; + unsigned long page_offset; + struct scatterlist *s; + struct sg_table *sg_table = NULL; + struct dma_buf_attachment *attach = NULL; + struct page *page; + int i, j, rc = 0; + int fd; + + vma = find_vma(current->mm, address); + if (!vma || !vma->vm_file) + goto err; + + /* Look for the fd that matches this the vma file */ + fd = iterate_fd(current->files, 0, match_file, vma->vm_file); + if (fd == 0) { + pr_err("iterate_fd failed\n"); + goto err; + } + + offset = address - vma->vm_start; + page_offset = offset/PAGE_SIZE; + + dmabuf = dma_buf_get(fd - 1); + + attach = dma_buf_attach(dmabuf, hab_driver.dev); + if (IS_ERR_OR_NULL(attach)) { + pr_err("dma_buf_attach failed\n"); + goto err; + } + + sg_table = dma_buf_map_attachment(attach, DMA_TO_DEVICE); + + if (IS_ERR_OR_NULL(sg_table)) { + pr_err("dma_buf_map_attachment failed\n"); + goto err; + } + + for_each_sg(sg_table->sgl, s, sg_table->nents, i) { + page = sg_page(s); + + for (j = page_offset; j < (s->length >> PAGE_SHIFT); j++) { + pages[rc] = nth_page(page, j); + rc++; + if (rc >= page_count) + break; + } + if (rc >= page_count) + break; + + if (page_offset > (s->length >> PAGE_SHIFT)) { + /* carry-over the remaining offset to next s list */ + page_offset = page_offset-(s->length >> PAGE_SHIFT); + } else { + /* + * the page_offset is within this s list + * there is no more offset for the next s list + */ + page_offset = 0; + } + + } + +err: + if (!IS_ERR_OR_NULL(sg_table)) + dma_buf_unmap_attachment(attach, sg_table, DMA_TO_DEVICE); + if (!IS_ERR_OR_NULL(attach)) + dma_buf_detach(dmabuf, attach); + if (!IS_ERR_OR_NULL(dmabuf)) + dma_buf_put(dmabuf); + return rc; +} + +int habmem_hyp_grant_user(unsigned long address, + int page_count, + int flags, + int remotedom, + void *ppdata) +{ + int i, ret = 0; + struct grantable *item = (struct grantable *)ppdata; + struct page **pages; + + pages = vmalloc(page_count * sizeof(struct page *)); + if (!pages) + return -ENOMEM; + + down_read(¤t->mm->mmap_sem); + + if (HABMM_EXP_MEM_TYPE_DMA & flags) { + ret = habmem_get_dma_pages(address, + page_count, + pages); + } else { + ret = get_user_pages(current, current->mm, + address, + page_count, + 1, + 1, + pages, + NULL); + } + + if (ret > 0) { + for (i = 0; i < page_count; i++) + item[i].pfn = page_to_pfn(pages[i]); + } else { + pr_err("get %d user pages failed: %d\n", page_count, ret); + } + + vfree(pages); + up_read(¤t->mm->mmap_sem); + return ret; +} +/* + * exporter - grant & revoke + * generate shareable page list based on CPU friendly virtual "address". 
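The exporter-side flow above boils down to: allocate a grantable table sized for the buffer, then fill in one pfn per page. A rough usage sketch under stated assumptions -- example_grant_kernel_buf() is a hypothetical caller (not part of this patch), buf is a page-aligned kernel buffer, and error handling is trimmed:

	/* Hypothetical caller of the exporter helpers declared in hab.h. */
	static int example_grant_kernel_buf(void *buf, size_t size, int remotedom,
					    struct grantable **out_tbl)
	{
		uint32_t tbl_bytes;
		int npages = size >> PAGE_SHIFT;
		struct grantable *tbl;

		tbl = habmm_hyp_allocate_grantable(npages, &tbl_bytes);
		if (!tbl)
			return -ENOMEM;

		/* fills tbl[i].pfn for every page backing buf (vmalloc or linear) */
		habmem_hyp_grant((unsigned long)buf, npages, 0, remotedom, tbl);

		*out_tbl = tbl;
		return 0;
	}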
+ * The result as an array is stored in ppdata to return to caller + * page size 4KB is assumed + */ +int habmem_hyp_grant(unsigned long address, + int page_count, + int flags, + int remotedom, + void *ppdata) +{ + int i; + struct grantable *item; + void *kva = (void *)(uintptr_t)address; + int is_vmalloc = is_vmalloc_addr(kva); + + item = (struct grantable *)ppdata; + + for (i = 0; i < page_count; i++) { + kva = (void *)(uintptr_t)(address + i*PAGE_SIZE); + if (is_vmalloc) + item[i].pfn = page_to_pfn(vmalloc_to_page(kva)); + else + item[i].pfn = page_to_pfn(virt_to_page(kva)); + } + + return 0; +} + +int habmem_hyp_revoke(void *expdata, uint32_t count) +{ + return 0; +} + +void *habmem_imp_hyp_open(void) +{ + struct importer_context *priv; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return NULL; + + INIT_LIST_HEAD(&priv->imp_list); + + return priv; +} + +void habmem_imp_hyp_close(void *imp_ctx, int kernel) +{ + struct importer_context *priv = imp_ctx; + struct pages_list *pglist, *pglist_tmp; + + if (!priv) + return; + + list_for_each_entry_safe(pglist, pglist_tmp, &priv->imp_list, list) { + if (kernel && pglist->kva) + vunmap(pglist->kva); + + list_del(&pglist->list); + priv->cnt--; + + vfree(pglist->pages); + kfree(pglist); + } + + kfree(priv); +} + +/* + * setup pages, be ready for the following mmap call + * index is output to refer to this imported buffer described by the import data + */ +long habmem_imp_hyp_map(void *imp_ctx, + void *impdata, + uint32_t count, + uint32_t remotedom, + uint64_t *index, + void **pkva, + int kernel, + uint32_t userflags) +{ + struct page **pages; + struct compressed_pfns *pfn_table = impdata; + struct pages_list *pglist; + struct importer_context *priv = imp_ctx; + unsigned long pfn; + int i, j, k = 0; + + if (!pfn_table || !priv) + return -EINVAL; + + pages = vmalloc(count * sizeof(struct page *)); + if (!pages) + return -ENOMEM; + + pglist = kzalloc(sizeof(*pglist), GFP_KERNEL); + if (!pglist) { + vfree(pages); + return -ENOMEM; + } + + pfn = pfn_table->first_pfn; + for (i = 0; i < pfn_table->nregions; i++) { + for (j = 0; j < pfn_table->region[i].size; j++) { + pages[k] = pfn_to_page(pfn+j); + k++; + } + pfn += pfn_table->region[i].size + pfn_table->region[i].space; + } + + pglist->pages = pages; + pglist->npages = count; + pglist->kernel = kernel; + pglist->index = page_to_phys(pages[0]) >> PAGE_SHIFT; + pglist->refcntk = pglist->refcntu = 0; + pglist->userflags = userflags; + + *index = pglist->index << PAGE_SHIFT; + + if (kernel) { + pgprot_t prot = PAGE_KERNEL; + + if (!(userflags & HABMM_IMPORT_FLAGS_CACHED)) + prot = pgprot_writecombine(prot); + + pglist->kva = vmap(pglist->pages, pglist->npages, VM_MAP, prot); + if (pglist->kva == NULL) { + vfree(pages); + kfree(pglist); + pr_err("%ld pages vmap failed\n", pglist->npages); + return -ENOMEM; + } + + pglist->uva = NULL; + pglist->refcntk++; + *pkva = pglist->kva; + *index = (uint64_t)((uintptr_t)pglist->kva); + } else { + pglist->kva = NULL; + } + + list_add_tail(&pglist->list, &priv->imp_list); + priv->cnt++; + + return 0; +} + +/* the input index is PHY address shifted for uhab, and kva for khab */ +long habmm_imp_hyp_unmap(void *imp_ctx, + uint64_t index, + uint32_t count, + int kernel) +{ + struct importer_context *priv = imp_ctx; + struct pages_list *pglist; + int found = 0; + uint64_t pg_index = index >> PAGE_SHIFT; + + list_for_each_entry(pglist, &priv->imp_list, list) { + if (kernel) { + if (pglist->kva == (void *)((uintptr_t)index)) + found = 1; + } else { + if 
(pglist->index == pg_index) + found = 1; + } + + if (found) { + list_del(&pglist->list); + priv->cnt--; + break; + } + } + + if (!found) { + pr_err("failed to find export id on index %llx\n", index); + return -EINVAL; + } + + if (kernel) + if (pglist->kva) + vunmap(pglist->kva); + + vfree(pglist->pages); + kfree(pglist); + + return 0; +} + +static int hab_map_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct page *page; + struct pages_list *pglist; + + unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; + + /* PHY address */ + unsigned long fault_offset = + (unsigned long)vmf->virtual_address - vma->vm_start + offset; + unsigned long fault_index = fault_offset>>PAGE_SHIFT; + int page_idx; + + if (vma == NULL) + return VM_FAULT_SIGBUS; + + pglist = vma->vm_private_data; + + page_idx = fault_index - pglist->index; + if (page_idx < 0 || page_idx >= pglist->npages) { + pr_err("Out of page array. page_idx %d, pg cnt %ld", + page_idx, pglist->npages); + return VM_FAULT_SIGBUS; + } + + page = pglist->pages[page_idx]; + get_page(page); + vmf->page = page; + return 0; +} + +static void hab_map_open(struct vm_area_struct *vma) +{ +} + +static void hab_map_close(struct vm_area_struct *vma) +{ +} + +static const struct vm_operations_struct habmem_vm_ops = { + + .fault = hab_map_fault, + .open = hab_map_open, + .close = hab_map_close, +}; + +int habmem_imp_hyp_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct uhab_context *ctx = (struct uhab_context *) filp->private_data; + struct importer_context *imp_ctx = ctx->import_ctx; + long length = vma->vm_end - vma->vm_start; + struct pages_list *pglist; + int bfound = 0; + + list_for_each_entry(pglist, &imp_ctx->imp_list, list) { + if (pglist->index == vma->vm_pgoff) { + bfound = 1; + break; + } + } + + if (!bfound) { + pr_err("Failed to find pglist vm_pgoff: %d\n", vma->vm_pgoff); + return -EINVAL; + } + + if (length > pglist->npages * PAGE_SIZE) { + pr_err("Error vma length %ld not matching page list %ld\n", + length, pglist->npages * PAGE_SIZE); + return -EINVAL; + } + + vma->vm_ops = &habmem_vm_ops; + + vma->vm_private_data = pglist; + + if (!(pglist->userflags & HABMM_IMPORT_FLAGS_CACHED)) + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + + return 0; +} diff --git a/drivers/soc/qcom/hab/hab_mimex.c b/drivers/soc/qcom/hab/hab_mimex.c new file mode 100644 index 000000000000..aaef9aa9f414 --- /dev/null +++ b/drivers/soc/qcom/hab/hab_mimex.c @@ -0,0 +1,394 @@ +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include "hab.h" +#include "hab_grantable.h" + +/* + * use physical channel to send export parcel + + * local remote + * send(export) --> IRQ store to export warehouse + * wait(export ack) <-- send(export ack) + + * the actual data consists the following 3 parts listed in order + * 1. header (uint32_t) vcid|type|size + * 2. export parcel (full struct) + * 3. 
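For user-space imports the mmap offset is the physical page index of the first imported page (pglist->index << PAGE_SHIFT), so hab_map_fault() can translate a faulting address back into an index into pglist->pages[] with nothing but shifts and subtractions. A worked version of that arithmetic with made-up addresses:

/* How hab_map_fault() turns a faulting user address into an index into the
 * imported page array; all addresses below are invented for illustration. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
    unsigned long pglist_index = 0x80000;            /* first PFN of the import */
    unsigned long vm_pgoff     = 0x80000;            /* mmap offset, in pages */
    unsigned long vm_start     = 0x7f0000200000UL;   /* start of the mapping */
    unsigned long fault_addr   = 0x7f0000203000UL;   /* address that faulted */

    unsigned long fault_offset = fault_addr - vm_start +
                                 (vm_pgoff << PAGE_SHIFT);
    unsigned long fault_index  = fault_offset >> PAGE_SHIFT;
    long page_idx = (long)(fault_index - pglist_index);

    printf("page_idx=%ld (pages[%ld] backs this fault)\n", page_idx, page_idx);
    return 0;    /* prints page_idx=3 for these example numbers */
}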
full contents in export->pdata + */ + + +static int hab_export_ack_find(struct uhab_context *ctx, + struct hab_export_ack *expect_ack) +{ + int ret = 0; + struct hab_export_ack_recvd *ack_recvd; + + spin_lock_bh(&ctx->expq_lock); + + list_for_each_entry(ack_recvd, &ctx->exp_rxq, node) { + if (ack_recvd->ack.export_id == expect_ack->export_id && + ack_recvd->ack.vcid_local == expect_ack->vcid_local && + ack_recvd->ack.vcid_remote == expect_ack->vcid_remote) { + list_del(&ack_recvd->node); + kfree(ack_recvd); + ret = 1; + break; + } + ack_recvd->age++; + if (ack_recvd->age > Q_AGE_THRESHOLD) { + list_del(&ack_recvd->node); + kfree(ack_recvd); + } + } + + spin_unlock_bh(&ctx->expq_lock); + + return ret; +} + +static int hab_export_ack_wait(struct uhab_context *ctx, + struct hab_export_ack *expect_ack) +{ + int ret; + + ret = wait_event_interruptible_timeout(ctx->exp_wq, + hab_export_ack_find(ctx, expect_ack), + HZ); + if (!ret || (ret == -ERESTARTSYS)) + ret = -EAGAIN; + else if (ret > 0) + ret = 0; + return ret; +} + +/* + * Get id from free list first. if not available, new id is generated. + * Once generated it will not be erased + * assumptions: no handshake or memory map/unmap in this helper function + */ +static struct export_desc *habmem_add_export(struct virtual_channel *vchan, + int sizebytes, + uint32_t flags) +{ + struct uhab_context *ctx; + struct export_desc *exp; + + if (!vchan || !sizebytes) + return NULL; + + exp = vmalloc(sizebytes); + if (!exp) + return NULL; + + idr_preload(GFP_KERNEL); + spin_lock(&vchan->pchan->expid_lock); + exp->export_id = + idr_alloc(&vchan->pchan->expid_idr, exp, 1, 0, GFP_NOWAIT); + spin_unlock(&vchan->pchan->expid_lock); + idr_preload_end(); + + exp->readonly = flags; + exp->vchan = vchan; + exp->vcid_local = vchan->id; + exp->vcid_remote = vchan->otherend_id; + exp->domid_local = -1; /* dom id, provided on the importer */ + exp->domid_remote = vchan->pchan->dom_id; + + ctx = vchan->ctx; + write_lock(&ctx->exp_lock); + ctx->export_total++; + list_add_tail(&exp->node, &ctx->exp_whse); + write_unlock(&ctx->exp_lock); + + return exp; +} + +void habmem_remove_export(struct export_desc *exp) +{ + struct physical_channel *pchan; + struct uhab_context *ctx; + + if (!exp || !exp->vchan || !exp->vchan->ctx || !exp->vchan->pchan) + return; + + ctx = exp->vchan->ctx; + ctx->export_total--; + + pchan = exp->vchan->pchan; + + spin_lock(&pchan->expid_lock); + idr_remove(&pchan->expid_idr, exp->export_id); + spin_unlock(&pchan->expid_lock); + + vfree(exp); +} + +static int compress_pfns(void **pfns, int npages, unsigned int *data_size) +{ + int i, j = 0; + struct grantable *item = (struct grantable *)*pfns; + int region_size = 1; + struct compressed_pfns *new_table = + vmalloc(sizeof(struct compressed_pfns) + + npages * sizeof(struct region)); + + if (!new_table) + return -ENOMEM; + + new_table->first_pfn = item[0].pfn; + for (i = 1; i < npages; i++) { + if (item[i].pfn-1 == item[i-1].pfn) { + region_size++; + } else { + new_table->region[j].size = region_size; + new_table->region[j].space = item[i].pfn - + item[i-1].pfn - 1; + j++; + region_size = 1; + } + } + new_table->region[j].size = region_size; + new_table->region[j].space = 0; + new_table->nregions = j+1; + vfree(*pfns); + + *data_size = sizeof(struct compressed_pfns) + + sizeof(struct region)*new_table->nregions; + *pfns = new_table; + return 0; +} + +/* + * store the parcel to the warehouse, then send the parcel to remote side + * both exporter composed export descriptor and the grantrefids are sent + * 
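compress_pfns() is the exporter-side inverse of the expansion shown earlier: it scans the grantable PFN array and closes a run whenever the next PFN is not the previous one plus one, recording the run length in size and the hole up to the next run in space (the last run gets space 0). A standalone sketch over a small, made-up PFN list; it produces exactly the {3,97}/{2,0} table used in the import example above.

/* Exporter-side run-length encoding of a PFN list (model only). */
#include <stdio.h>

struct region { unsigned long size; unsigned long space; };

int main(void)
{
    unsigned long pfns[] = { 0x1000, 0x1001, 0x1002, 0x1064, 0x1065 };
    int npages = 5, i, j = 0;
    unsigned long run = 1;
    struct region out[5];

    for (i = 1; i < npages; i++) {
        if (pfns[i] == pfns[i - 1] + 1) {
            run++;
        } else {
            out[j].size = run;
            out[j].space = pfns[i] - pfns[i - 1] - 1;   /* gap to next run */
            j++;
            run = 1;
        }
    }
    out[j].size = run;
    out[j].space = 0;                                   /* last run has no gap */

    printf("first_pfn=0x%lx nregions=%d\n", pfns[0], j + 1);
    for (i = 0; i <= j; i++)
        printf("region %d: size=%lu space=%lu\n", i, out[i].size, out[i].space);
    return 0;
}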
as one msg to the importer side + */ +static int habmem_export_vchan(struct uhab_context *ctx, + struct virtual_channel *vchan, + void *pdata, + int payload_size, + int nunits, + uint32_t flags, + uint32_t *export_id) { + int ret; + struct export_desc *exp; + uint32_t sizebytes = sizeof(*exp) + payload_size; + struct hab_export_ack expected_ack = {0}; + struct hab_header header = HAB_HEADER_INITIALIZER; + + exp = habmem_add_export(vchan, sizebytes, flags); + if (!exp) + return -ENOMEM; + + /* append the pdata to the export descriptor */ + exp->payload_count = nunits; + memcpy(exp->payload, pdata, payload_size); + + HAB_HEADER_SET_SIZE(header, sizebytes); + HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_EXPORT); + HAB_HEADER_SET_ID(header, vchan->otherend_id); + ret = physical_channel_send(vchan->pchan, &header, exp); + + if (ret != 0) { + pr_err("failed to export payload to the remote %d\n", ret); + return ret; + } + + expected_ack.export_id = exp->export_id; + expected_ack.vcid_local = exp->vcid_local; + expected_ack.vcid_remote = exp->vcid_remote; + ret = hab_export_ack_wait(ctx, &expected_ack); + + *export_id = exp->export_id; + + return ret; +} + +int hab_mem_export(struct uhab_context *ctx, + struct hab_export *param, + int kernel) +{ + int ret = 0; + void *pdata_exp = NULL; + unsigned int pdata_size = 0; + uint32_t export_id = 0; + struct virtual_channel *vchan; + int page_count; + + if (!ctx || !param || param->sizebytes > HAB_MAX_EXPORT_SIZE) + return -EINVAL; + + vchan = hab_get_vchan_fromvcid(param->vcid, ctx); + if (!vchan || !vchan->pchan) { + ret = -ENODEV; + goto err; + } + + page_count = param->sizebytes/PAGE_SIZE; + pdata_exp = habmm_hyp_allocate_grantable(page_count, &pdata_size); + if (!pdata_exp) { + ret = -ENOMEM; + goto err; + } + + if (kernel) { + ret = habmem_hyp_grant((unsigned long)param->buffer, + page_count, + param->flags, + vchan->pchan->dom_id, + pdata_exp); + } else { + ret = habmem_hyp_grant_user((unsigned long)param->buffer, + page_count, + param->flags, + vchan->pchan->dom_id, + pdata_exp); + } + if (ret < 0) { + pr_err("habmem_hyp_grant failed size=%d ret=%d\n", + pdata_size, ret); + goto err; + } + + compress_pfns(&pdata_exp, page_count, &pdata_size); + + ret = habmem_export_vchan(ctx, + vchan, + pdata_exp, + pdata_size, + page_count, + param->flags, + &export_id); + + param->exportid = export_id; +err: + vfree(pdata_exp); + if (vchan) + hab_vchan_put(vchan); + return ret; +} + +int hab_mem_unexport(struct uhab_context *ctx, + struct hab_unexport *param, + int kernel) +{ + int ret = 0, found = 0; + struct export_desc *exp, *tmp; + + if (!ctx || !param) + return -EINVAL; + + write_lock(&ctx->exp_lock); + list_for_each_entry_safe(exp, tmp, &ctx->exp_whse, node) { + if ((param->exportid == exp->export_id) && + (param->vcid == exp->vcid_local)) { + list_del(&exp->node); + found = 1; + break; + } + } + write_unlock(&ctx->exp_lock); + + if (!found) + return -EINVAL; + + ret = habmem_hyp_revoke(exp->payload, exp->payload_count); + + habmem_remove_export(exp); + return ret; +} + +int hab_mem_import(struct uhab_context *ctx, + struct hab_import *param, + int kernel) +{ + int ret = 0, found = 0; + struct export_desc *exp = NULL; + + if (!ctx || !param) + return -EINVAL; + + spin_lock_bh(&ctx->imp_lock); + list_for_each_entry(exp, &ctx->imp_whse, node) { + if ((exp->export_id == param->exportid) && + (param->vcid == exp->vcid_remote)) { + found = 1; + break; + } + } + spin_unlock_bh(&ctx->imp_lock); + + if (!found) { + pr_err("Fail to get export descriptor from 
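habmem_export_vchan() sends the export descriptor and its payload (the compressed PFN table) as one message of sizeof(*exp) + payload_size bytes behind a single header. The snippet below only illustrates that size bookkeeping; the structures are simplified stand-ins, not the driver's real export_desc or compressed_pfns definitions.

/* Rough size bookkeeping for one export message. The struct layouts are
 * simplified placeholders, kept only to make the arithmetic concrete. */
#include <stdint.h>
#include <stdio.h>

struct region_model { uint32_t size; uint32_t space; };
struct compressed_pfns_model {
    uint64_t first_pfn;
    uint32_t nregions;
    struct region_model region[];
};
struct export_desc_model {
    uint32_t export_id, vcid_local, vcid_remote, payload_count;
    char payload[];
};

int main(void)
{
    uint32_t nregions = 2;                 /* e.g. from compress_pfns() */
    uint32_t payload_size = sizeof(struct compressed_pfns_model) +
                            nregions * sizeof(struct region_model);
    uint32_t sizebytes  = sizeof(struct export_desc_model) + payload_size;
    uint32_t wire_bytes = sizeof(uint32_t) /* header */ + sizebytes;

    printf("payload=%u descriptor+payload=%u on-wire=%u bytes\n",
           payload_size, sizebytes, wire_bytes);
    return 0;
}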
export id %d\n", + param->exportid); + ret = -ENODEV; + return ret; + } + + ret = habmem_imp_hyp_map(ctx->import_ctx, + exp->payload, + exp->payload_count, + exp->domid_local, + &exp->import_index, + &exp->kva, + kernel, + param->flags); + if (ret) { + pr_err("Import fail ret:%d pcnt:%d rem:%d 1st_ref:0x%X\n", + ret, exp->payload_count, + exp->domid_local, *((uint32_t *)exp->payload)); + return ret; + } + + param->index = exp->import_index; + param->kva = (uint64_t)exp->kva; + + return ret; +} + +int hab_mem_unimport(struct uhab_context *ctx, + struct hab_unimport *param, + int kernel) +{ + int ret = 0, found = 0; + struct export_desc *exp = NULL, *exp_tmp; + + if (!ctx || !param) + return -EINVAL; + + spin_lock_bh(&ctx->imp_lock); + list_for_each_entry_safe(exp, exp_tmp, &ctx->imp_whse, node) { + if ((exp->export_id == param->exportid) && + (param->vcid == exp->vcid_remote)) { + list_del(&exp->node); + ctx->import_total--; + found = 1; + break; + } + } + spin_unlock_bh(&ctx->imp_lock); + + if (!found) + ret = -EINVAL; + else { + ret = habmm_imp_hyp_unmap(ctx->import_ctx, + exp->import_index, + exp->payload_count, + kernel); + + param->kva = (uint64_t)exp->kva; + kfree(exp); + } + + return ret; +} diff --git a/drivers/soc/qcom/hab/hab_msg.c b/drivers/soc/qcom/hab/hab_msg.c new file mode 100644 index 000000000000..f08cc83fe9fc --- /dev/null +++ b/drivers/soc/qcom/hab/hab_msg.c @@ -0,0 +1,208 @@ +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#include "hab.h" + +static int hab_rx_queue_empty(struct virtual_channel *vchan) +{ + int ret; + + spin_lock_bh(&vchan->rx_lock); + ret = list_empty(&vchan->rx_list); + spin_unlock_bh(&vchan->rx_lock); + return ret; +} + +static struct hab_message* +hab_msg_alloc(struct physical_channel *pchan, size_t sizebytes) +{ + struct hab_message *message; + + message = kzalloc(sizeof(*message) + sizebytes, GFP_ATOMIC); + if (!message) + return NULL; + + message->sizebytes = + physical_channel_read(pchan, message->data, sizebytes); + + return message; +} + +void hab_msg_free(struct hab_message *message) +{ + kfree(message); +} + +struct hab_message * +hab_msg_dequeue(struct virtual_channel *vchan, int wait_flag) +{ + struct hab_message *message = NULL; + int ret = 0; + + if (wait_flag) { + if (hab_rx_queue_empty(vchan)) + ret = wait_event_interruptible(vchan->rx_queue, + !hab_rx_queue_empty(vchan) || + vchan->otherend_closed); + } + + if (!ret && !vchan->otherend_closed) { + spin_lock_bh(&vchan->rx_lock); + if (!list_empty(&vchan->rx_list)) { + message = list_first_entry(&vchan->rx_list, + struct hab_message, node); + list_del(&message->node); + } + spin_unlock_bh(&vchan->rx_lock); + } + + return message; +} + +static void hab_msg_queue(struct virtual_channel *vchan, + struct hab_message *message) +{ + spin_lock_bh(&vchan->rx_lock); + list_add_tail(&message->node, &vchan->rx_list); + spin_unlock_bh(&vchan->rx_lock); + + wake_up_interruptible(&vchan->rx_queue); +} + +static int hab_export_enqueue(struct virtual_channel *vchan, + struct export_desc *exp) +{ + struct uhab_context *ctx = vchan->ctx; + + spin_lock_bh(&ctx->imp_lock); + list_add_tail(&exp->node, &ctx->imp_whse); + ctx->import_total++; + spin_unlock_bh(&ctx->imp_lock); + + return 0; +} + +static int hab_send_export_ack(struct physical_channel *pchan, + struct export_desc *exp) +{ + struct hab_export_ack exp_ack = { + .export_id = exp->export_id, + .vcid_local = exp->vcid_local, + .vcid_remote = exp->vcid_remote + }; + struct hab_header header = HAB_HEADER_INITIALIZER; + + HAB_HEADER_SET_SIZE(header, sizeof(exp_ack)); + HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_EXPORT_ACK); + HAB_HEADER_SET_ID(header, exp->vcid_local); + return physical_channel_send(pchan, &header, &exp_ack); +} + +static int hab_receive_create_export_ack(struct physical_channel *pchan, + struct uhab_context *ctx) +{ + struct hab_export_ack_recvd *ack_recvd = + kzalloc(sizeof(*ack_recvd), GFP_ATOMIC); + + if (!ack_recvd) + return -ENOMEM; + + if (physical_channel_read(pchan, + &ack_recvd->ack, + sizeof(ack_recvd->ack)) != sizeof(ack_recvd->ack)) + return -EIO; + + spin_lock_bh(&ctx->expq_lock); + list_add_tail(&ack_recvd->node, &ctx->exp_rxq); + spin_unlock_bh(&ctx->expq_lock); + + return 0; +} + +void hab_msg_recv(struct physical_channel *pchan, + struct hab_header *header) +{ + int ret; + struct hab_message *message; + struct hab_device *dev = pchan->habdev; + size_t sizebytes = HAB_HEADER_GET_SIZE(*header); + uint32_t payload_type = HAB_HEADER_GET_TYPE(*header); + uint32_t vchan_id = HAB_HEADER_GET_ID(*header); + struct virtual_channel *vchan = NULL; + struct export_desc *exp_desc; + + /* get the local virtual channel if it isn't an open message */ + if (payload_type != HAB_PAYLOAD_TYPE_INIT && + payload_type != HAB_PAYLOAD_TYPE_INIT_ACK && + payload_type != HAB_PAYLOAD_TYPE_ACK) { + vchan = hab_vchan_get(pchan, vchan_id); + if (!vchan) { + return; + } else if (vchan->otherend_closed) { + hab_vchan_put(vchan); + return; + } + } + + switch (payload_type) { + 
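hab_msg_dequeue() follows the usual check-sleep-recheck shape: it only sleeps when the caller asked to wait, wakes on either a queued message or the peer closing, and re-checks both conditions under the lock before popping. The fragment below is a user-space analogue of that control flow using a mutex and condition variable; it is a model of the pattern, not kernel code.

/* User-space analogue of the dequeue pattern in hab_msg_dequeue(). */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct msg { struct msg *next; };

struct rx_queue {
    pthread_mutex_t lock;
    pthread_cond_t nonempty;
    struct msg *head;
    bool peer_closed;
};

struct msg *dequeue(struct rx_queue *q, bool wait)
{
    struct msg *m = NULL;

    pthread_mutex_lock(&q->lock);
    if (wait) {
        /* sleep until a message arrives or the other end goes away */
        while (!q->head && !q->peer_closed)
            pthread_cond_wait(&q->nonempty, &q->lock);
    }
    /* deliver only if the peer is still up and something is queued */
    if (!q->peer_closed && q->head) {
        m = q->head;
        q->head = m->next;
    }
    pthread_mutex_unlock(&q->lock);
    return m;
}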
case HAB_PAYLOAD_TYPE_MSG: + message = hab_msg_alloc(pchan, sizebytes); + if (!message) + break; + + hab_msg_queue(vchan, message); + break; + + case HAB_PAYLOAD_TYPE_INIT: + case HAB_PAYLOAD_TYPE_INIT_ACK: + case HAB_PAYLOAD_TYPE_ACK: + ret = hab_open_request_add(pchan, header); + if (ret) + break; + wake_up_interruptible(&dev->openq); + break; + + case HAB_PAYLOAD_TYPE_EXPORT: + exp_desc = kzalloc(sizebytes, GFP_ATOMIC); + if (!exp_desc) + break; + + if (physical_channel_read(pchan, exp_desc, sizebytes) != + sizebytes) { + vfree(exp_desc); + break; + } + + exp_desc->domid_local = pchan->dom_id; + + hab_export_enqueue(vchan, exp_desc); + hab_send_export_ack(pchan, exp_desc); + break; + + case HAB_PAYLOAD_TYPE_EXPORT_ACK: + ret = hab_receive_create_export_ack(pchan, vchan->ctx); + if (ret) + break; + + wake_up_interruptible(&vchan->ctx->exp_wq); + break; + + case HAB_PAYLOAD_TYPE_CLOSE: + hab_vchan_stop(vchan); + break; + + default: + break; + } + if (vchan) + hab_vchan_put(vchan); +} diff --git a/drivers/soc/qcom/hab/hab_open.c b/drivers/soc/qcom/hab/hab_open.c new file mode 100644 index 000000000000..66468aa43afd --- /dev/null +++ b/drivers/soc/qcom/hab/hab_open.c @@ -0,0 +1,154 @@ +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include "hab.h" + +void hab_open_request_init(struct hab_open_request *request, + int type, + struct physical_channel *pchan, + int vchan_id, + int sub_id, + int open_id) +{ + request->type = type; + request->pchan = pchan; + request->vchan_id = vchan_id; + request->sub_id = sub_id; + request->open_id = open_id; +} + +int hab_open_request_send(struct hab_open_request *request) +{ + struct hab_header header = HAB_HEADER_INITIALIZER; + struct hab_open_send_data data; + + HAB_HEADER_SET_SIZE(header, sizeof(struct hab_open_send_data)); + HAB_HEADER_SET_TYPE(header, request->type); + + data.vchan_id = request->vchan_id; + data.open_id = request->open_id; + data.sub_id = request->sub_id; + + return physical_channel_send(request->pchan, &header, &data); +} + +int hab_open_request_add(struct physical_channel *pchan, + struct hab_header *header) +{ + struct hab_open_node *node; + struct hab_device *dev = pchan->habdev; + struct hab_open_send_data data; + struct hab_open_request *request; + + node = kzalloc(sizeof(*node), GFP_ATOMIC); + if (!node) + return -ENOMEM; + + if (physical_channel_read(pchan, &data, HAB_HEADER_GET_SIZE(*header)) != + HAB_HEADER_GET_SIZE(*header)) + return -EIO; + + request = &node->request; + request->type = HAB_HEADER_GET_TYPE(*header); + request->pchan = pchan; + request->vchan_id = data.vchan_id; + request->sub_id = data.sub_id; + request->open_id = data.open_id; + node->age = 0; + hab_pchan_get(pchan); + + spin_lock_bh(&dev->openlock); + list_add_tail(&node->node, &dev->openq_list); + spin_unlock_bh(&dev->openlock); + + return 0; +} + +static int hab_open_request_find(struct uhab_context *ctx, + struct hab_device *dev, + struct hab_open_request *listen, + struct hab_open_request **recv_request) +{ + struct hab_open_node *node, *tmp; + struct 
hab_open_request *request; + int ret = 0; + + if (ctx->closing || + (listen->pchan && listen->pchan->closed)) { + *recv_request = NULL; + return 1; + } + + spin_lock_bh(&dev->openlock); + if (list_empty(&dev->openq_list)) + goto done; + + list_for_each_entry_safe(node, tmp, &dev->openq_list, node) { + request = (struct hab_open_request *)node; + if (request->type == listen->type && + (request->sub_id == listen->sub_id) && + (!listen->open_id || + request->open_id == listen->open_id) && + (!listen->pchan || + request->pchan == listen->pchan)) { + list_del(&node->node); + *recv_request = request; + ret = 1; + break; + } + node->age++; + if (node->age > Q_AGE_THRESHOLD) { + list_del(&node->node); + hab_open_request_free(request); + } + } + +done: + spin_unlock_bh(&dev->openlock); + return ret; +} + +void hab_open_request_free(struct hab_open_request *request) +{ + if (request) { + hab_pchan_put(request->pchan); + kfree(request); + } +} + +int hab_open_listen(struct uhab_context *ctx, + struct hab_device *dev, + struct hab_open_request *listen, + struct hab_open_request **recv_request, + int ms_timeout) +{ + int ret = 0; + + if (!ctx || !listen || !recv_request) + return -EINVAL; + + *recv_request = NULL; + if (ms_timeout > 0) { + ret = wait_event_interruptible_timeout(dev->openq, + hab_open_request_find(ctx, dev, listen, recv_request), + ms_timeout); + if (!ret || (-ERESTARTSYS == ret)) + ret = -EAGAIN; + else if (ret > 0) + ret = 0; + } else { + ret = wait_event_interruptible(dev->openq, + hab_open_request_find(ctx, dev, listen, recv_request)); + } + + return ret; +} diff --git a/drivers/soc/qcom/hab/hab_pchan.c b/drivers/soc/qcom/hab/hab_pchan.c new file mode 100644 index 000000000000..1ad727f7d90f --- /dev/null +++ b/drivers/soc/qcom/hab/hab_pchan.c @@ -0,0 +1,86 @@ +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
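hab_open_request_find() treats an open_id of 0 and a NULL pchan in the listen request as wildcards, so a listener can wait either for one specific pending open or for any open with the right type and sub_id. The predicate in isolation, with the structures trimmed to the fields the comparison actually uses:

/* The matching rule used by hab_open_request_find(), in isolation. */
#include <stdbool.h>

struct open_request {
    int type;
    int sub_id;
    int open_id;           /* 0 in a listen request means "any open_id" */
    const void *pchan;     /* NULL in a listen request means "any pchan" */
};

static bool open_request_matches(const struct open_request *pending,
                                 const struct open_request *listen)
{
    return pending->type == listen->type &&
           pending->sub_id == listen->sub_id &&
           (!listen->open_id || pending->open_id == listen->open_id) &&
           (!listen->pchan || pending->pchan == listen->pchan);
}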
+ * + */ +#include "hab.h" + +struct physical_channel * +hab_pchan_alloc(struct hab_device *habdev, int otherend_id) +{ + struct physical_channel *pchan = kzalloc(sizeof(*pchan), GFP_KERNEL); + + if (!pchan) + return NULL; + + idr_init(&pchan->vchan_idr); + spin_lock_init(&pchan->vid_lock); + idr_init(&pchan->expid_idr); + spin_lock_init(&pchan->expid_lock); + kref_init(&pchan->refcount); + + pchan->habdev = habdev; + pchan->dom_id = otherend_id; + pchan->closed = 1; + pchan->hyp_data = NULL; + + spin_lock_init(&pchan->rxbuf_lock); + + mutex_lock(&habdev->pchan_lock); + list_add_tail(&pchan->node, &habdev->pchannels); + mutex_unlock(&habdev->pchan_lock); + + return pchan; +} + +static void hab_pchan_free(struct kref *ref) +{ + struct physical_channel *pchan = + container_of(ref, struct physical_channel, refcount); + + mutex_lock(&pchan->habdev->pchan_lock); + list_del(&pchan->node); + mutex_unlock(&pchan->habdev->pchan_lock); + kfree(pchan->hyp_data); + kfree(pchan); +} + +struct physical_channel * +hab_pchan_find_domid(struct hab_device *dev, int dom_id) +{ + struct physical_channel *pchan; + + mutex_lock(&dev->pchan_lock); + list_for_each_entry(pchan, &dev->pchannels, node) + if (pchan->dom_id == dom_id) + break; + + if (pchan->dom_id != dom_id) + pchan = NULL; + + if (pchan && !kref_get_unless_zero(&pchan->refcount)) + pchan = NULL; + + mutex_unlock(&dev->pchan_lock); + + return pchan; +} + +void hab_pchan_get(struct physical_channel *pchan) +{ + if (pchan) + kref_get(&pchan->refcount); +} + +void hab_pchan_put(struct physical_channel *pchan) +{ + if (pchan) + kref_put(&pchan->refcount, hab_pchan_free); +} diff --git a/drivers/soc/qcom/hab/hab_pipe.c b/drivers/soc/qcom/hab/hab_pipe.c new file mode 100644 index 000000000000..e757b6cb1f01 --- /dev/null +++ b/drivers/soc/qcom/hab/hab_pipe.c @@ -0,0 +1,131 @@ +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#include "hab.h" +#include "hab_pipe.h" + +size_t hab_pipe_calc_required_bytes(uint32_t shared_buf_size) +{ + return sizeof(struct hab_pipe) + + (2 * (sizeof(struct hab_shared_buf) + shared_buf_size)); +} + +struct hab_pipe_endpoint *hab_pipe_init(struct hab_pipe *pipe, + uint32_t shared_buf_size, int top) +{ + struct hab_pipe_endpoint *ep = NULL; + struct hab_shared_buf *buf_a; + struct hab_shared_buf *buf_b; + + if (!pipe) + return NULL; + + buf_a = (struct hab_shared_buf *) pipe->buf_base; + buf_b = (struct hab_shared_buf *) (pipe->buf_base + + sizeof(struct hab_shared_buf) + shared_buf_size); + + if (top) { + ep = &pipe->top; + memset(ep, 0, sizeof(*ep)); + ep->tx_info.sh_buf = buf_a; + ep->rx_info.sh_buf = buf_b; + } else { + ep = &pipe->bottom; + memset(ep, 0, sizeof(*ep)); + ep->tx_info.sh_buf = buf_b; + ep->rx_info.sh_buf = buf_a; + memset(ep->tx_info.sh_buf, 0, sizeof(struct hab_shared_buf)); + memset(ep->rx_info.sh_buf, 0, sizeof(struct hab_shared_buf)); + ep->tx_info.sh_buf->size = shared_buf_size; + ep->rx_info.sh_buf->size = shared_buf_size; + + pipe->buf_a = buf_a; + pipe->buf_b = buf_b; + pipe->total_size = + hab_pipe_calc_required_bytes(shared_buf_size); + } + return ep; +} + +uint32_t hab_pipe_write(struct hab_pipe_endpoint *ep, + unsigned char *p, uint32_t num_bytes) +{ + struct hab_shared_buf *sh_buf = ep->tx_info.sh_buf; + uint32_t space = + (sh_buf->size - (ep->tx_info.wr_count - sh_buf->rd_count)); + uint32_t count1, count2; + + if (!p || num_bytes > space || num_bytes == 0) + return 0; + + count1 = (num_bytes <= (sh_buf->size - ep->tx_info.index)) ? num_bytes : + (sh_buf->size - ep->tx_info.index); + count2 = num_bytes - count1; + + if (count1 > 0) { + memcpy(&sh_buf->data[ep->tx_info.index], p, count1); + ep->tx_info.wr_count += count1; + ep->tx_info.index += count1; + if (ep->tx_info.index >= sh_buf->size) + ep->tx_info.index = 0; + } + if (count2 > 0) {/* handle buffer wrapping */ + memcpy(&sh_buf->data[ep->tx_info.index], p + count1, count2); + ep->tx_info.wr_count += count2; + ep->tx_info.index += count2; + if (ep->tx_info.index >= sh_buf->size) + ep->tx_info.index = 0; + } + return num_bytes; +} + +/* Updates the write index which is shared with the other VM */ +void hab_pipe_write_commit(struct hab_pipe_endpoint *ep) +{ + struct hab_shared_buf *sh_buf = ep->tx_info.sh_buf; + + mb(); /* Must commit data before incrementing count */ + sh_buf->wr_count = ep->tx_info.wr_count; +} + +uint32_t hab_pipe_read(struct hab_pipe_endpoint *ep, + unsigned char *p, uint32_t size) +{ + struct hab_shared_buf *sh_buf = ep->rx_info.sh_buf; + uint32_t avail = sh_buf->wr_count - sh_buf->rd_count; + uint32_t count1, count2, to_read; + + if (!p || avail == 0 || size == 0) + return 0; + + to_read = (avail < size) ? avail : size; + count1 = (to_read <= (sh_buf->size - ep->rx_info.index)) ? 
to_read : + (sh_buf->size - ep->rx_info.index); + count2 = to_read - count1; + + if (count1 > 0) { + memcpy(p, &sh_buf->data[ep->rx_info.index], count1); + ep->rx_info.index += count1; + if (ep->rx_info.index >= sh_buf->size) + ep->rx_info.index = 0; + mb(); /*Must commit data before incremeting count*/ + sh_buf->rd_count += count1; + } + if (count2 > 0) { /* handle buffer wrapping */ + memcpy(p + count1, &sh_buf->data[ep->rx_info.index], count2); + ep->rx_info.index += count2; + mb(); /*Must commit data before incremeting count*/ + sh_buf->rd_count += count2; + } + + return to_read; +} diff --git a/drivers/soc/qcom/hab/hab_pipe.h b/drivers/soc/qcom/hab/hab_pipe.h new file mode 100644 index 000000000000..6ffdbd133868 --- /dev/null +++ b/drivers/soc/qcom/hab/hab_pipe.h @@ -0,0 +1,60 @@ +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef HAB_PIPE_H +#define HAB_PIPE_H + +struct hab_shared_buf { + uint32_t rd_count; + uint32_t wr_count; + uint32_t size; + unsigned char data[]; +}; + +struct hab_pipe_endpoint { + struct { + uint32_t wr_count; + uint32_t index; + struct hab_shared_buf *sh_buf; + } tx_info; + struct { + uint32_t index; + struct hab_shared_buf *sh_buf; + } rx_info; +}; + +struct hab_pipe { + struct hab_pipe_endpoint top; + struct hab_pipe_endpoint bottom; + + /* For debugging only */ + struct hab_shared_buf *buf_a; /* top TX, bottom RX */ + struct hab_shared_buf *buf_b; /* top RX, bottom TX */ + size_t total_size; + + unsigned char buf_base[]; +}; + +size_t hab_pipe_calc_required_bytes(uint32_t shared_buf_size); + +struct hab_pipe_endpoint *hab_pipe_init(struct hab_pipe *pipe, + uint32_t shared_buf_size, int top); + +uint32_t hab_pipe_write(struct hab_pipe_endpoint *ep, + unsigned char *p, uint32_t num_bytes); + +void hab_pipe_write_commit(struct hab_pipe_endpoint *ep); + +uint32_t hab_pipe_read(struct hab_pipe_endpoint *ep, + unsigned char *p, uint32_t size); + +#endif /* HAB_PIPE_H */ diff --git a/drivers/soc/qcom/hab/hab_qvm.c b/drivers/soc/qcom/hab/hab_qvm.c new file mode 100644 index 000000000000..a37590f23c61 --- /dev/null +++ b/drivers/soc/qcom/hab/hab_qvm.c @@ -0,0 +1,251 @@ +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
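hab_pipe_write() and hab_pipe_read() form a single-producer/single-consumer ring over the shared buffer: wr_count and rd_count only ever grow, so free space is size - (wr_count - rd_count) and available data is wr_count - rd_count even across counter wrap, while index tracks the position inside the buffer and every copy is split in two where it crosses the end of the array. A compact user-space model of the same arithmetic (buffer size and payloads are made up):

/* User-space model of the hab_pipe ring arithmetic (illustrative only). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RING_SIZE 8u

static uint8_t data[RING_SIZE];
static uint32_t wr_count, rd_count;        /* monotonically increasing */
static uint32_t wr_index, rd_index;        /* positions inside data[] */

static uint32_t ring_write(const uint8_t *p, uint32_t n)
{
    uint32_t space = RING_SIZE - (wr_count - rd_count);
    uint32_t c1, c2;

    if (n == 0 || n > space)
        return 0;
    c1 = (n <= RING_SIZE - wr_index) ? n : RING_SIZE - wr_index;
    c2 = n - c1;
    memcpy(&data[wr_index], p, c1);          /* up to the end of the array */
    memcpy(&data[0], p + c1, c2);            /* wrapped remainder, if any */
    wr_index = (wr_index + n) % RING_SIZE;
    wr_count += n;
    return n;
}

static uint32_t ring_read(uint8_t *p, uint32_t n)
{
    uint32_t avail = wr_count - rd_count;
    uint32_t todo = (avail < n) ? avail : n;
    uint32_t c1, c2;

    if (todo == 0)
        return 0;
    c1 = (todo <= RING_SIZE - rd_index) ? todo : RING_SIZE - rd_index;
    c2 = todo - c1;
    memcpy(p, &data[rd_index], c1);
    memcpy(p + c1, &data[0], c2);
    rd_index = (rd_index + todo) % RING_SIZE;
    rd_count += todo;
    return todo;
}

int main(void)
{
    uint8_t out[8];

    ring_write((const uint8_t *)"abcdef", 6);
    ring_read(out, 4);                            /* consumes "abcd" */
    ring_write((const uint8_t *)"XYZ", 3);        /* wraps around the end */
    printf("read %u bytes\n", ring_read(out, 8)); /* prints: read 5 bytes */
    return 0;
}

The driver additionally publishes wr_count and rd_count through the shared hab_shared_buf with explicit memory barriers (hab_pipe_write_commit() and the barriers in hab_pipe_read()), which a single-process model like this does not need.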
+ * + */ +#include "hab.h" +#include "hab_qvm.h" + +#include <linux/highmem.h> +#include <linux/string.h> +#include <linux/interrupt.h> +#include <linux/io.h> + +#include <linux/of.h> +#include <linux/of_platform.h> + +#define DEFAULT_HAB_SHMEM_IRQ 7 + +#define SHMEM_PHYSICAL_ADDR 0x1c050000 + +static irqreturn_t shm_irq_handler(int irq, void *_pchan) +{ + irqreturn_t rc = IRQ_NONE; + struct physical_channel *pchan = _pchan; + struct qvm_channel *dev = + (struct qvm_channel *) (pchan ? pchan->hyp_data : NULL); + + if (dev && dev->guest_ctrl) { + int status = dev->guest_ctrl->status; + + if (status & dev->idx) { + rc = IRQ_HANDLED; + tasklet_schedule(&dev->task); + } + } + return rc; +} + +static uint64_t get_guest_factory_paddr(struct qvm_channel *dev, + const char *name, uint32_t pages) +{ + int i; + + dev->guest_factory = ioremap(SHMEM_PHYSICAL_ADDR, PAGE_SIZE); + + if (!dev->guest_factory) { + pr_err("Couldn't map guest_factory\n"); + return 0; + } + + if (dev->guest_factory->signature != GUEST_SHM_SIGNATURE) { + pr_err("shmem factory signature incorrect: %ld != %lu\n", + GUEST_SHM_SIGNATURE, dev->guest_factory->signature); + iounmap(dev->guest_factory); + return 0; + } + + dev->guest_intr = dev->guest_factory->vector; + + /* + * Set the name field on the factory page to identify the shared memory + * region + */ + for (i = 0; i < strlen(name) && i < GUEST_SHM_MAX_NAME - 1; i++) + dev->guest_factory->name[i] = name[i]; + dev->guest_factory->name[i] = (char) 0; + + guest_shm_create(dev->guest_factory, pages); + + /* See if we successfully created/attached to the region. */ + if (dev->guest_factory->status != GSS_OK) { + pr_err("create failed: %d\n", dev->guest_factory->status); + iounmap(dev->guest_factory); + return 0; + } + + pr_debug("shm creation size %x\n", dev->guest_factory->size); + + return dev->guest_factory->shmem; +} + +static int create_dispatcher(struct physical_channel *pchan, int id) +{ + struct qvm_channel *dev = (struct qvm_channel *)pchan->hyp_data; + int ret; + + tasklet_init(&dev->task, physical_channel_rx_dispatch, + (unsigned long) pchan); + + ret = request_irq(hab_driver.irq, shm_irq_handler, IRQF_SHARED, + hab_driver.devp[id].name, pchan); + + if (ret) + pr_err("request_irq for %s failed: %d\n", + hab_driver.devp[id].name, ret); + + return ret; +} + +static struct physical_channel *habhyp_commdev_alloc(int id) +{ + struct qvm_channel *dev; + struct physical_channel *pchan = NULL; + int ret = 0, channel = 0; + char *shmdata; + uint32_t pipe_alloc_size = + hab_pipe_calc_required_bytes(PIPE_SHMEM_SIZE); + uint32_t pipe_alloc_pages = + (pipe_alloc_size + PAGE_SIZE - 1) / PAGE_SIZE; + uint64_t paddr; + int temp; + int total_pages; + struct page **pages; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return ERR_PTR(-ENOMEM); + + spin_lock_init(&dev->io_lock); + + paddr = get_guest_factory_paddr(dev, + hab_driver.devp[id].name, + pipe_alloc_pages); + + total_pages = dev->guest_factory->size + 1; + pages = kmalloc_array(total_pages, sizeof(struct page *), GFP_KERNEL); + if (!pages) { + ret = -ENOMEM; + goto err; + } + + for (temp = 0; temp < total_pages; temp++) + pages[temp] = pfn_to_page((paddr / PAGE_SIZE) + temp); + + dev->guest_ctrl = vmap(pages, total_pages, VM_MAP, PAGE_KERNEL); + if (!dev->guest_ctrl) { + ret = -ENOMEM; + kfree(pages); + goto err; + } + + shmdata = (char *)dev->guest_ctrl + PAGE_SIZE; + dev->idx = dev->guest_ctrl->idx; + + kfree(pages); + + dev->pipe = (struct hab_pipe *) shmdata; + dev->pipe_ep = hab_pipe_init(dev->pipe, 
PIPE_SHMEM_SIZE, + dev->be ? 0 : 1); + + pchan = hab_pchan_alloc(&hab_driver.devp[id], dev->be); + if (!pchan) { + ret = -ENOMEM; + goto err; + } + + pchan->closed = 0; + pchan->hyp_data = (void *)dev; + + dev->channel = channel; + + ret = create_dispatcher(pchan, id); + if (ret < 0) + goto err; + + return pchan; + +err: + kfree(dev); + + if (pchan) + hab_pchan_put(pchan); + pr_err("habhyp_commdev_alloc failed: %d\n", ret); + return ERR_PTR(ret); +} + +int hab_hypervisor_register(void) +{ + int ret = 0, i; + + hab_driver.b_server_dom = 0; + + /* + * Can still attempt to instantiate more channels if one fails. + * Others can be retried later. + */ + for (i = 0; i < hab_driver.ndevices; i++) { + if (IS_ERR(habhyp_commdev_alloc(i))) + ret = -EAGAIN; + } + + return ret; +} + +void hab_hypervisor_unregister(void) +{ +} + +static int hab_shmem_probe(struct platform_device *pdev) +{ + int irq = platform_get_irq(pdev, 0); + + if (irq > 0) + hab_driver.irq = irq; + else + hab_driver.irq = DEFAULT_HAB_SHMEM_IRQ; + + return 0; +} + +static int hab_shmem_remove(struct platform_device *pdev) +{ + return 0; +} + +static const struct of_device_id hab_shmem_match_table[] = { + {.compatible = "qvm,guest_shm"}, + {}, +}; + +static struct platform_driver hab_shmem_driver = { + .probe = hab_shmem_probe, + .remove = hab_shmem_remove, + .driver = { + .name = "hab_shmem", + .of_match_table = of_match_ptr(hab_shmem_match_table), + }, +}; + +static int __init hab_shmem_init(void) +{ + return platform_driver_register(&hab_shmem_driver); +} + +static void __exit hab_shmem_exit(void) +{ + platform_driver_unregister(&hab_shmem_driver); +} + +core_initcall(hab_shmem_init); +module_exit(hab_shmem_exit); + +MODULE_DESCRIPTION("Hypervisor shared memory driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/qcom/hab/hab_qvm.h b/drivers/soc/qcom/hab/hab_qvm.h new file mode 100644 index 000000000000..e94b82f87942 --- /dev/null +++ b/drivers/soc/qcom/hab/hab_qvm.h @@ -0,0 +1,47 @@ +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef __HAB_QNX_H +#define __HAB_QNX_H +#include "hab.h" +#include "hab_pipe.h" + +#include <guest_shm.h> +#include <linux/stddef.h> + +#define PULSE_CODE_NOTIFY 0 +#define PULSE_CODE_INPUT 1 + +struct qvm_channel { + int be; + + struct hab_pipe *pipe; + struct hab_pipe_endpoint *pipe_ep; + spinlock_t io_lock; + struct tasklet_struct task; + struct guest_shm_factory *guest_factory; + struct guest_shm_control *guest_ctrl; + uint32_t idx; + + int channel; + int coid; + + unsigned int guest_intr; + unsigned int guest_iid; +}; + +/* Shared mem size in each direction for communication pipe */ +#define PIPE_SHMEM_SIZE (128 * 1024) + +void *qnx_hyp_rx_dispatch(void *data); + +#endif /* __HAB_QNX_H */ diff --git a/drivers/soc/qcom/hab/hab_vchan.c b/drivers/soc/qcom/hab/hab_vchan.c new file mode 100644 index 000000000000..75a3fad68ab5 --- /dev/null +++ b/drivers/soc/qcom/hab/hab_vchan.c @@ -0,0 +1,184 @@ +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. 
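habhyp_commdev_alloc() asks the guest_shm factory for enough pages to hold one hab_pipe carrying two 128 KiB directions, then maps those pages plus one control page for guest_ctrl (shmdata starts one page past it). The program below recomputes that footprint with the structure definitions copied from hab_pipe.h above; the struct sizes are whatever the host compiler produces, so the exact byte count may differ from the target, but the rounding to pages is the same.

/* Shared-memory footprint of one hab_pipe, from the hab_pipe.h layout. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct hab_shared_buf { uint32_t rd_count, wr_count, size; unsigned char data[]; };

struct hab_pipe_endpoint {
    struct { uint32_t wr_count, index; struct hab_shared_buf *sh_buf; } tx_info;
    struct { uint32_t index; struct hab_shared_buf *sh_buf; } rx_info;
};

struct hab_pipe {
    struct hab_pipe_endpoint top, bottom;
    struct hab_shared_buf *buf_a, *buf_b;   /* debug pointers */
    size_t total_size;
    unsigned char buf_base[];
};

#define PIPE_SHMEM_SIZE (128 * 1024)
#define PAGE_SIZE 4096u

int main(void)
{
    size_t bytes = sizeof(struct hab_pipe) +
                   2 * (sizeof(struct hab_shared_buf) + PIPE_SHMEM_SIZE);
    size_t pages = (bytes + PAGE_SIZE - 1) / PAGE_SIZE;

    /* one extra page is mapped for the guest_shm control page */
    printf("pipe bytes=%zu pages=%zu mapped pages=%zu\n", bytes, pages, pages + 1);
    return 0;
}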
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include "hab.h" + +struct virtual_channel * +hab_vchan_alloc(struct uhab_context *ctx, struct physical_channel *pchan) +{ + int id; + struct virtual_channel *vchan; + + if (!pchan || !ctx) + return NULL; + + vchan = kzalloc(sizeof(*vchan), GFP_KERNEL); + if (!vchan) + return NULL; + + /* This should be the first thing we do in this function */ + idr_preload(GFP_KERNEL); + spin_lock_bh(&pchan->vid_lock); + id = idr_alloc(&pchan->vchan_idr, vchan, 1, 256, GFP_NOWAIT); + spin_unlock_bh(&pchan->vid_lock); + idr_preload_end(); + + if (id < 0) { + kfree(vchan); + return NULL; + } + mb(); /* id must be generated done before pchan_get */ + + hab_pchan_get(pchan); + vchan->pchan = pchan; + vchan->id = ((id << HAB_VCID_ID_SHIFT) & HAB_VCID_ID_MASK) | + ((pchan->habdev->id << HAB_VCID_MMID_SHIFT) & + HAB_VCID_MMID_MASK) | + ((pchan->dom_id << HAB_VCID_DOMID_SHIFT) & + HAB_VCID_DOMID_MASK); + spin_lock_init(&vchan->rx_lock); + INIT_LIST_HEAD(&vchan->rx_list); + init_waitqueue_head(&vchan->rx_queue); + + kref_init(&vchan->refcount); + kref_init(&vchan->usagecnt); + vchan->otherend_closed = pchan->closed; + + hab_ctx_get(ctx); + vchan->ctx = ctx; + + return vchan; +} + +static void +hab_vchan_free(struct kref *ref) +{ + int found; + struct virtual_channel *vchan = + container_of(ref, struct virtual_channel, refcount); + struct hab_message *message, *msg_tmp; + struct export_desc *exp; + struct physical_channel *pchan = vchan->pchan; + struct uhab_context *ctx = vchan->ctx; + + list_for_each_entry_safe(message, msg_tmp, &vchan->rx_list, node) { + list_del(&message->node); + hab_msg_free(message); + } + + do { + found = 0; + write_lock(&ctx->exp_lock); + list_for_each_entry(exp, &ctx->exp_whse, node) { + if (exp->vcid_local == vchan->id) { + list_del(&exp->node); + found = 1; + break; + } + } + write_unlock(&ctx->exp_lock); + if (found) { + habmem_hyp_revoke(exp->payload, exp->payload_count); + habmem_remove_export(exp); + } + } while (found); + + do { + found = 0; + spin_lock_bh(&ctx->imp_lock); + list_for_each_entry(exp, &ctx->imp_whse, node) { + if (exp->vcid_remote == vchan->id) { + list_del(&exp->node); + found = 1; + break; + } + } + spin_unlock_bh(&ctx->imp_lock); + if (found) { + habmm_imp_hyp_unmap(ctx->import_ctx, + exp->import_index, + exp->payload_count, + ctx->kernel); + ctx->import_total--; + kfree(exp); + } + } while (found); + + spin_lock_bh(&pchan->vid_lock); + idr_remove(&pchan->vchan_idr, HAB_VCID_GET_ID(vchan->id)); + spin_unlock_bh(&pchan->vid_lock); + + hab_pchan_put(pchan); + hab_ctx_put(ctx); + + kfree(vchan); +} + +struct virtual_channel* +hab_vchan_get(struct physical_channel *pchan, uint32_t vchan_id) +{ + struct virtual_channel *vchan; + + spin_lock_bh(&pchan->vid_lock); + vchan = idr_find(&pchan->vchan_idr, HAB_VCID_GET_ID(vchan_id)); + if (vchan) + if (!kref_get_unless_zero(&vchan->refcount)) + vchan = NULL; + spin_unlock_bh(&pchan->vid_lock); + + return vchan; +} + +void hab_vchan_stop(struct virtual_channel *vchan) +{ + if (vchan) { + vchan->otherend_closed = 1; + wake_up_interruptible(&vchan->rx_queue); + 
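hab_vchan_alloc() composes the 32-bit virtual-channel id from three fields: the idr-allocated local id, the device's MMID and the domain id, each shifted into its own mask. The HAB_VCID_* shifts and masks live in hab.h, which is not part of this hunk, so the widths below are placeholders chosen only to show the composition and decomposition, not the driver's real layout.

/* Illustrative composition of a HAB virtual-channel id from its three parts.
 * Shift and mask values are placeholders; the real ones live in hab.h. */
#include <stdint.h>
#include <stdio.h>

#define VCID_ID_SHIFT    0u
#define VCID_ID_MASK     0x000000ffu
#define VCID_MMID_SHIFT  8u
#define VCID_MMID_MASK   0x00ffff00u
#define VCID_DOMID_SHIFT 24u
#define VCID_DOMID_MASK  0xff000000u

int main(void)
{
    uint32_t id = 5, mmid = 0x102, domid = 2;
    uint32_t vcid = ((id << VCID_ID_SHIFT) & VCID_ID_MASK) |
                    ((mmid << VCID_MMID_SHIFT) & VCID_MMID_MASK) |
                    ((domid << VCID_DOMID_SHIFT) & VCID_DOMID_MASK);

    printf("vcid=0x%08x id=%u mmid=0x%x domid=%u\n", vcid,
           (vcid & VCID_ID_MASK) >> VCID_ID_SHIFT,
           (vcid & VCID_MMID_MASK) >> VCID_MMID_SHIFT,
           (vcid & VCID_DOMID_MASK) >> VCID_DOMID_SHIFT);
    return 0;
}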
} +} + +void hab_vchan_stop_notify(struct virtual_channel *vchan) +{ + hab_send_close_msg(vchan); + hab_vchan_stop(vchan); +} + + +int hab_vchan_find_domid(struct virtual_channel *vchan) +{ + return vchan ? vchan->pchan->dom_id : -1; +} + +static void +hab_vchan_free_deferred(struct work_struct *work) +{ + struct virtual_channel *vchan = + container_of(work, struct virtual_channel, work); + + hab_vchan_free(&vchan->refcount); +} + +static void +hab_vchan_schedule_free(struct kref *ref) +{ + struct virtual_channel *vchan = + container_of(ref, struct virtual_channel, refcount); + + INIT_WORK(&vchan->work, hab_vchan_free_deferred); + schedule_work(&vchan->work); +} + +void hab_vchan_put(struct virtual_channel *vchan) +{ + if (vchan) + kref_put(&vchan->refcount, hab_vchan_schedule_free); +} diff --git a/drivers/soc/qcom/hab/khab.c b/drivers/soc/qcom/hab/khab.c new file mode 100644 index 000000000000..f7499773ae42 --- /dev/null +++ b/drivers/soc/qcom/hab/khab.c @@ -0,0 +1,140 @@ +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include <linux/module.h> +#include "hab.h" + +int32_t habmm_socket_open(int32_t *handle, uint32_t mm_ip_id, + uint32_t timeout, uint32_t flags) +{ + return hab_vchan_open(hab_driver.kctx, mm_ip_id, handle, flags); +} +EXPORT_SYMBOL(habmm_socket_open); + +int32_t habmm_socket_close(int32_t handle) +{ + hab_vchan_close(hab_driver.kctx, handle); + return 0; +} +EXPORT_SYMBOL(habmm_socket_close); + +int32_t habmm_socket_send(int32_t handle, void *src_buff, + uint32_t size_bytes, uint32_t flags) +{ + struct hab_send param = {0}; + + param.vcid = handle; + param.data = (uint64_t)(uintptr_t)src_buff; + param.sizebytes = size_bytes; + param.flags = flags; + + return hab_vchan_send(hab_driver.kctx, handle, + size_bytes, src_buff, flags); +} +EXPORT_SYMBOL(habmm_socket_send); + +int32_t habmm_socket_recv(int32_t handle, void *dst_buff, uint32_t *size_bytes, + uint32_t timeout, uint32_t flags) +{ + int ret = 0; + struct hab_message *msg; + + if (!size_bytes || !dst_buff) + return -EINVAL; + + msg = hab_vchan_recv(hab_driver.kctx, handle, flags); + + if (IS_ERR(msg)) { + *size_bytes = 0; + return PTR_ERR(msg); + } + + if (*size_bytes < msg->sizebytes) { + *size_bytes = 0; + ret = -EINVAL; + } else { + memcpy(dst_buff, msg->data, msg->sizebytes); + *size_bytes = msg->sizebytes; + } + + hab_msg_free(msg); + return ret; +} +EXPORT_SYMBOL(habmm_socket_recv); + +int32_t habmm_export(int32_t handle, void *buff_to_share, uint32_t size_bytes, + uint32_t *export_id, uint32_t flags) +{ + int ret; + struct hab_export param = {0}; + + if (!export_id) + return -EINVAL; + + param.vcid = handle; + param.buffer = (uint64_t)(uintptr_t)buff_to_share; + param.sizebytes = size_bytes; + + ret = hab_mem_export(hab_driver.kctx, ¶m, 1); + + *export_id = param.exportid; + return ret; +} +EXPORT_SYMBOL(habmm_export); + +int32_t habmm_unexport(int32_t handle, uint32_t export_id, uint32_t flags) +{ + struct hab_unexport param = {0}; + + param.vcid = handle; + param.exportid = export_id; + + return 
hab_mem_unexport(hab_driver.kctx, ¶m, 1); +} +EXPORT_SYMBOL(habmm_unexport); + +int32_t habmm_import(int32_t handle, void **buff_shared, uint32_t size_bytes, + uint32_t export_id, uint32_t flags) +{ + int ret; + struct hab_import param = {0}; + + if (!buff_shared) + return -EINVAL; + + param.vcid = handle; + param.sizebytes = size_bytes; + param.exportid = export_id; + param.flags = flags; + + ret = hab_mem_import(hab_driver.kctx, ¶m, 1); + if (!IS_ERR(ret)) + *buff_shared = (void *)(uintptr_t)param.kva; + + return ret; +} +EXPORT_SYMBOL(habmm_import); + +int32_t habmm_unimport(int32_t handle, + uint32_t export_id, + void *buff_shared, + uint32_t flags) +{ + struct hab_unimport param = {0}; + + param.vcid = handle; + param.exportid = export_id; + param.kva = (uint64_t)(uintptr_t)buff_shared; + + return hab_mem_unimport(hab_driver.kctx, ¶m, 1); +} +EXPORT_SYMBOL(habmm_unimport); diff --git a/drivers/soc/qcom/hab/qvm_comm.c b/drivers/soc/qcom/hab/qvm_comm.c new file mode 100644 index 000000000000..20a631e13794 --- /dev/null +++ b/drivers/soc/qcom/hab/qvm_comm.c @@ -0,0 +1,95 @@ +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include "hab.h" +#include "hab_qvm.h" + +static inline void habhyp_notify(void *commdev) +{ + struct qvm_channel *dev = (struct qvm_channel *)commdev; + + if (dev && dev->guest_ctrl) + dev->guest_ctrl->notify = ~0; +} + +int physical_channel_read(struct physical_channel *pchan, + void *payload, + size_t read_size) +{ + struct qvm_channel *dev = (struct qvm_channel *)pchan->hyp_data; + + if (dev) + return hab_pipe_read(dev->pipe_ep, payload, read_size); + else + return 0; +} + +int physical_channel_send(struct physical_channel *pchan, + struct hab_header *header, + void *payload) +{ + int sizebytes = HAB_HEADER_GET_SIZE(*header); + struct qvm_channel *dev = (struct qvm_channel *)pchan->hyp_data; + int total_size = sizeof(*header) + sizebytes; + + if (total_size > dev->pipe_ep->tx_info.sh_buf->size) + return -EINVAL; /* too much data for ring */ + + spin_lock_bh(&dev->io_lock); + + if ((dev->pipe_ep->tx_info.sh_buf->size - + (dev->pipe_ep->tx_info.wr_count - + dev->pipe_ep->tx_info.sh_buf->rd_count)) < total_size) { + spin_unlock_bh(&dev->io_lock); + return -EAGAIN; /* not enough free space */ + } + + if (hab_pipe_write(dev->pipe_ep, + (unsigned char *)header, + sizeof(*header)) != sizeof(*header)) { + spin_unlock_bh(&dev->io_lock); + return -EIO; + } + + if (sizebytes) { + if (hab_pipe_write(dev->pipe_ep, + (unsigned char *)payload, + sizebytes) != sizebytes) { + spin_unlock_bh(&dev->io_lock); + return -EIO; + } + } + + hab_pipe_write_commit(dev->pipe_ep); + spin_unlock_bh(&dev->io_lock); + habhyp_notify(dev); + + return 0; +} + +void physical_channel_rx_dispatch(unsigned long data) +{ + struct hab_header header; + struct physical_channel *pchan = (struct physical_channel *)data; + struct qvm_channel *dev = (struct qvm_channel *)pchan->hyp_data; + + spin_lock_bh(&pchan->rxbuf_lock); + while (1) { + if (hab_pipe_read(dev->pipe_ep, + (unsigned char *)&header, + sizeof(header)) != 
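khab.c exposes the in-kernel entry points (habmm_socket_* plus habmm_export/unexport/import/unimport) as thin wrappers around the shared kernel context hab_driver.kctx. Below is a hedged sketch of how a kernel client might drive that API end to end; HAB_MMID_EXAMPLE is a placeholder rather than a real MM id, the habmm_* prototypes are assumed to be visible through the appropriate habmm header, and the error handling is kept minimal.

/* Hypothetical kernel-side user of the khab API added by this patch.
 * HAB_MMID_EXAMPLE is a placeholder, not a real MM id. */
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/errno.h>

#define HAB_MMID_EXAMPLE 0x0101

static int hab_client_demo(void)
{
    int32_t handle;
    uint32_t export_id;
    void *buf;
    int ret;

    ret = habmm_socket_open(&handle, HAB_MMID_EXAMPLE, 0, 0);
    if (ret)
        return ret;

    buf = (void *)__get_free_page(GFP_KERNEL);
    if (!buf) {
        habmm_socket_close(handle);
        return -ENOMEM;
    }

    /* send a small message, then share the whole page with the peer */
    ret = habmm_socket_send(handle, buf, 64, 0);
    if (!ret)
        ret = habmm_export(handle, buf, PAGE_SIZE, &export_id, 0);
    if (!ret)
        ret = habmm_unexport(handle, export_id, 0);

    free_page((unsigned long)buf);
    habmm_socket_close(handle);
    return ret;
}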
sizeof(header)) + break; /* no data available */ + + hab_msg_recv(pchan, &header); + } + spin_unlock_bh(&pchan->rxbuf_lock); +} diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c index 0d534290991b..2326487302fd 100644 --- a/drivers/soc/qcom/icnss.c +++ b/drivers/soc/qcom/icnss.c @@ -505,10 +505,10 @@ static int icnss_assign_msa_perm(struct icnss_mem_region_info phys_addr_t addr; u32 size; u32 i = 0; - u32 source_vmids[ICNSS_MAX_VMIDS]; + u32 source_vmids[ICNSS_MAX_VMIDS] = {0}; u32 source_nelems; - u32 dest_vmids[ICNSS_MAX_VMIDS]; - u32 dest_perms[ICNSS_MAX_VMIDS]; + u32 dest_vmids[ICNSS_MAX_VMIDS] = {0}; + u32 dest_perms[ICNSS_MAX_VMIDS] = {0}; u32 dest_nelems; enum icnss_msa_perm cur_perm = mem_region->perm; struct icnss_msa_perm_list_t *new_perm_list, *old_perm_list; diff --git a/drivers/soc/qcom/ipc_router_glink_xprt.c b/drivers/soc/qcom/ipc_router_glink_xprt.c index 7dd1683881fb..0c588c586306 100644 --- a/drivers/soc/qcom/ipc_router_glink_xprt.c +++ b/drivers/soc/qcom/ipc_router_glink_xprt.c @@ -82,6 +82,7 @@ struct ipc_router_glink_xprt { struct msm_ipc_router_xprt xprt; void *ch_hndl; struct workqueue_struct *xprt_wq; + struct wakeup_source notify_rxv_ws; struct rw_semaphore ss_reset_rwlock; int ss_reset; void *pil; @@ -377,6 +378,7 @@ out_read_data: glink_rx_done(glink_xprtp->ch_hndl, rx_work->iovec, reuse_intent); kfree(rx_work); up_read(&glink_xprtp->ss_reset_rwlock); + __pm_relax(&glink_xprtp->notify_rxv_ws); } static void glink_xprt_open_event(struct work_struct *work) @@ -491,6 +493,8 @@ static void glink_xprt_notify_rxv(void *handle, const void *priv, rx_work->iovec_size = size; rx_work->vbuf_provider = vbuf_provider; rx_work->pbuf_provider = pbuf_provider; + if (!glink_xprtp->dynamic_wakeup_source) + __pm_stay_awake(&glink_xprtp->notify_rxv_ws); INIT_WORK(&rx_work->work, glink_xprt_read_data); queue_work(glink_xprtp->xprt_wq, &rx_work->work); } @@ -760,6 +764,7 @@ static int ipc_router_glink_config_init( return -EFAULT; } + wakeup_source_init(&glink_xprtp->notify_rxv_ws, xprt_wq_name); mutex_lock(&glink_xprt_list_lock_lha1); list_add(&glink_xprtp->list, &glink_xprt_list); mutex_unlock(&glink_xprt_list_lock_lha1); diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c index ace2bc4f30b6..d77a12626330 100644 --- a/drivers/soc/qcom/peripheral-loader.c +++ b/drivers/soc/qcom/peripheral-loader.c @@ -56,7 +56,9 @@ #endif #define PIL_NUM_DESC 10 +#define NUM_OF_ENCRYPTED_KEY 3 static void __iomem *pil_info_base; +static void __iomem *pil_minidump_base; /** * proxy_timeout - Override for proxy vote timeouts @@ -80,6 +82,18 @@ struct pil_mdt { }; /** + * struct boot_minidump_smem_region - Representation of SMEM TOC + * @region_name: Name of modem segment to be dumped + * @region_base_address: Where segment start from + * @region_size: Size of segment to be dumped + */ +struct boot_minidump_smem_region { + char region_name[16]; + u64 region_base_address; + u64 region_size; +}; + +/** * struct pil_seg - memory map representing one segment * @next: points to next seg mentor NULL if last segment * @paddr: physical start address of segment @@ -133,11 +147,67 @@ struct pil_priv { phys_addr_t region_end; void *region; struct pil_image_info __iomem *info; + struct md_ssr_ss_info __iomem *minidump; + int minidump_id; int id; int unvoted_flag; size_t region_size; }; +static int pil_do_minidump(struct pil_desc *desc, void *ramdump_dev) +{ + struct boot_minidump_smem_region __iomem *region_info; + struct ramdump_segment *ramdump_segs, *s; + struct 
pil_priv *priv = desc->priv; + void __iomem *subsys_smem_base; + void __iomem *offset; + int ss_mdump_seg_cnt; + int ret, i; + + memcpy(&offset, &priv->minidump, sizeof(priv->minidump)); + offset = offset + sizeof(priv->minidump->md_ss_smem_regions_baseptr); + /* There are 3 encryption keys which also need to be dumped */ + ss_mdump_seg_cnt = readb_relaxed(offset) + + NUM_OF_ENCRYPTED_KEY; + + subsys_smem_base = ioremap(__raw_readl(priv->minidump), + ss_mdump_seg_cnt * sizeof(*region_info)); + region_info = + (struct boot_minidump_smem_region __iomem *)subsys_smem_base; + ramdump_segs = kcalloc(ss_mdump_seg_cnt, + sizeof(*ramdump_segs), GFP_KERNEL); + if (!ramdump_segs) + return -ENOMEM; + + if (desc->subsys_vmid > 0) + ret = pil_assign_mem_to_linux(desc, priv->region_start, + (priv->region_end - priv->region_start)); + + s = ramdump_segs; + for (i = 0; i < ss_mdump_seg_cnt; i++) { + memcpy(&offset, ®ion_info, sizeof(region_info)); + memcpy(&s->name, ®ion_info, sizeof(region_info)); + offset = offset + sizeof(region_info->region_name); + s->address = __raw_readl(offset); + offset = offset + sizeof(region_info->region_base_address); + s->size = __raw_readl(offset); + s++; + region_info++; + } + ret = do_minidump(ramdump_dev, ramdump_segs, ss_mdump_seg_cnt); + kfree(ramdump_segs); + if (ret) + pil_err(desc, "%s: Ramdump collection failed for subsys %s rc:%d\n", + __func__, desc->name, ret); + writel_relaxed(0, &priv->minidump->md_ss_smem_regions_baseptr); + writeb_relaxed(1, &priv->minidump->md_ss_ssr_cause); + + if (desc->subsys_vmid > 0) + ret = pil_assign_mem_to_subsys(desc, priv->region_start, + (priv->region_end - priv->region_start)); + return ret; +} + /** * pil_do_ramdump() - Ramdump an image * @desc: descriptor from pil_desc_init() @@ -153,6 +223,9 @@ int pil_do_ramdump(struct pil_desc *desc, void *ramdump_dev) int count = 0, ret; struct ramdump_segment *ramdump_segs, *s; + if (priv->minidump && (__raw_readl(priv->minidump) > 0)) + return pil_do_minidump(desc, ramdump_dev); + list_for_each_entry(seg, &priv->segs, list) count++; @@ -1014,9 +1087,10 @@ bool is_timeout_disabled(void) int pil_desc_init(struct pil_desc *desc) { struct pil_priv *priv; - int ret; void __iomem *addr; + int ret, ss_imem_offset_mdump; char buf[sizeof(priv->info->name)]; + struct device_node *ofnode = desc->dev->of_node; if (WARN(desc->ops->proxy_unvote && !desc->ops->proxy_vote, "Invalid proxy voting. Ignoring\n")) @@ -1039,6 +1113,22 @@ int pil_desc_init(struct pil_desc *desc) strncpy(buf, desc->name, sizeof(buf)); __iowrite32_copy(priv->info->name, buf, sizeof(buf) / 4); } + if (of_property_read_u32(ofnode, "qcom,minidump-id", + &priv->minidump_id)) + pr_debug("minidump-id not found for %s\n", desc->name); + else { + ss_imem_offset_mdump = + sizeof(struct md_ssr_ss_info) * priv->minidump_id; + if (pil_minidump_base) { + /* Add 0x4 to get start of struct md_ssr_ss_info base + * from struct md_ssr_toc for any subsystem, + * struct md_ssr_ss_info is actually the pointer + * of ToC in smem for any subsystem. 
+ */ + addr = pil_minidump_base + ss_imem_offset_mdump + 0x4; + priv->minidump = (struct md_ssr_ss_info __iomem *)addr; + } + } ret = pil_parse_devicetree(desc); if (ret) @@ -1148,6 +1238,20 @@ static int __init msm_pil_init(void) for (i = 0; i < resource_size(&res)/sizeof(u32); i++) writel_relaxed(0, pil_info_base + (i * sizeof(u32))); + np = of_find_compatible_node(NULL, NULL, "qcom,msm-imem-minidump"); + if (!np) { + pr_warn("pil: failed to find qcom,msm-imem-minidump node\n"); + goto out; + } else { + pil_minidump_base = of_iomap(np, 0); + if (!pil_minidump_base) { + pr_err("unable to map pil minidump imem offset\n"); + goto out; + } + } + for (i = 0; i < sizeof(struct md_ssr_toc)/sizeof(u32); i++) + writel_relaxed(0, pil_minidump_base + (i * sizeof(u32))); + writel_relaxed(1, pil_minidump_base); out: return register_pm_notifier(&pil_pm_notifier); } @@ -1158,6 +1262,8 @@ static void __exit msm_pil_exit(void) unregister_pm_notifier(&pil_pm_notifier); if (pil_info_base) iounmap(pil_info_base); + if (pil_minidump_base) + iounmap(pil_minidump_base); } module_exit(msm_pil_exit); diff --git a/drivers/soc/qcom/peripheral-loader.h b/drivers/soc/qcom/peripheral-loader.h index 0cd2aeae1edd..908ab78124f7 100644 --- a/drivers/soc/qcom/peripheral-loader.h +++ b/drivers/soc/qcom/peripheral-loader.h @@ -74,6 +74,34 @@ struct pil_image_info { __le32 size; } __attribute__((__packed__)); +#define MAX_NUM_OF_SS 3 + +/** + * struct md_ssr_ss_info - Info in imem about smem ToC + * @md_ss_smem_regions_baseptr: Start physical address of SMEM TOC + * @md_ss_num_of_regions: number of segments that need to be dumped + * @md_ss_encryption_status: status of encryption of segments + * @md_ss_ssr_cause: ssr cause enum + */ +struct md_ssr_ss_info { + u32 md_ss_smem_regions_baseptr; + u8 md_ss_num_of_regions; + u8 md_ss_encryption_status; + u8 md_ss_ssr_cause; + u8 reserved; +}; + +/** + * struct md_ssr_toc - Wrapper of struct md_ssr_ss_info + * @md_ssr_toc_init: flag to indicate to MSS SW about imem init done + * @md_ssr_ss: Instance of struct md_ssr_ss_info for a subsystem + */ +struct md_ssr_toc /* Shared IMEM ToC struct */ +{ + u32 md_ssr_toc_init; + struct md_ssr_ss_info md_ssr_ss[MAX_NUM_OF_SS]; +}; + /** * struct pil_reset_ops - PIL operations * @init_image: prepare an image for authentication diff --git a/drivers/soc/qcom/pil-msa.c b/drivers/soc/qcom/pil-msa.c index 5fcb0f95733c..7ede3e29dcf9 100644 --- a/drivers/soc/qcom/pil-msa.c +++ b/drivers/soc/qcom/pil-msa.c @@ -78,7 +78,8 @@ #define MSS_MAGIC 0XAABADEAD /* CX_IPEAK Parameters */ #define CX_IPEAK_MSS BIT(5) - +/* Timeout value for MBA boot when minidump is enabled */ +#define MBA_ENCRYPTION_TIMEOUT 3000 enum scm_cmd { PAS_MEM_SETUP_CMD = 2, }; @@ -244,7 +245,12 @@ static int pil_msa_wait_for_mba_ready(struct q6v5_data *drv) struct device *dev = drv->desc.dev; int ret; u32 status; - u64 val = is_timeout_disabled() ? 0 : pbl_mba_boot_timeout_ms * 1000; + u64 val; + + if (of_property_read_bool(dev->of_node, "qcom,minidump-id")) + pbl_mba_boot_timeout_ms = MBA_ENCRYPTION_TIMEOUT; + + val = is_timeout_disabled() ? 0 : pbl_mba_boot_timeout_ms * 1000; /* Wait for PBL completion. 
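pil_desc_init() locates a subsystem's minidump descriptor in IMEM as pil_minidump_base + 0x4 + minidump_id * sizeof(struct md_ssr_ss_info); the 0x4 skips the md_ssr_toc_init flag at the start of struct md_ssr_toc. Since md_ssr_ss_info is one u32 plus four u8 fields (8 bytes), entry 2 lands at byte offset 20. The check below confirms that arithmetic against the structure layout from peripheral-loader.h.

/* Sanity check of the IMEM offset math used in pil_desc_init(), built from
 * the md_ssr_toc / md_ssr_ss_info layout in peripheral-loader.h. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_NUM_OF_SS 3
#define EXAMPLE_ID 2          /* example qcom,minidump-id value */

struct md_ssr_ss_info {
    uint32_t md_ss_smem_regions_baseptr;
    uint8_t  md_ss_num_of_regions;
    uint8_t  md_ss_encryption_status;
    uint8_t  md_ss_ssr_cause;
    uint8_t  reserved;
};

struct md_ssr_toc {
    uint32_t md_ssr_toc_init;
    struct md_ssr_ss_info md_ssr_ss[MAX_NUM_OF_SS];
};

int main(void)
{
    size_t by_hand  = 0x4 + sizeof(struct md_ssr_ss_info) * EXAMPLE_ID;
    size_t expected = offsetof(struct md_ssr_toc, md_ssr_ss[EXAMPLE_ID]);

    printf("entry %d: by_hand=%zu offsetof=%zu\n", EXAMPLE_ID, by_hand, expected);
    return by_hand == expected ? 0 : 1;    /* both are 20 for entry 2 */
}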
*/ ret = readl_poll_timeout(drv->rmb_base + RMB_PBL_STATUS, status, @@ -553,7 +559,7 @@ int pil_mss_reset_load_mba(struct pil_desc *pil) char *fw_name_p; void *mba_dp_virt; dma_addr_t mba_dp_phys, mba_dp_phys_end; - int ret, count; + int ret; const u8 *data; struct device *dma_dev = md->mba_mem_dev_fixed ?: &md->mba_mem_dev; @@ -577,7 +583,7 @@ int pil_mss_reset_load_mba(struct pil_desc *pil) arch_setup_dma_ops(dma_dev, 0, 0, NULL, 0); - dma_dev->coherent_dma_mask = DMA_BIT_MASK(sizeof(dma_addr_t) * 8); + dma_dev->coherent_dma_mask = DMA_BIT_MASK(32); init_dma_attrs(&md->attrs_dma); dma_set_attr(DMA_ATTR_SKIP_ZEROING, &md->attrs_dma); @@ -618,10 +624,9 @@ int pil_mss_reset_load_mba(struct pil_desc *pil) &mba_dp_phys, &mba_dp_phys_end, drv->mba_dp_size); /* Load the MBA image into memory */ - count = fw->size; - if (count <= SZ_1M) { + if (fw->size <= SZ_1M) { /* Ensures memcpy is done for max 1MB fw size */ - memcpy(mba_dp_virt, data, count); + memcpy(mba_dp_virt, data, fw->size); } else { dev_err(pil->dev, "%s fw image loading into memory is failed due to fw size overflow\n", __func__); @@ -689,7 +694,7 @@ static int pil_msa_auth_modem_mdt(struct pil_desc *pil, const u8 *metadata, trace_pil_func(__func__); - dma_dev->coherent_dma_mask = DMA_BIT_MASK(sizeof(dma_addr_t) * 8); + dma_dev->coherent_dma_mask = DMA_BIT_MASK(32); dma_set_attr(DMA_ATTR_SKIP_ZEROING, &attrs); dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &attrs); /* Make metadata physically contiguous and 4K aligned. */ diff --git a/drivers/soc/qcom/ramdump.c b/drivers/soc/qcom/ramdump.c index c712ed392b0b..c8353dc8a43a 100644 --- a/drivers/soc/qcom/ramdump.c +++ b/drivers/soc/qcom/ramdump.c @@ -29,6 +29,8 @@ #include <linux/of.h> #define RAMDUMP_WAIT_MSECS 120000 +#define MAX_STRTBL_SIZE 512 +#define MAX_NAME_LENGTH 16 struct ramdump_device { char name[256]; @@ -391,12 +393,143 @@ static int _do_ramdump(void *handle, struct ramdump_segment *segments, } +static inline struct elf_shdr *elf_sheader(struct elfhdr *hdr) +{ + return (struct elf_shdr *)((size_t)hdr + (size_t)hdr->e_shoff); +} + +static inline struct elf_shdr *elf_section(struct elfhdr *hdr, int idx) +{ + return &elf_sheader(hdr)[idx]; +} + +static inline char *elf_str_table(struct elfhdr *hdr) +{ + if (hdr->e_shstrndx == SHN_UNDEF) + return NULL; + return (char *)hdr + elf_section(hdr, hdr->e_shstrndx)->sh_offset; +} + +static inline unsigned int set_section_name(const char *name, + struct elfhdr *ehdr) +{ + char *strtab = elf_str_table(ehdr); + static int strtable_idx = 1; + int idx, ret = 0; + + idx = strtable_idx; + if ((strtab == NULL) || (name == NULL)) + return 0; + + ret = idx; + idx += strlcpy((strtab + idx), name, MAX_NAME_LENGTH); + strtable_idx = idx + 1; + + return ret; +} + +static int _do_minidump(void *handle, struct ramdump_segment *segments, + int nsegments) +{ + int ret, i; + struct ramdump_device *rd_dev = (struct ramdump_device *)handle; + struct elfhdr *ehdr; + struct elf_shdr *shdr; + unsigned long offset, strtbl_off; + + if (!rd_dev->consumer_present) { + pr_err("Ramdump(%s): No consumers. 
Aborting..\n", rd_dev->name); + return -EPIPE; + } + + rd_dev->segments = segments; + rd_dev->nsegments = nsegments; + + rd_dev->elfcore_size = sizeof(*ehdr) + + (sizeof(*shdr) * (nsegments + 2)) + MAX_STRTBL_SIZE; + ehdr = kzalloc(rd_dev->elfcore_size, GFP_KERNEL); + rd_dev->elfcore_buf = (char *)ehdr; + if (!rd_dev->elfcore_buf) + return -ENOMEM; + + memcpy(ehdr->e_ident, ELFMAG, SELFMAG); + ehdr->e_ident[EI_CLASS] = ELF_CLASS; + ehdr->e_ident[EI_DATA] = ELF_DATA; + ehdr->e_ident[EI_VERSION] = EV_CURRENT; + ehdr->e_ident[EI_OSABI] = ELF_OSABI; + ehdr->e_type = ET_CORE; + ehdr->e_machine = ELF_ARCH; + ehdr->e_version = EV_CURRENT; + ehdr->e_ehsize = sizeof(*ehdr); + ehdr->e_shoff = sizeof(*ehdr); + ehdr->e_shentsize = sizeof(*shdr); + ehdr->e_shstrndx = 1; + + + offset = rd_dev->elfcore_size; + shdr = (struct elf_shdr *)(ehdr + 1); + strtbl_off = sizeof(*ehdr) + sizeof(*shdr) * (nsegments + 2); + shdr++; + shdr->sh_type = SHT_STRTAB; + shdr->sh_offset = (elf_addr_t)strtbl_off; + shdr->sh_size = MAX_STRTBL_SIZE; + shdr->sh_entsize = 0; + shdr->sh_flags = 0; + shdr->sh_name = set_section_name("STR_TBL", ehdr); + shdr++; + + for (i = 0; i < nsegments; i++, shdr++) { + /* Update elf header */ + shdr->sh_type = SHT_PROGBITS; + shdr->sh_name = set_section_name(segments[i].name, ehdr); + shdr->sh_addr = (elf_addr_t)segments[i].address; + shdr->sh_size = segments[i].size; + shdr->sh_flags = SHF_WRITE; + shdr->sh_offset = offset; + shdr->sh_entsize = 0; + offset += shdr->sh_size; + } + ehdr->e_shnum = nsegments + 2; + + rd_dev->data_ready = 1; + rd_dev->ramdump_status = -1; + + reinit_completion(&rd_dev->ramdump_complete); + + /* Tell userspace that the data is ready */ + wake_up(&rd_dev->dump_wait_q); + + /* Wait (with a timeout) to let the ramdump complete */ + ret = wait_for_completion_timeout(&rd_dev->ramdump_complete, + msecs_to_jiffies(RAMDUMP_WAIT_MSECS)); + + if (!ret) { + pr_err("Ramdump(%s): Timed out waiting for userspace.\n", + rd_dev->name); + ret = -EPIPE; + } else { + ret = (rd_dev->ramdump_status == 0) ? 
0 : -EPIPE; + } + + rd_dev->data_ready = 0; + rd_dev->elfcore_size = 0; + kfree(rd_dev->elfcore_buf); + rd_dev->elfcore_buf = NULL; + return ret; +} + int do_ramdump(void *handle, struct ramdump_segment *segments, int nsegments) { return _do_ramdump(handle, segments, nsegments, false); } EXPORT_SYMBOL(do_ramdump); +int do_minidump(void *handle, struct ramdump_segment *segments, int nsegments) +{ + return _do_minidump(handle, segments, nsegments); +} +EXPORT_SYMBOL(do_minidump); + int do_elf_ramdump(void *handle, struct ramdump_segment *segments, int nsegments) { diff --git a/drivers/soc/qcom/scm_qcpe.c b/drivers/soc/qcom/scm_qcpe.c index 1e369c73e34b..3f2b05a0ec9e 100644 --- a/drivers/soc/qcom/scm_qcpe.c +++ b/drivers/soc/qcom/scm_qcpe.c @@ -1027,55 +1027,56 @@ int scm_is_call_available(u32 svc_id, u32 cmd_id) ret = scm_call(SCM_SVC_INFO, IS_CALL_AVAIL_CMD, &svc_cmd, sizeof(svc_cmd), &ret_val, sizeof(ret_val)); - if (ret) - return ret; + if (!ret && ret_val) + return 1; + else + return 0; return ret_val; } desc.arginfo = SCM_ARGS(1); desc.args[0] = SCM_SIP_FNID(svc_id, cmd_id); + desc.ret[0] = 0; ret = scm_call2(SCM_SIP_FNID(SCM_SVC_INFO, IS_CALL_AVAIL_CMD), &desc); - if (ret) - return ret; - - return desc.ret[0]; + if (!ret && desc.ret[0]) + return 1; + else + return 0; } EXPORT_SYMBOL(scm_is_call_available); #define GET_FEAT_VERSION_CMD 3 -int scm_get_feat_version(u32 feat) +int scm_get_feat_version(u32 feat, u64 *scm_ret) { struct scm_desc desc = {0}; int ret; if (!is_scm_armv8()) { if (scm_is_call_available(SCM_SVC_INFO, GET_FEAT_VERSION_CMD)) { - u32 version; - - if (!scm_call(SCM_SVC_INFO, GET_FEAT_VERSION_CMD, &feat, - sizeof(feat), &version, sizeof(version))) - return version; + ret = scm_call(SCM_SVC_INFO, GET_FEAT_VERSION_CMD, + &feat, sizeof(feat), scm_ret, sizeof(*scm_ret)); + return ret; } return 0; } ret = scm_is_call_available(SCM_SVC_INFO, GET_FEAT_VERSION_CMD); if (ret <= 0) - return 0; + return -EAGAIN; desc.args[0] = feat; desc.arginfo = SCM_ARGS(1); ret = scm_call2(SCM_SIP_FNID(SCM_SVC_INFO, GET_FEAT_VERSION_CMD), &desc); - if (!ret) - return desc.ret[0]; - return 0; + *scm_ret = desc.ret[0]; + + return ret; } EXPORT_SYMBOL(scm_get_feat_version); #define RESTORE_SEC_CFG 2 -int scm_restore_sec_cfg(u32 device_id, u32 spare, int *scm_ret) +int scm_restore_sec_cfg(u32 device_id, u32 spare, u64 *scm_ret) { struct scm_desc desc = {0}; int ret; diff --git a/drivers/soc/qcom/smp2p_spinlock_test.c b/drivers/soc/qcom/smp2p_spinlock_test.c index 74aac52b5285..1fe4411eebde 100644 --- a/drivers/soc/qcom/smp2p_spinlock_test.c +++ b/drivers/soc/qcom/smp2p_spinlock_test.c @@ -1,6 +1,6 @@ /* drivers/soc/qcom/smp2p_spinlock_test.c * - * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. + * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -522,7 +522,7 @@ static void smp2p_ut_remote_spinlock_ssr(struct seq_file *s) int spinlock_owner = 0; struct workqueue_struct *ws = NULL; - struct rmt_spinlock_work_item work_item; + struct rmt_spinlock_work_item work_item = { .has_locked = false }; seq_printf(s, " Running %s Test\n", __func__); diff --git a/drivers/soc/qcom/spcom.c b/drivers/soc/qcom/spcom.c index bf89cb63ce4a..68199d9adb02 100644 --- a/drivers/soc/qcom/spcom.c +++ b/drivers/soc/qcom/spcom.c @@ -493,13 +493,10 @@ static void spcom_notify_state(void *handle, const void *priv, unsigned event) ch->glink_state = event; - /* - * if spcom_notify_state() is called within glink_open() - * then ch->glink_handle is not updated yet. - */ - if (!ch->glink_handle) { - pr_debug("update glink_handle, ch [%s].\n", ch->name); - ch->glink_handle = handle; + if (!handle) { + pr_err("invalid glink_handle, ch [%s].\n", ch->name); + mutex_unlock(&ch->lock); + return; } /* signal before unlock mutex & before calling glink */ @@ -512,8 +509,7 @@ static void spcom_notify_state(void *handle, const void *priv, unsigned event) */ pr_debug("call glink_queue_rx_intent() ch [%s].\n", ch->name); - ret = glink_queue_rx_intent(ch->glink_handle, - ch, ch->rx_buf_size); + ret = glink_queue_rx_intent(handle, ch, ch->rx_buf_size); if (ret) { pr_err("glink_queue_rx_intent() err [%d]\n", ret); } else { @@ -1028,10 +1024,12 @@ static int spcom_get_next_request_size(struct spcom_channel *ch) ch->name, ch->actual_rx_size); goto exit_ready; } + mutex_unlock(&ch->lock); /* unlock while waiting */ pr_debug("Wait for Rx Done, ch [%s].\n", ch->name); wait_for_completion(&ch->rx_done); + mutex_lock(&ch->lock); /* re-lock after waiting */ /* Check Rx Abort on SP reset */ if (ch->rx_abort) { pr_err("rx aborted.\n"); @@ -1138,6 +1136,7 @@ struct spcom_client *spcom_register_client(struct spcom_client_info *info) ch = spcom_find_channel_by_name(name); if (!ch) { pr_err("channel %s doesn't exist, load App first.\n", name); + kfree(client); return NULL; } @@ -1325,6 +1324,7 @@ struct spcom_server *spcom_register_service(struct spcom_service_info *info) ch = spcom_find_channel_by_name(name); if (!ch) { pr_err("channel %s doesn't exist, load App first.\n", name); + kfree(server); return NULL; } @@ -2028,6 +2028,7 @@ static int spcom_handle_get_req_size(struct spcom_channel *ch, void *buf, uint32_t size) { + int ret = -1; uint32_t next_req_size = 0; if (size < sizeof(next_req_size)) { @@ -2035,7 +2036,10 @@ static int spcom_handle_get_req_size(struct spcom_channel *ch, return -EINVAL; } - next_req_size = spcom_get_next_request_size(ch); + ret = spcom_get_next_request_size(ch); + if (ret < 0) + return ret; + next_req_size = (uint32_t) ret; memcpy(buf, &next_req_size, sizeof(next_req_size)); pr_debug("next_req_size [%d].\n", next_req_size); @@ -2140,18 +2144,20 @@ static int spcom_handle_read(struct spcom_channel *ch, void *buf, uint32_t size) { + int ret = -1; + if (size == SPCOM_GET_NEXT_REQUEST_SIZE) { pr_debug("get next request size, ch [%s].\n", ch->name); ch->is_server = true; - size = spcom_handle_get_req_size(ch, buf, size); + ret = spcom_handle_get_req_size(ch, buf, size); } else { pr_debug("get request/response, ch [%s].\n", ch->name); - size = spcom_handle_read_req_resp(ch, buf, size); + ret = spcom_handle_read_req_resp(ch, buf, size); } pr_debug("ch [%s] , size = %d.\n", ch->name, size); - return size; + return ret; }
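A minimal userspace sketch of the pattern behind the spcom_handle_read()/spcom_handle_get_req_size() rework above; the helper names here are invented for illustration and are not part of the patch. It shows why routing the result through the unsigned size value silently turns a negative errno into a huge length, while a signed int return keeps the error intact.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for spcom_get_next_request_size(); may legitimately fail. */
static int get_next_request_size(int fail)
{
	return fail ? -EINVAL : 128;
}

/* Old pattern: the error code is truncated into the unsigned size. */
static uint32_t handle_read_old(int fail)
{
	uint32_t size = get_next_request_size(fail); /* -EINVAL becomes ~4 billion */

	return size;
}

/* New pattern: keep the signed return, cast to uint32_t only on success. */
static int handle_read_new(int fail, uint32_t *next_req_size)
{
	int ret = get_next_request_size(fail);

	if (ret < 0)
		return ret; /* propagate -EINVAL unchanged */
	*next_req_size = (uint32_t)ret;
	return 0;
}

int main(void)
{
	uint32_t sz = 0;

	printf("old: %u\n", handle_read_old(1));      /* 4294967274 on Linux */
	printf("new: %d\n", handle_read_new(1, &sz)); /* -22 (-EINVAL) */
	return 0;
}

The same reasoning appears to motivate the spcom_device_read()/spcom_device_write() hunks below, which keep the size_t count in a separate local instead of reusing it as the return value.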
/*======================================================================*/ @@ -2303,6 +2309,7 @@ static ssize_t spcom_device_write(struct file *filp, char *buf; struct spcom_channel *ch; const char *name = file_to_filename(filp); + int buf_size = 0; pr_debug("Write file [%s] size [%d] pos [%d].\n", name, (int) size, (int) *f_pos); @@ -2329,6 +2336,7 @@ static ssize_t spcom_device_write(struct file *filp, (int) size , (int) SPCOM_MAX_COMMAND_SIZE); return -EINVAL; } + buf_size = size; /* explicit casting size_t to int */ if (*f_pos != 0) { pr_err("offset should be zero, no sparse buffer.\n"); @@ -2346,7 +2354,7 @@ static ssize_t spcom_device_write(struct file *filp, return -EFAULT; } - ret = spcom_handle_write(ch, buf, size); + ret = spcom_handle_write(ch, buf, buf_size); if (ret) { pr_err("handle command error [%d].\n", ret); kfree(buf); @@ -2374,6 +2382,7 @@ static ssize_t spcom_device_read(struct file *filp, char __user *user_buff, char *buf; struct spcom_channel *ch; const char *name = file_to_filename(filp); + uint32_t buf_size = 0; pr_debug("Read file [%s], size = %d bytes.\n", name, (int) size); @@ -2382,6 +2391,7 @@ static ssize_t spcom_device_read(struct file *filp, char __user *user_buff, pr_err("invalid parameters.\n"); return -EINVAL; } + buf_size = size; /* explicit casting size_t to uint32_t */ ch = filp->private_data; @@ -2399,7 +2409,7 @@ static ssize_t spcom_device_read(struct file *filp, char __user *user_buff, if (buf == NULL) return -ENOMEM; - ret = spcom_handle_read(ch, buf, size); + ret = spcom_handle_read(ch, buf, buf_size); if (ret < 0) { pr_err("read error [%d].\n", ret); kfree(buf); diff --git a/drivers/soc/qcom/wcd-dsp-glink.c b/drivers/soc/qcom/wcd-dsp-glink.c index 5fe3c572628b..ee88a8aaf850 100644 --- a/drivers/soc/qcom/wcd-dsp-glink.c +++ b/drivers/soc/qcom/wcd-dsp-glink.c @@ -570,7 +570,7 @@ static int wdsp_glink_ch_info_init(struct wdsp_glink_priv *wpriv, mutex_lock(&wpriv->glink_mutex); if (wpriv->ch) { - dev_err(wpriv->dev, "%s: glink ch memory is already allocated\n", + dev_err_ratelimited(wpriv->dev, "%s: glink ch memory is already allocated\n", __func__); ret = -EINVAL; goto done; @@ -579,7 +579,7 @@ static int wdsp_glink_ch_info_init(struct wdsp_glink_priv *wpriv, no_of_channels = pkt->no_of_channels; if (no_of_channels > WDSP_MAX_NO_OF_CHANNELS) { - dev_err(wpriv->dev, "%s: no_of_channels: %d but max allowed are %d\n", + dev_err_ratelimited(wpriv->dev, "%s: no_of_channels: %d but max allowed are %d\n", __func__, no_of_channels, WDSP_MAX_NO_OF_CHANNELS); ret = -EINVAL; goto done; @@ -598,20 +598,20 @@ static int wdsp_glink_ch_info_init(struct wdsp_glink_priv *wpriv, size += WDSP_CH_CFG_SIZE; if (size > pkt_size) { - dev_err(wpriv->dev, "%s: Invalid size = %zd, pkt_size = %zd\n", + dev_err_ratelimited(wpriv->dev, "%s: Invalid size = %zd, pkt_size = %zd\n", __func__, size, pkt_size); ret = -EINVAL; goto err_ch_mem; } if (ch_cfg->no_of_intents > WDSP_MAX_NO_OF_INTENTS) { - dev_err(wpriv->dev, "%s: Invalid no_of_intents = %d\n", + dev_err_ratelimited(wpriv->dev, "%s: Invalid no_of_intents = %d\n", __func__, ch_cfg->no_of_intents); ret = -EINVAL; goto err_ch_mem; } size += (sizeof(u32) * ch_cfg->no_of_intents); if (size > pkt_size) { - dev_err(wpriv->dev, "%s: Invalid size = %zd, pkt_size = %zd\n", + dev_err_ratelimited(wpriv->dev, "%s: Invalid size = %zd, pkt_size = %zd\n", __func__, size, pkt_size); ret = -EINVAL; goto err_ch_mem; @@ -634,6 +634,21 @@ static int wdsp_glink_ch_info_init(struct wdsp_glink_priv *wpriv, memcpy(&ch[i]->ch_cfg, payload, 
ch_cfg_size); payload += ch_cfg_size; + /* check ch name is valid string or not */ + for (j = 0; j < WDSP_CH_NAME_MAX_LEN; j++) { + if (ch[i]->ch_cfg.name[j] == '\0') + break; + } + + if (j == WDSP_CH_NAME_MAX_LEN) { + dev_err_ratelimited(wpriv->dev, "%s: Wrong channel name\n", + __func__); + kfree(ch[i]); + ch[i] = NULL; + ret = -EINVAL; + goto err_ch_mem; + } + mutex_init(&ch[i]->mutex); ch[i]->wpriv = wpriv; INIT_WORK(&ch[i]->lcl_ch_open_wrk, wdsp_glink_lcl_ch_open_wrk); @@ -746,7 +761,7 @@ static ssize_t wdsp_glink_read(struct file *file, char __user *buf, } if (count > WDSP_MAX_READ_SIZE) { - dev_info(wpriv->dev, "%s: count = %zd is more than WDSP_MAX_READ_SIZE\n", + dev_info_ratelimited(wpriv->dev, "%s: count = %zd is more than WDSP_MAX_READ_SIZE\n", __func__, count); count = WDSP_MAX_READ_SIZE; } @@ -778,7 +793,7 @@ static ssize_t wdsp_glink_read(struct file *file, char __user *buf, if (ret1) { mutex_unlock(&wpriv->rsp_mutex); - dev_err(wpriv->dev, "%s: copy_to_user failed %d\n", + dev_err_ratelimited(wpriv->dev, "%s: copy_to_user failed %d\n", __func__, ret); ret = -EFAULT; goto done; @@ -824,7 +839,7 @@ static ssize_t wdsp_glink_write(struct file *file, const char __user *buf, if ((count < WDSP_WRITE_PKT_SIZE) || (count > WDSP_MAX_WRITE_SIZE)) { - dev_err(wpriv->dev, "%s: Invalid count = %zd\n", + dev_err_ratelimited(wpriv->dev, "%s: Invalid count = %zd\n", __func__, count); ret = -EINVAL; goto done; @@ -841,7 +856,7 @@ static ssize_t wdsp_glink_write(struct file *file, const char __user *buf, ret = copy_from_user(tx_buf->buf, buf, count); if (ret) { - dev_err(wpriv->dev, "%s: copy_from_user failed %d\n", + dev_err_ratelimited(wpriv->dev, "%s: copy_from_user failed %d\n", __func__, ret); ret = -EFAULT; goto free_buf; @@ -852,7 +867,7 @@ static ssize_t wdsp_glink_write(struct file *file, const char __user *buf, case WDSP_REG_PKT: if (count < (WDSP_WRITE_PKT_SIZE + WDSP_REG_PKT_SIZE + WDSP_CH_CFG_SIZE)) { - dev_err(wpriv->dev, "%s: Invalid reg pkt size = %zd\n", + dev_err_ratelimited(wpriv->dev, "%s: Invalid reg pkt size = %zd\n", __func__, count); ret = -EINVAL; goto free_buf; @@ -861,7 +876,7 @@ static ssize_t wdsp_glink_write(struct file *file, const char __user *buf, (struct wdsp_reg_pkt *)wpkt->payload, count); if (IS_ERR_VALUE(ret)) - dev_err(wpriv->dev, "%s: glink register failed, ret = %d\n", + dev_err_ratelimited(wpriv->dev, "%s: glink register failed, ret = %d\n", __func__, ret); vfree(tx_buf); break; @@ -871,7 +886,7 @@ static ssize_t wdsp_glink_write(struct file *file, const char __user *buf, GLINK_LINK_STATE_UP), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { - dev_err(wpriv->dev, "%s: Link state wait timeout\n", + dev_err_ratelimited(wpriv->dev, "%s: Link state wait timeout\n", __func__); ret = -ETIMEDOUT; goto free_buf; @@ -881,7 +896,7 @@ static ssize_t wdsp_glink_write(struct file *file, const char __user *buf, break; case WDSP_CMD_PKT: if (count <= (WDSP_WRITE_PKT_SIZE + WDSP_CMD_PKT_SIZE)) { - dev_err(wpriv->dev, "%s: Invalid cmd pkt size = %zd\n", + dev_err_ratelimited(wpriv->dev, "%s: Invalid cmd pkt size = %zd\n", __func__, count); ret = -EINVAL; goto free_buf; @@ -889,7 +904,7 @@ static ssize_t wdsp_glink_write(struct file *file, const char __user *buf, mutex_lock(&wpriv->glink_mutex); if (wpriv->glink_state.link_state == GLINK_LINK_STATE_DOWN) { mutex_unlock(&wpriv->glink_mutex); - dev_err(wpriv->dev, "%s: Link state is Down\n", + dev_err_ratelimited(wpriv->dev, "%s: Link state is Down\n", __func__); ret = -ENETRESET; @@ -901,13 +916,11 @@ static ssize_t 
wdsp_glink_write(struct file *file, const char __user *buf, sizeof(struct wdsp_cmd_pkt) + cpkt->payload_size; if (count < pkt_max_size) { - dev_err(wpriv->dev, "%s: Invalid cmd pkt count = %zd, pkt_size = %zd\n", + dev_err_ratelimited(wpriv->dev, "%s: Invalid cmd pkt count = %zd, pkt_size = %zd\n", __func__, count, pkt_max_size); ret = -EINVAL; goto free_buf; } - dev_dbg(wpriv->dev, "%s: requested ch_name: %s, pkt_size: %zd\n", - __func__, cpkt->ch_name, pkt_max_size); for (i = 0; i < wpriv->no_of_channels; i++) { if (wpriv->ch && wpriv->ch[i] && (!strcmp(cpkt->ch_name, @@ -917,18 +930,20 @@ static ssize_t wdsp_glink_write(struct file *file, const char __user *buf, } } if (!tx_buf->ch) { - dev_err(wpriv->dev, "%s: Failed to get glink channel\n", + dev_err_ratelimited(wpriv->dev, "%s: Failed to get glink channel\n", __func__); ret = -EINVAL; goto free_buf; } + dev_dbg(wpriv->dev, "%s: requested ch_name: %s, pkt_size: %zd\n", + __func__, cpkt->ch_name, pkt_max_size); ret = wait_event_timeout(tx_buf->ch->ch_connect_wait, (tx_buf->ch->channel_state == GLINK_CONNECTED), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { - dev_err(wpriv->dev, "%s: glink channel %s is not in connected state %d\n", + dev_err_ratelimited(wpriv->dev, "%s: glink channel %s is not in connected state %d\n", __func__, tx_buf->ch->ch_cfg.name, tx_buf->ch->channel_state); ret = -ETIMEDOUT; @@ -940,7 +955,8 @@ static ssize_t wdsp_glink_write(struct file *file, const char __user *buf, queue_work(wpriv->work_queue, &tx_buf->tx_work); break; default: - dev_err(wpriv->dev, "%s: Invalid packet type\n", __func__); + dev_err_ratelimited(wpriv->dev, "%s: Invalid packet type\n", + __func__); ret = -EINVAL; vfree(tx_buf); break; @@ -986,6 +1002,7 @@ static int wdsp_glink_open(struct inode *inode, struct file *file) goto err_wq; } + wpriv->glink_state.link_state = GLINK_LINK_STATE_DOWN; init_completion(&wpriv->rsp_complete); init_waitqueue_head(&wpriv->link_state_wait); mutex_init(&wpriv->rsp_mutex); diff --git a/drivers/spi/spi_qsd.c b/drivers/spi/spi_qsd.c index 4499dd35f2dd..da58f19dd6e6 100644 --- a/drivers/spi/spi_qsd.c +++ b/drivers/spi/spi_qsd.c @@ -1827,14 +1827,16 @@ static int msm_spi_setup(struct spi_device *spi) mb(); if (dd->pdata->is_shared) put_local_resources(dd); - /* Counter-part of system-resume when runtime-pm is not enabled. */ - if (!pm_runtime_enabled(dd->dev)) - msm_spi_pm_suspend_runtime(dd->dev); no_resources: mutex_unlock(&dd->core_lock); - pm_runtime_mark_last_busy(dd->dev); - pm_runtime_put_autosuspend(dd->dev); + /* Counter-part of system-resume when runtime-pm is not enabled. */ + if (!pm_runtime_enabled(dd->dev)) { + msm_spi_pm_suspend_runtime(dd->dev); + } else { + pm_runtime_mark_last_busy(dd->dev); + pm_runtime_put_autosuspend(dd->dev); + } err_setup_exit: return rc; diff --git a/drivers/staging/android/ion/ion_cma_secure_heap.c b/drivers/staging/android/ion/ion_cma_secure_heap.c index 90ae7eb65b65..6102b1765182 100644 --- a/drivers/staging/android/ion/ion_cma_secure_heap.c +++ b/drivers/staging/android/ion/ion_cma_secure_heap.c @@ -3,7 +3,7 @@ * * Copyright (C) Linaro 2012 * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson. - * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. + * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. 
* * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -344,7 +344,8 @@ static void ion_secure_cma_free_chunk(struct ion_cma_secure_heap *sheap, } -void __ion_secure_cma_shrink_pool(struct ion_cma_secure_heap *sheap, int max_nr) +static unsigned long +__ion_secure_cma_shrink_pool(struct ion_cma_secure_heap *sheap, int max_nr) { struct list_head *entry, *_n; unsigned long drained_size = 0, skipped_size = 0; @@ -368,6 +369,7 @@ void __ion_secure_cma_shrink_pool(struct ion_cma_secure_heap *sheap, int max_nr) } trace_ion_secure_cma_shrink_pool_end(drained_size, skipped_size); + return drained_size; } int ion_secure_cma_drain_pool(struct ion_heap *heap, void *unused) @@ -385,6 +387,7 @@ int ion_secure_cma_drain_pool(struct ion_heap *heap, void *unused) static unsigned long ion_secure_cma_shrinker(struct shrinker *shrinker, struct shrink_control *sc) { + unsigned long freed; struct ion_cma_secure_heap *sheap = container_of(shrinker, struct ion_cma_secure_heap, shrinker); int nr_to_scan = sc->nr_to_scan; @@ -397,11 +400,11 @@ static unsigned long ion_secure_cma_shrinker(struct shrinker *shrinker, if (!mutex_trylock(&sheap->chunk_lock)) return -1; - __ion_secure_cma_shrink_pool(sheap, nr_to_scan); + freed = __ion_secure_cma_shrink_pool(sheap, nr_to_scan); mutex_unlock(&sheap->chunk_lock); - return atomic_read(&sheap->total_pool_size); + return freed; } static unsigned long ion_secure_cma_shrinker_count(struct shrinker *shrinker, diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c index 513d015a5ace..c19b87d10df0 100644 --- a/drivers/staging/android/ion/ion_page_pool.c +++ b/drivers/staging/android/ion/ion_page_pool.c @@ -183,7 +183,7 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask, freed += (1 << pool->order); } - return ion_page_pool_total(pool, high); + return freed; } struct ion_page_pool *ion_page_pool_create(struct device *dev, gfp_t gfp_mask, diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c index d97aa2827412..8eb7179da342 100644 --- a/drivers/staging/iio/resolver/ad2s1210.c +++ b/drivers/staging/iio/resolver/ad2s1210.c @@ -468,7 +468,7 @@ static int ad2s1210_read_raw(struct iio_dev *indio_dev, long m) { struct ad2s1210_state *st = iio_priv(indio_dev); - bool negative; + u16 negative; int ret = 0; u16 pos; s16 vel; diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c index 02c3feef4e36..c2d2c17550a7 100644 --- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c +++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c @@ -49,6 +49,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = { {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */ {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */ {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */ + {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */ {} /* Terminating entry */ }; diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index a180c000e246..1ff1c83e2df5 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -418,6 +418,7 @@ int iscsit_reset_np_thread( return 0; } np->np_thread_state = ISCSI_NP_THREAD_RESET; + atomic_inc(&np->np_reset_count); if (np->np_thread) { spin_unlock_bh(&np->np_thread_lock); @@ -1996,6 +1997,7 @@ iscsit_setup_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, 
cmd->cmd_sn = be32_to_cpu(hdr->cmdsn); cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn); cmd->data_direction = DMA_NONE; + kfree(cmd->text_in_ptr); cmd->text_in_ptr = NULL; return 0; @@ -3965,6 +3967,8 @@ int iscsi_target_tx_thread(void *arg) { int ret = 0; struct iscsi_conn *conn = arg; + bool conn_freed = false; + /* * Allow ourselves to be interrupted by SIGINT so that a * connection recovery / failure event can be triggered externally. @@ -3990,12 +3994,14 @@ get_immediate: goto transport_err; ret = iscsit_handle_response_queue(conn); - if (ret == 1) + if (ret == 1) { goto get_immediate; - else if (ret == -ECONNRESET) + } else if (ret == -ECONNRESET) { + conn_freed = true; goto out; - else if (ret < 0) + } else if (ret < 0) { goto transport_err; + } } transport_err: @@ -4005,8 +4011,13 @@ transport_err: * responsible for cleaning up the early connection failure. */ if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN) - iscsit_take_action_for_connection_exit(conn); + iscsit_take_action_for_connection_exit(conn, &conn_freed); out: + if (!conn_freed) { + while (!kthread_should_stop()) { + msleep(100); + } + } return 0; } @@ -4105,6 +4116,7 @@ int iscsi_target_rx_thread(void *arg) u32 checksum = 0, digest = 0; struct iscsi_conn *conn = arg; struct kvec iov; + bool conn_freed = false; /* * Allow ourselves to be interrupted by SIGINT so that a * connection recovery / failure event can be triggered externally. @@ -4116,7 +4128,7 @@ int iscsi_target_rx_thread(void *arg) */ rc = wait_for_completion_interruptible(&conn->rx_login_comp); if (rc < 0 || iscsi_target_check_conn_state(conn)) - return 0; + goto out; if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) { struct completion comp; @@ -4201,7 +4213,13 @@ int iscsi_target_rx_thread(void *arg) transport_err: if (!signal_pending(current)) atomic_set(&conn->transport_failed, 1); - iscsit_take_action_for_connection_exit(conn); + iscsit_take_action_for_connection_exit(conn, &conn_freed); +out: + if (!conn_freed) { + while (!kthread_should_stop()) { + msleep(100); + } + } return 0; } @@ -4575,8 +4593,11 @@ static void iscsit_logout_post_handler_closesession( * always sleep waiting for RX/TX thread shutdown to complete * within iscsit_close_connection(). 
*/ - if (conn->conn_transport->transport_type == ISCSI_TCP) + if (conn->conn_transport->transport_type == ISCSI_TCP) { sleep = cmpxchg(&conn->tx_thread_active, true, false); + if (!sleep) + return; + } atomic_set(&conn->conn_logout_remove, 0); complete(&conn->conn_logout_comp); @@ -4592,8 +4613,11 @@ static void iscsit_logout_post_handler_samecid( { int sleep = 1; - if (conn->conn_transport->transport_type == ISCSI_TCP) + if (conn->conn_transport->transport_type == ISCSI_TCP) { sleep = cmpxchg(&conn->tx_thread_active, true, false); + if (!sleep) + return; + } atomic_set(&conn->conn_logout_remove, 0); complete(&conn->conn_logout_comp); diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c index 210f6e4830e3..6c88fb021444 100644 --- a/drivers/target/iscsi/iscsi_target_erl0.c +++ b/drivers/target/iscsi/iscsi_target_erl0.c @@ -930,8 +930,10 @@ static void iscsit_handle_connection_cleanup(struct iscsi_conn *conn) } } -void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn) +void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn, bool *conn_freed) { + *conn_freed = false; + spin_lock_bh(&conn->state_lock); if (atomic_read(&conn->connection_exit)) { spin_unlock_bh(&conn->state_lock); @@ -942,6 +944,7 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn) if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) { spin_unlock_bh(&conn->state_lock); iscsit_close_connection(conn); + *conn_freed = true; return; } @@ -955,4 +958,5 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn) spin_unlock_bh(&conn->state_lock); iscsit_handle_connection_cleanup(conn); + *conn_freed = true; } diff --git a/drivers/target/iscsi/iscsi_target_erl0.h b/drivers/target/iscsi/iscsi_target_erl0.h index a9e2f9497fb2..fbc1d84a63c3 100644 --- a/drivers/target/iscsi/iscsi_target_erl0.h +++ b/drivers/target/iscsi/iscsi_target_erl0.h @@ -9,6 +9,6 @@ extern int iscsit_stop_time2retain_timer(struct iscsi_session *); extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *); extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int); extern void iscsit_fall_back_to_erl0(struct iscsi_session *); -extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *); +extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *, bool *); #endif /*** ISCSI_TARGET_ERL0_H ***/ diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index 4a137b0ae3dc..bc2cbffec27e 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -1219,9 +1219,11 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) flush_signals(current); spin_lock_bh(&np->np_thread_lock); - if (np->np_thread_state == ISCSI_NP_THREAD_RESET) { + if (atomic_dec_if_positive(&np->np_reset_count) >= 0) { np->np_thread_state = ISCSI_NP_THREAD_ACTIVE; + spin_unlock_bh(&np->np_thread_lock); complete(&np->np_restart_comp); + return 1; } else if (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN) { spin_unlock_bh(&np->np_thread_lock); goto exit; @@ -1254,7 +1256,8 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) goto exit; } else if (rc < 0) { spin_lock_bh(&np->np_thread_lock); - if (np->np_thread_state == ISCSI_NP_THREAD_RESET) { + if (atomic_dec_if_positive(&np->np_reset_count) >= 0) { + np->np_thread_state = ISCSI_NP_THREAD_ACTIVE; spin_unlock_bh(&np->np_thread_lock); complete(&np->np_restart_comp); 
iscsit_put_transport(conn->conn_transport); @@ -1436,5 +1439,9 @@ int iscsi_target_login_thread(void *arg) break; } + while (!kthread_should_stop()) { + msleep(100); + } + return 0; } diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 549a2bbbf4df..58c629aec73c 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -489,14 +489,60 @@ static void iscsi_target_restore_sock_callbacks(struct iscsi_conn *conn) static int iscsi_target_do_login(struct iscsi_conn *, struct iscsi_login *); -static bool iscsi_target_sk_state_check(struct sock *sk) +static bool __iscsi_target_sk_check_close(struct sock *sk) { if (sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) { - pr_debug("iscsi_target_sk_state_check: TCP_CLOSE_WAIT|TCP_CLOSE," + pr_debug("__iscsi_target_sk_check_close: TCP_CLOSE_WAIT|TCP_CLOSE," "returning FALSE\n"); - return false; + return true; } - return true; + return false; +} + +static bool iscsi_target_sk_check_close(struct iscsi_conn *conn) +{ + bool state = false; + + if (conn->sock) { + struct sock *sk = conn->sock->sk; + + read_lock_bh(&sk->sk_callback_lock); + state = (__iscsi_target_sk_check_close(sk) || + test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)); + read_unlock_bh(&sk->sk_callback_lock); + } + return state; +} + +static bool iscsi_target_sk_check_flag(struct iscsi_conn *conn, unsigned int flag) +{ + bool state = false; + + if (conn->sock) { + struct sock *sk = conn->sock->sk; + + read_lock_bh(&sk->sk_callback_lock); + state = test_bit(flag, &conn->login_flags); + read_unlock_bh(&sk->sk_callback_lock); + } + return state; +} + +static bool iscsi_target_sk_check_and_clear(struct iscsi_conn *conn, unsigned int flag) +{ + bool state = false; + + if (conn->sock) { + struct sock *sk = conn->sock->sk; + + write_lock_bh(&sk->sk_callback_lock); + state = (__iscsi_target_sk_check_close(sk) || + test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)); + if (!state) + clear_bit(flag, &conn->login_flags); + write_unlock_bh(&sk->sk_callback_lock); + } + return state; } static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login) @@ -536,6 +582,20 @@ static void iscsi_target_do_login_rx(struct work_struct *work) pr_debug("entering iscsi_target_do_login_rx, conn: %p, %s:%d\n", conn, current->comm, current->pid); + /* + * If iscsi_target_do_login_rx() has been invoked by ->sk_data_ready() + * before initial PDU processing in iscsi_target_start_negotiation() + * has completed, go ahead and retry until it's cleared. + * + * Otherwise if the TCP connection drops while this is occuring, + * iscsi_target_start_negotiation() will detect the failure, call + * cancel_delayed_work_sync(&conn->login_work), and cleanup the + * remaining iscsi connection resources from iscsi_np process context. 
+ */ + if (iscsi_target_sk_check_flag(conn, LOGIN_FLAGS_INITIAL_PDU)) { + schedule_delayed_work(&conn->login_work, msecs_to_jiffies(10)); + return; + } spin_lock(&tpg->tpg_state_lock); state = (tpg->tpg_state == TPG_STATE_ACTIVE); @@ -543,26 +603,12 @@ static void iscsi_target_do_login_rx(struct work_struct *work) if (!state) { pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n"); - iscsi_target_restore_sock_callbacks(conn); - iscsi_target_login_drop(conn, login); - iscsit_deaccess_np(np, tpg, tpg_np); - return; + goto err; } - if (conn->sock) { - struct sock *sk = conn->sock->sk; - - read_lock_bh(&sk->sk_callback_lock); - state = iscsi_target_sk_state_check(sk); - read_unlock_bh(&sk->sk_callback_lock); - - if (!state) { - pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n"); - iscsi_target_restore_sock_callbacks(conn); - iscsi_target_login_drop(conn, login); - iscsit_deaccess_np(np, tpg, tpg_np); - return; - } + if (iscsi_target_sk_check_close(conn)) { + pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n"); + goto err; } conn->login_kworker = current; @@ -580,34 +626,29 @@ static void iscsi_target_do_login_rx(struct work_struct *work) flush_signals(current); conn->login_kworker = NULL; - if (rc < 0) { - iscsi_target_restore_sock_callbacks(conn); - iscsi_target_login_drop(conn, login); - iscsit_deaccess_np(np, tpg, tpg_np); - return; - } + if (rc < 0) + goto err; pr_debug("iscsi_target_do_login_rx after rx_login_io, %p, %s:%d\n", conn, current->comm, current->pid); rc = iscsi_target_do_login(conn, login); if (rc < 0) { - iscsi_target_restore_sock_callbacks(conn); - iscsi_target_login_drop(conn, login); - iscsit_deaccess_np(np, tpg, tpg_np); + goto err; } else if (!rc) { - if (conn->sock) { - struct sock *sk = conn->sock->sk; - - write_lock_bh(&sk->sk_callback_lock); - clear_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags); - write_unlock_bh(&sk->sk_callback_lock); - } + if (iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_READ_ACTIVE)) + goto err; } else if (rc == 1) { iscsi_target_nego_release(conn); iscsi_post_login_handler(np, conn, zero_tsih); iscsit_deaccess_np(np, tpg, tpg_np); } + return; + +err: + iscsi_target_restore_sock_callbacks(conn); + iscsi_target_login_drop(conn, login); + iscsit_deaccess_np(np, tpg, tpg_np); } static void iscsi_target_do_cleanup(struct work_struct *work) @@ -655,31 +696,54 @@ static void iscsi_target_sk_state_change(struct sock *sk) orig_state_change(sk); return; } + state = __iscsi_target_sk_check_close(sk); + pr_debug("__iscsi_target_sk_close_change: state: %d\n", state); + if (test_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) { pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1 sk_state_change" " conn: %p\n", conn); + if (state) + set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags); write_unlock_bh(&sk->sk_callback_lock); orig_state_change(sk); return; } - if (test_and_set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) { + if (test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) { pr_debug("Got LOGIN_FLAGS_CLOSED=1 sk_state_change conn: %p\n", conn); write_unlock_bh(&sk->sk_callback_lock); orig_state_change(sk); return; } + /* + * If the TCP connection has dropped, go ahead and set LOGIN_FLAGS_CLOSED, + * but only queue conn->login_work -> iscsi_target_do_login_rx() + * processing if LOGIN_FLAGS_INITIAL_PDU has already been cleared. + * + * When iscsi_target_do_login_rx() runs, iscsi_target_sk_check_close() + * will detect the dropped TCP connection from delayed workqueue context. 
+ * + * If LOGIN_FLAGS_INITIAL_PDU is still set, which means the initial + * iscsi_target_start_negotiation() is running, iscsi_target_do_login() + * via iscsi_target_sk_check_close() or iscsi_target_start_negotiation() + * via iscsi_target_sk_check_and_clear() is responsible for detecting the + * dropped TCP connection in iscsi_np process context, and cleaning up + * the remaining iscsi connection resources. + */ + if (state) { + pr_debug("iscsi_target_sk_state_change got failed state\n"); + set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags); + state = test_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags); + write_unlock_bh(&sk->sk_callback_lock); - state = iscsi_target_sk_state_check(sk); - write_unlock_bh(&sk->sk_callback_lock); - - pr_debug("iscsi_target_sk_state_change: state: %d\n", state); + orig_state_change(sk); - if (!state) { - pr_debug("iscsi_target_sk_state_change got failed state\n"); - schedule_delayed_work(&conn->login_cleanup_work, 0); + if (!state) + schedule_delayed_work(&conn->login_work, 0); return; } + write_unlock_bh(&sk->sk_callback_lock); + orig_state_change(sk); } @@ -944,6 +1008,15 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo if (iscsi_target_handle_csg_one(conn, login) < 0) return -1; if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) { + /* + * Check to make sure the TCP connection has not + * dropped asynchronously while session reinstatement + * was occuring in this kthread context, before + * transitioning to full feature phase operation. + */ + if (iscsi_target_sk_check_close(conn)) + return -1; + login->tsih = conn->sess->tsih; login->login_complete = 1; iscsi_target_restore_sock_callbacks(conn); @@ -970,21 +1043,6 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo break; } - if (conn->sock) { - struct sock *sk = conn->sock->sk; - bool state; - - read_lock_bh(&sk->sk_callback_lock); - state = iscsi_target_sk_state_check(sk); - read_unlock_bh(&sk->sk_callback_lock); - - if (!state) { - pr_debug("iscsi_target_do_login() failed state for" - " conn: %p\n", conn); - return -1; - } - } - return 0; } @@ -1248,16 +1306,28 @@ int iscsi_target_start_negotiation( { int ret; + if (conn->sock) { + struct sock *sk = conn->sock->sk; + + write_lock_bh(&sk->sk_callback_lock); + set_bit(LOGIN_FLAGS_READY, &conn->login_flags); + set_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags); + write_unlock_bh(&sk->sk_callback_lock); + } + /* + * If iscsi_target_do_login returns zero to signal more PDU + * exchanges are required to complete the login, go ahead and + * clear LOGIN_FLAGS_INITIAL_PDU but only if the TCP connection + * is still active. + * + * Otherwise if TCP connection dropped asynchronously, go ahead + * and perform connection cleanup now. 
+ */ ret = iscsi_target_do_login(conn, login); - if (!ret) { - if (conn->sock) { - struct sock *sk = conn->sock->sk; + if (!ret && iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_INITIAL_PDU)) + ret = -1; - write_lock_bh(&sk->sk_callback_lock); - set_bit(LOGIN_FLAGS_READY, &conn->login_flags); - write_unlock_bh(&sk->sk_callback_lock); - } - } else if (ret < 0) { + if (ret < 0) { cancel_delayed_work_sync(&conn->login_work); cancel_delayed_work_sync(&conn->login_cleanup_work); iscsi_target_restore_sock_callbacks(conn); diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index f916d18ccb48..b070ddf1dc37 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c @@ -92,6 +92,11 @@ static int target_fabric_mappedlun_link( pr_err("Source se_lun->lun_se_dev does not exist\n"); return -EINVAL; } + if (lun->lun_shutdown) { + pr_err("Unable to create mappedlun symlink because" + " lun->lun_shutdown=true\n"); + return -EINVAL; + } se_tpg = lun->lun_tpg; nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item; diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index 899c33b3c734..f69f4902dc07 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -673,6 +673,8 @@ void core_tpg_remove_lun( */ struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev); + lun->lun_shutdown = true; + core_clear_lun_from_tpg(lun, tpg); /* * Wait for any active I/O references to percpu se_lun->lun_ref to @@ -694,6 +696,8 @@ void core_tpg_remove_lun( } if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) hlist_del_rcu(&lun->link); + + lun->lun_shutdown = false; mutex_unlock(&tpg->tpg_lun_mutex); percpu_ref_exit(&lun->lun_ref); diff --git a/drivers/thermal/msm_thermal.c b/drivers/thermal/msm_thermal.c index 4b586f62cdc7..bca85bf2f7ec 100644 --- a/drivers/thermal/msm_thermal.c +++ b/drivers/thermal/msm_thermal.c @@ -200,6 +200,7 @@ static bool cluster_info_probed; static bool cluster_info_nodes_called; static bool in_suspend, retry_in_progress; static bool lmh_dcvs_available; +static bool lmh_dcvs_is_supported; static int *tsens_id_map; static int *zone_id_tsens_map; static DEFINE_MUTEX(vdd_rstr_mutex); @@ -995,7 +996,7 @@ static int msm_thermal_cpufreq_callback(struct notifier_block *nfb, switch (event) { case CPUFREQ_ADJUST: - max_freq_req = (lmh_dcvs_available) ? UINT_MAX : + max_freq_req = (lmh_dcvs_is_supported) ? UINT_MAX : cpus[policy->cpu].parent_ptr->limited_max_freq; min_freq_req = cpus[policy->cpu].parent_ptr->limited_min_freq; pr_debug("mitigating CPU%d to freq max: %u min: %u\n", @@ -4996,7 +4997,7 @@ static ssize_t __ref store_cc_enabled(struct kobject *kobj, hotplug_init_cpu_offlined(); mutex_lock(&core_control_mutex); update_offline_cores(cpus_offlined); - if (hotplug_enabled) { + if (hotplug_enabled && hotplug_task) { for_each_possible_cpu(cpu) { if (!(msm_thermal_info.core_control_mask & BIT(cpus[cpu].cpu))) @@ -5379,7 +5380,7 @@ int msm_thermal_init(struct msm_thermal_data *pdata) if (ret) pr_err("cannot register cpufreq notifier. 
err:%d\n", ret); - if (!lmh_dcvs_available) { + if (!lmh_dcvs_is_supported) { register_reboot_notifier(&msm_thermal_reboot_notifier); pm_notifier(msm_thermal_suspend_callback, 0); } @@ -7414,6 +7415,7 @@ static int msm_thermal_dev_probe(struct platform_device *pdev) if (ret) goto probe_exit; + lmh_dcvs_is_supported = of_property_read_bool(node, "clock-names"); probe_cc(node, &data, pdev); probe_freq_mitigation(node, &data, pdev); probe_cx_phase_ctrl(node, &data, pdev); diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c index b80e75fc3521..416006a3384c 100644 --- a/drivers/tty/serial/msm_serial_hs.c +++ b/drivers/tty/serial/msm_serial_hs.c @@ -218,6 +218,7 @@ struct msm_hs_wakeup { }; struct msm_hs_port { + bool startup_locked; struct uart_port uport; unsigned long imr_reg; /* shadow value of UARTDM_IMR */ struct clk *clk; @@ -292,6 +293,8 @@ static struct msm_hs_port *msm_hs_get_hs_port(int port_index); static void msm_hs_queue_rx_desc(struct msm_hs_port *msm_uport); static int disconnect_rx_endpoint(struct msm_hs_port *msm_uport); static int msm_hs_pm_resume(struct device *dev); +static void msm_hs_pm_suspend(struct device *dev); + #define UARTDM_TO_MSM(uart_port) \ container_of((uart_port), struct msm_hs_port, uport) @@ -392,6 +395,8 @@ static void msm_hs_resource_unvote(struct msm_hs_port *msm_uport) { struct uart_port *uport = &(msm_uport->uport); int rc = atomic_read(&msm_uport->resource_count); + struct msm_hs_tx *tx = &msm_uport->tx; + struct msm_hs_rx *rx = &msm_uport->rx; MSM_HS_DBG("%s(): power usage count %d", __func__, rc); if (rc <= 0) { @@ -400,8 +405,15 @@ static void msm_hs_resource_unvote(struct msm_hs_port *msm_uport) return; } atomic_dec(&msm_uport->resource_count); - pm_runtime_mark_last_busy(uport->dev); - pm_runtime_put_autosuspend(uport->dev); + + if (pm_runtime_enabled(uport->dev)) { + pm_runtime_mark_last_busy(uport->dev); + pm_runtime_put_autosuspend(uport->dev); + } else { + MSM_HS_DBG("%s():tx.flush:%d,in_flight:%d,rx.flush:%d\n", + __func__, tx->flush, tx->dma_in_flight, rx->flush); + msm_hs_pm_suspend(uport->dev); + } } /* Vote for resources before accessing them */ @@ -585,6 +597,8 @@ static void hex_dump_ipc(struct msm_hs_port *msm_uport, void *ipc_ctx, char buf[(BUF_DUMP_SIZE * 3) + 2]; int len = 0; + if (msm_uport->ipc_debug_mask == FATAL_LEV) + return; len = min(size, BUF_DUMP_SIZE); /* * Print upto 32 data bytes, 32 bytes per line, 1 byte at a time and @@ -635,6 +649,7 @@ static int msm_serial_loopback_enable_set(void *data, u64 val) unsigned long flags; int ret = 0; + msm_uport->startup_locked = true; msm_hs_resource_vote(msm_uport); if (val) { @@ -654,7 +669,7 @@ static int msm_serial_loopback_enable_set(void *data, u64 val) } /* Calling CLOCK API. Hence mb() requires here. 
*/ mb(); - + msm_uport->startup_locked = false; msm_hs_resource_unvote(msm_uport); return 0; } @@ -666,11 +681,13 @@ static int msm_serial_loopback_enable_get(void *data, u64 *val) unsigned long flags; int ret = 0; + msm_uport->startup_locked = true; msm_hs_resource_vote(msm_uport); spin_lock_irqsave(&uport->lock, flags); ret = msm_hs_read(&msm_uport->uport, UART_DM_MR2); spin_unlock_irqrestore(&uport->lock, flags); + msm_uport->startup_locked = false; msm_hs_resource_unvote(msm_uport); @@ -828,6 +845,11 @@ static int msm_hs_spsconnect_rx(struct uart_port *uport) struct sps_register_event *sps_event = &rx->prod.event; unsigned long flags; + if (msm_uport->rx.pending_flag) { + MSM_HS_WARN("%s(): Buffers may be pending 0x%lx", + __func__, msm_uport->rx.pending_flag); + } + /* Establish connection between peripheral and memory endpoint */ ret = sps_connect(sps_pipe_handle, sps_config); if (ret) { @@ -843,9 +865,6 @@ static int msm_hs_spsconnect_rx(struct uart_port *uport) goto reg_event_err; } spin_lock_irqsave(&uport->lock, flags); - if (msm_uport->rx.pending_flag) - MSM_HS_WARN("%s(): Buffers may be pending 0x%lx", - __func__, msm_uport->rx.pending_flag); msm_uport->rx.queued_flag = 0; msm_uport->rx.pending_flag = 0; msm_uport->rx.rx_inx = 0; @@ -1284,6 +1303,8 @@ static int disconnect_rx_endpoint(struct msm_hs_port *msm_uport) int ret = 0; ret = sps_rx_disconnect(sps_pipe_handle); + if (ret) + MSM_HS_ERR("%s(): sps_disconnect failed\n", __func__); if (msm_uport->rx.pending_flag) MSM_HS_WARN("%s(): Buffers may be pending 0x%lx", @@ -1293,8 +1314,6 @@ static int disconnect_rx_endpoint(struct msm_hs_port *msm_uport) msm_uport->rx.pending_flag = 0; msm_uport->rx.rx_inx = 0; - if (ret) - MSM_HS_ERR("%s(): sps_disconnect failed\n", __func__); msm_uport->rx.flush = FLUSH_SHUTDOWN; MSM_HS_DBG("%s: Calling Completion\n", __func__); wake_up(&msm_uport->bam_disconnect_wait); @@ -1352,9 +1371,14 @@ static void msm_hs_stop_rx_locked(struct uart_port *uport) { struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); - if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) + if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) { MSM_HS_WARN("%s(): Clocks are off\n", __func__); - else + /* Make sure resource_on doesn't get called */ + if (msm_hs_clk_bus_vote(msm_uport)) + MSM_HS_ERR("%s:Failed clock vote\n", __func__); + msm_hs_disable_rx(uport); + msm_hs_clk_bus_unvote(msm_uport); + } else msm_hs_disable_rx(uport); if (msm_uport->rx.flush == FLUSH_NONE) @@ -1364,11 +1388,19 @@ static void msm_hs_stop_rx_locked(struct uart_port *uport) static void msm_hs_disconnect_rx(struct uart_port *uport) { struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); + struct msm_hs_rx *rx = &msm_uport->rx; + struct sps_pipe *sps_pipe_handle = rx->prod.pipe_handle; + u32 prod_empty = 0; msm_hs_disable_rx(uport); /* Disconnect the BAM RX pipe */ if (msm_uport->rx.flush == FLUSH_NONE) msm_uport->rx.flush = FLUSH_STOP; + + if (sps_is_pipe_empty(sps_pipe_handle, &prod_empty)) { + MSM_HS_WARN("%s():Pipe Not Empty, ret=%d, flush=%d\n", + __func__, prod_empty, msm_uport->rx.flush); + } disconnect_rx_endpoint(msm_uport); MSM_HS_DBG("%s(): rx->flush %d", __func__, msm_uport->rx.flush); } @@ -1389,6 +1421,8 @@ void tx_timeout_handler(unsigned long arg) if (UARTDM_ISR_CURRENT_CTS_BMSK & isr) MSM_HS_WARN("%s(): CTS Disabled, ISR 0x%x", __func__, isr); dump_uart_hs_registers(msm_uport); + /* Stop further logging */ + MSM_HS_ERR("%s(): Stop IPC logging\n", __func__); } /* Transmit the next chunk of data */ @@ -1832,11 +1866,27 @@ static void
msm_hs_start_tx_locked(struct uart_port *uport) { struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); struct msm_hs_tx *tx = &msm_uport->tx; + unsigned int isr; + + if (msm_uport->startup_locked) { + MSM_HS_DBG("%s(): No Tx Request, startup_locked=%d\n", + __func__, msm_uport->startup_locked); + return; + } /* Bail if transfer in progress */ if (tx->flush < FLUSH_STOP || tx->dma_in_flight) { MSM_HS_INFO("%s(): retry, flush %d, dma_in_flight %d\n", __func__, tx->flush, tx->dma_in_flight); + + if (msm_uport->pm_state == MSM_HS_PM_ACTIVE) { + isr = msm_hs_read(uport, UART_DM_ISR); + if (UARTDM_ISR_CURRENT_CTS_BMSK & isr) + MSM_HS_DBG("%s():CTS 1: Peer is Busy, ISR 0x%x", + __func__, isr); + } else + MSM_HS_WARN("%s(): Clocks are off\n", __func__); + return; } @@ -2269,16 +2319,34 @@ void msm_hs_resource_off(struct msm_hs_port *msm_uport) { struct uart_port *uport = &(msm_uport->uport); unsigned int data; + int ret = 0; MSM_HS_DBG("%s(): begin", __func__); msm_hs_disable_flow_control(uport, false); if (msm_uport->rx.flush == FLUSH_NONE) msm_hs_disconnect_rx(uport); + else if (msm_uport->rx.flush != FLUSH_SHUTDOWN) { + MSM_HS_WARN("%s():Rx Flush=%d Not Expected\n", + __func__, msm_uport->rx.flush); + /* disable and disconnect rx */ + ret = wait_event_timeout(msm_uport->rx.wait, + !msm_uport->rx.pending_flag, 500); + if (!ret) + MSM_HS_WARN("%s(): rx disconnect not complete", + __func__); + msm_hs_disconnect_rx(uport); + } else + MSM_HS_DBG("%s():Rx Flush=%d In Proper State\n", + __func__, msm_uport->rx.flush); /* disable dlink */ - if (msm_uport->tx.flush == FLUSH_NONE) - wait_event_timeout(msm_uport->tx.wait, + if (msm_uport->tx.flush == FLUSH_NONE) { + ret = wait_event_timeout(msm_uport->tx.wait, msm_uport->tx.flush == FLUSH_STOP, 500); + if (!ret) + MSM_HS_WARN("%s(): tx disconnect not complete", + __func__); + } if (msm_uport->tx.flush != FLUSH_SHUTDOWN) { data = msm_hs_read(uport, UART_DM_DMEN); @@ -2296,21 +2364,29 @@ void msm_hs_resource_on(struct msm_hs_port *msm_uport) unsigned int data; unsigned long flags; + if (msm_uport->startup_locked) { + MSM_HS_WARN("%s(): startup_locked=%d\n", + __func__, msm_uport->startup_locked); + return; + } + if (msm_uport->rx.flush == FLUSH_SHUTDOWN || msm_uport->rx.flush == FLUSH_STOP) { msm_hs_write(uport, UART_DM_CR, RESET_RX); data = msm_hs_read(uport, UART_DM_DMEN); data |= UARTDM_RX_BAM_ENABLE_BMSK; msm_hs_write(uport, UART_DM_DMEN, data); - } + } else + MSM_HS_DBG("%s():rx.flush=%d, Rx is not enabled\n", + __func__, msm_uport->rx.flush); - msm_hs_spsconnect_tx(msm_uport); if (msm_uport->rx.flush == FLUSH_SHUTDOWN) { msm_hs_spsconnect_rx(uport); spin_lock_irqsave(&uport->lock, flags); msm_hs_start_rx_locked(uport); spin_unlock_irqrestore(&uport->lock, flags); } + msm_hs_spsconnect_tx(msm_uport); } /* Request to turn off uart clock once pending TX is flushed */ @@ -2603,6 +2679,7 @@ static int msm_hs_startup(struct uart_port *uport) struct sps_pipe *sps_pipe_handle_tx = tx->cons.pipe_handle; struct sps_pipe *sps_pipe_handle_rx = rx->prod.pipe_handle; + msm_uport->startup_locked = true; rfr_level = uport->fifosize; if (rfr_level > 16) rfr_level -= 16; @@ -2654,6 +2731,9 @@ static int msm_hs_startup(struct uart_port *uport) flush_kthread_worker(&msm_uport->rx.kworker); if (rx->flush != FLUSH_SHUTDOWN) disconnect_rx_endpoint(msm_uport); + else + MSM_HS_DBG("%s(): Rx Flush=%d In Proper state\n", + __func__, rx->flush); ret = msm_hs_spsconnect_rx(uport); if (ret) { MSM_HS_ERR("msm_serial_hs: SPS connect failed for RX"); @@ -2729,6 +2809,7 @@ static int 
msm_hs_startup(struct uart_port *uport) atomic_set(&msm_uport->client_req_state, 0); LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt, "%s: Client_Count 0\n", __func__); + msm_uport->startup_locked = false; msm_hs_start_rx_locked(uport); spin_unlock_irqrestore(&uport->lock, flags); @@ -3157,6 +3238,8 @@ static void msm_hs_pm_suspend(struct device *dev) msm_uport->pm_state = MSM_HS_PM_SUSPENDED; msm_hs_resource_off(msm_uport); obs_manage_irq(msm_uport, false); + if (!atomic_read(&msm_uport->client_req_state)) + enable_wakeup_interrupt(msm_uport); msm_hs_clk_bus_unvote(msm_uport); /* For OBS, don't use wakeup interrupt, set gpio to suspended state */ @@ -3168,8 +3251,6 @@ static void msm_hs_pm_suspend(struct device *dev) __func__); } - if (!atomic_read(&msm_uport->client_req_state)) - enable_wakeup_interrupt(msm_uport); LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt, "%s: PM State Suspended client_count %d\n", __func__, client_count); @@ -3691,9 +3772,14 @@ static void msm_hs_shutdown(struct uart_port *uport) MSM_HS_WARN("%s(): rx disconnect not complete", __func__); msm_hs_disconnect_rx(uport); + } else { + MSM_HS_DBG("%s(): Rx Flush is in Proper state=%d\n", + __func__, msm_uport->rx.flush); } - cancel_delayed_work_sync(&msm_uport->rx.flip_insert_work); + if (cancel_delayed_work_sync(&msm_uport->rx.flip_insert_work)) + MSM_HS_DBG("%s(): Work was pending, canceled it\n", + __func__); flush_workqueue(msm_uport->hsuart_wq); /* BAM Disconnect for TX */ diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index bc5a6966dda9..c31c753b6e28 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -1851,7 +1851,7 @@ void usb_hcd_flush_endpoint(struct usb_device *udev, /* No more submits can occur */ spin_lock_irq(&hcd_urb_list_lock); rescan: - list_for_each_entry (urb, &ep->urb_list, urb_list) { + list_for_each_entry_reverse(urb, &ep->urb_list, urb_list) { int is_in; if (urb->unlinked) @@ -2505,6 +2505,8 @@ void usb_hc_died (struct usb_hcd *hcd) } if (usb_hcd_is_primary_hcd(hcd) && hcd->shared_hcd) { hcd = hcd->shared_hcd; + clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags); + set_bit(HCD_FLAG_DEAD, &hcd->flags); if (hcd->rh_registered) { clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 5de22f4892cd..370ad9690349 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -4674,7 +4674,8 @@ hub_power_remaining(struct usb_hub *hub) static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, u16 portchange) { - int status, i; + int status = -ENODEV; + int i; unsigned unit_load; struct usb_device *hdev = hub->hdev; struct usb_hcd *hcd = bus_to_hcd(hdev->bus); @@ -4878,9 +4879,10 @@ loop: done: hub_port_disable(hub, port1, 1); - if (hcd->driver->relinquish_port && !hub->hdev->parent) - hcd->driver->relinquish_port(hcd, port1); - + if (hcd->driver->relinquish_port && !hub->hdev->parent) { + if (status != -ENOTCONN && status != -ENODEV) + hcd->driver->relinquish_port(hcd, port1); + } } /* Handle physical or logical connection change events. 
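A standalone sketch of the guard added at the end of hub_port_connect() above; the function names are invented for illustration and this is not the kernel implementation. The idea is to hand a failed port over to a companion controller only when the failure is a genuine enumeration error, since -ENODEV or -ENOTCONN mean the device is already gone and there is nothing left to hand off.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static void relinquish_port(int port)
{
	printf("port %d: handed over to companion controller\n", port);
}

/* Mirrors the patched condition: skip the handoff on plain disconnects. */
static void port_connect_done(int port, int status, bool has_companion)
{
	if (has_companion && status != -ENOTCONN && status != -ENODEV)
		relinquish_port(port);
	else
		printf("port %d: status %d, port kept\n", port, status);
}

int main(void)
{
	port_connect_done(1, -EPROTO, true); /* real enumeration error: hand over */
	port_connect_done(1, -ENODEV, true); /* device disappeared: keep the port */
	return 0;
}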
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 3116edfcdc18..574da2b4529c 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -150,6 +150,9 @@ static const struct usb_device_id usb_quirk_list[] = { /* appletouch */ { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */ + { USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM }, + /* Avision AV600U */ { USB_DEVICE(0x0638, 0x0a13), .driver_info = USB_QUIRK_STRING_FETCH_255 }, @@ -249,6 +252,7 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = { { USB_DEVICE(0x093a, 0x2500), .driver_info = USB_QUIRK_RESET_RESUME }, { USB_DEVICE(0x093a, 0x2510), .driver_info = USB_QUIRK_RESET_RESUME }, { USB_DEVICE(0x093a, 0x2521), .driver_info = USB_QUIRK_RESET_RESUME }, + { USB_DEVICE(0x03f0, 0x2b4a), .driver_info = USB_QUIRK_RESET_RESUME }, /* Logitech Optical Mouse M90/M100 */ { USB_DEVICE(0x046d, 0xc05a), .driver_info = USB_QUIRK_RESET_RESUME }, diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c index 2776cfe64c09..ef9cf4a21afe 100644 --- a/drivers/usb/core/usb-acpi.c +++ b/drivers/usb/core/usb-acpi.c @@ -127,6 +127,22 @@ out: */ #define USB_ACPI_LOCATION_VALID (1 << 31) +static struct acpi_device *usb_acpi_find_port(struct acpi_device *parent, + int raw) +{ + struct acpi_device *adev; + + if (!parent) + return NULL; + + list_for_each_entry(adev, &parent->children, node) { + if (acpi_device_adr(adev) == raw) + return adev; + } + + return acpi_find_child_device(parent, raw, false); +} + static struct acpi_device *usb_acpi_find_companion(struct device *dev) { struct usb_device *udev; @@ -174,8 +190,10 @@ static struct acpi_device *usb_acpi_find_companion(struct device *dev) int raw; raw = usb_hcd_find_raw_port_number(hcd, port1); - adev = acpi_find_child_device(ACPI_COMPANION(&udev->dev), - raw, false); + + adev = usb_acpi_find_port(ACPI_COMPANION(&udev->dev), + raw); + if (!adev) return NULL; } else { @@ -186,7 +204,9 @@ static struct acpi_device *usb_acpi_find_companion(struct device *dev) return NULL; acpi_bus_get_device(parent_handle, &adev); - adev = acpi_find_child_device(adev, port1, false); + + adev = usb_acpi_find_port(adev, port1); + if (!adev) return NULL; } diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index c2eba06f2ace..0744b14e120b 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -1333,8 +1333,6 @@ static int dwc3_remove(struct platform_device *pdev) dwc3_event_buffers_cleanup(dwc); dwc3_free_event_buffers(dwc); - usb_phy_set_suspend(dwc->usb2_phy, 1); - usb_phy_set_suspend(dwc->usb3_phy, 1); phy_power_off(dwc->usb2_generic_phy); phy_power_off(dwc->usb3_generic_phy); diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c index 46a077b0a930..d92a33097461 100644 --- a/drivers/usb/dwc3/dwc3-msm.c +++ b/drivers/usb/dwc3/dwc3-msm.c @@ -58,7 +58,7 @@ /* time out to wait for USB cable status notification (in ms)*/ #define SM_INIT_TIMEOUT 30000 - +#define DWC3_WAKEUP_SRC_TIMEOUT 5000 /* AHB2PHY register offsets */ #define PERIPH_SS_AHB2PHY_TOP_CFG 0x10 @@ -216,6 +216,7 @@ struct dwc3_msm { struct notifier_block usbdev_nb; bool hc_died; bool xhci_ss_compliance_enable; + bool no_wakeup_src_in_hostmode; struct extcon_dev *extcon_vbus; struct extcon_dev *extcon_id; @@ -233,6 +234,7 @@ struct dwc3_msm { struct pm_qos_request pm_qos_req_dma; struct delayed_work perf_vote_work; struct delayed_work sdp_check; + struct mutex 
suspend_resume_mutex; }; #define USB_HSPHY_3P3_VOL_MIN 3050000 /* uV */ @@ -1997,8 +1999,10 @@ static int dwc3_msm_suspend(struct dwc3_msm *mdwc) dbg_event(0xFF, "Ctl Sus", atomic_read(&dwc->in_lpm)); + mutex_lock(&mdwc->suspend_resume_mutex); if (atomic_read(&dwc->in_lpm)) { dev_dbg(mdwc->dev, "%s: Already suspended\n", __func__); + mutex_unlock(&mdwc->suspend_resume_mutex); return 0; } @@ -2015,6 +2019,7 @@ static int dwc3_msm_suspend(struct dwc3_msm *mdwc) __func__, evt->count / 4); dbg_print_reg("PENDING DEVICE EVENT", *(u32 *)(evt->buf + evt->lpos)); + mutex_unlock(&mdwc->suspend_resume_mutex); return -EBUSY; } } @@ -2034,6 +2039,7 @@ static int dwc3_msm_suspend(struct dwc3_msm *mdwc) dev_dbg(mdwc->dev, "%s: cable disconnected while not in idle otg state\n", __func__); + mutex_unlock(&mdwc->suspend_resume_mutex); return -EBUSY; } @@ -2047,12 +2053,15 @@ static int dwc3_msm_suspend(struct dwc3_msm *mdwc) pr_err("%s(): Trying to go in LPM with state:%d\n", __func__, dwc->gadget.state); pr_err("%s(): LPM is not performed.\n", __func__); + mutex_unlock(&mdwc->suspend_resume_mutex); return -EBUSY; } ret = dwc3_msm_prepare_suspend(mdwc); - if (ret) + if (ret) { + mutex_unlock(&mdwc->suspend_resume_mutex); return ret; + } /* Initialize variables here */ can_suspend_ssphy = !(mdwc->in_host_mode && @@ -2153,6 +2162,7 @@ static int dwc3_msm_suspend(struct dwc3_msm *mdwc) } dev_info(mdwc->dev, "DWC3 in low power mode\n"); + mutex_unlock(&mdwc->suspend_resume_mutex); return 0; } @@ -2164,8 +2174,10 @@ static int dwc3_msm_resume(struct dwc3_msm *mdwc) dev_dbg(mdwc->dev, "%s: exiting lpm\n", __func__); + mutex_lock(&mdwc->suspend_resume_mutex); if (!atomic_read(&dwc->in_lpm)) { dev_dbg(mdwc->dev, "%s: Already resumed\n", __func__); + mutex_unlock(&mdwc->suspend_resume_mutex); return 0; } @@ -2300,6 +2312,7 @@ static int dwc3_msm_resume(struct dwc3_msm *mdwc) msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC)); dbg_event(0xFF, "Ctl Res", atomic_read(&dwc->in_lpm)); + mutex_unlock(&mdwc->suspend_resume_mutex); return 0; } @@ -2338,6 +2351,7 @@ static void dwc3_ext_event_notify(struct dwc3_msm *mdwc) clear_bit(B_SUSPEND, &mdwc->inputs); } + pm_stay_awake(mdwc->dev); schedule_delayed_work(&mdwc->sm_work, 0); } @@ -2626,6 +2640,7 @@ static int dwc3_msm_id_notifier(struct notifier_block *nb, if (mdwc->id_state != id) { mdwc->id_state = id; dbg_event(0xFF, "id_state", mdwc->id_state); + pm_stay_awake(mdwc->dev); queue_work(mdwc->dwc3_wq, &mdwc->resume_work); } @@ -2688,6 +2703,7 @@ static int dwc3_msm_vbus_notifier(struct notifier_block *nb, mdwc->vbus_active = event; if (dwc->is_drd && !mdwc->in_restart) { dbg_event(0xFF, "Q RW (vbus)", mdwc->vbus_active); + pm_stay_awake(mdwc->dev); queue_work(mdwc->dwc3_wq, &mdwc->resume_work); } done: @@ -3087,6 +3103,11 @@ static int dwc3_msm_probe(struct platform_device *pdev) mdwc->disable_host_mode_pm = of_property_read_bool(node, "qcom,disable-host-mode-pm"); + mdwc->no_wakeup_src_in_hostmode = of_property_read_bool(node, + "qcom,no-wakeup-src-in-hostmode"); + if (mdwc->no_wakeup_src_in_hostmode) + dev_dbg(&pdev->dev, "dwc3 host not using wakeup source\n"); + dwc3_set_notifier(&dwc3_msm_notify_event); /* Assumes dwc3 is the first DT child of dwc3-msm */ @@ -3179,6 +3200,7 @@ static int dwc3_msm_probe(struct platform_device *pdev) POWER_SUPPLY_PROP_PRESENT, &pval); } + mutex_init(&mdwc->suspend_resume_mutex); /* Update initial VBUS/ID state from extcon */ if (mdwc->extcon_vbus && extcon_get_cable_state_(mdwc->extcon_vbus, EXTCON_USB)) @@ -3879,12 +3901,14 @@ static void 
dwc3_otg_sm_work(struct work_struct *w) mdwc->otg_state = OTG_STATE_A_IDLE; goto ret; } + pm_wakeup_event(mdwc->dev, DWC3_WAKEUP_SRC_TIMEOUT); } break; case OTG_STATE_A_HOST: if (test_bit(ID, &mdwc->inputs) || mdwc->hc_died) { - dev_dbg(mdwc->dev, "id || hc_died\n"); + dbg_event(0xFF, "id || hc_died", 0); + dev_dbg(mdwc->dev, "%s state id || hc_died\n", state); dwc3_otg_start_host(mdwc, 0); mdwc->otg_state = OTG_STATE_B_IDLE; mdwc->vbus_retry_count = 0; @@ -3895,6 +3919,7 @@ static void dwc3_otg_sm_work(struct work_struct *w) dbg_event(0xFF, "XHCIResume", 0); if (dwc) pm_runtime_resume(&dwc->xhci->dev); + pm_wakeup_event(mdwc->dev, DWC3_WAKEUP_SRC_TIMEOUT); } break; @@ -3910,6 +3935,34 @@ ret: return; } +static int dwc3_msm_pm_prepare(struct device *dev) +{ + struct dwc3_msm *mdwc = dev_get_drvdata(dev); + struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3); + struct usb_hcd *hcd; + struct xhci_hcd *xhci; + + dev_dbg(dev, "dwc3-msm PM prepare,lpm:%u\n", atomic_read(&dwc->in_lpm)); + dbg_event(0xFF, "PM Prep", 0); + if (!mdwc->in_host_mode || !mdwc->no_wakeup_src_in_hostmode) + return 0; + + hcd = dev_get_drvdata(&dwc->xhci->dev); + xhci = hcd_to_xhci(hcd); + flush_delayed_work(&mdwc->sm_work); + + /* If in lpm then prevent usb core to runtime_resume from pm_suspend */ + if (atomic_read(&dwc->in_lpm)) { + hcd_to_bus(hcd)->skip_resume = true; + hcd_to_bus(xhci->shared_hcd)->skip_resume = true; + } else { + hcd_to_bus(hcd)->skip_resume = false; + hcd_to_bus(xhci->shared_hcd)->skip_resume = false; + } + + return 0; +} + #ifdef CONFIG_PM_SLEEP static int dwc3_msm_pm_suspend(struct device *dev) { @@ -3921,7 +3974,7 @@ static int dwc3_msm_pm_suspend(struct device *dev) dbg_event(0xFF, "PM Sus", 0); flush_workqueue(mdwc->dwc3_wq); - if (!atomic_read(&dwc->in_lpm)) { + if (!atomic_read(&dwc->in_lpm) && !mdwc->no_wakeup_src_in_hostmode) { dev_err(mdwc->dev, "Abort PM suspend!! 
(USB is outside LPM)\n"); return -EBUSY; } @@ -3945,8 +3998,13 @@ static int dwc3_msm_pm_resume(struct device *dev) flush_workqueue(mdwc->dwc3_wq); atomic_set(&mdwc->pm_suspended, 0); + /* Resume h/w in host mode as it may not be runtime suspended */ + if (mdwc->no_wakeup_src_in_hostmode && !test_bit(ID, &mdwc->inputs)) + dwc3_msm_resume(mdwc); + /* kick in otg state machine */ - queue_work(mdwc->dwc3_wq, &mdwc->resume_work); + if (mdwc->vbus_active || !mdwc->id_state) + queue_work(mdwc->dwc3_wq, &mdwc->resume_work); return 0; } @@ -3983,6 +4041,7 @@ static int dwc3_msm_runtime_resume(struct device *dev) #endif static const struct dev_pm_ops dwc3_msm_dev_pm_ops = { + .prepare = dwc3_msm_pm_prepare, SET_SYSTEM_SLEEP_PM_OPS(dwc3_msm_pm_suspend, dwc3_msm_pm_resume) SET_RUNTIME_PM_OPS(dwc3_msm_runtime_suspend, dwc3_msm_runtime_resume, dwc3_msm_runtime_idle) diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 442d44278f33..22ba45f40f0b 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -2088,7 +2088,7 @@ static ssize_t suspended_show(struct device *dev, struct device_attribute *attr, struct usb_gadget *gadget = dev_to_usb_gadget(dev); struct usb_composite_dev *cdev = get_gadget_data(gadget); - return sprintf(buf, "%d\n", cdev->suspended); + return snprintf(buf, PAGE_SIZE, "%d\n", cdev->suspended); } static DEVICE_ATTR_RO(suspended); diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index a2c14bb5efa4..255a11f595c4 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ -1540,6 +1540,18 @@ static void android_disconnect(struct usb_gadget *gadget) gi = container_of(cdev, struct gadget_info, cdev); + /* FIXME: There's a race between usb_gadget_udc_stop() which is likely + * to set the gadget driver to NULL in the udc driver and this drivers + * gadget disconnect fn which likely checks for the gadget driver to + * be a null ptr. It happens that unbind (doing set_gadget_data(NULL)) + * is called before the gadget driver is set to NULL and the udc driver + * calls disconnect fn which results in cdev being a null ptr. + */ + if (cdev == NULL) { + WARN(1, "%s: gadget driver already disconnected\n", __func__); + return; + } + /* accessory HID support can be active while the accessory function is not actually enabled, so we need to inform it when we are disconnected. 
diff --git a/drivers/usb/gadget/function/f_accessory.c b/drivers/usb/gadget/function/f_accessory.c index 7950f25136a5..a412f024d834 100644 --- a/drivers/usb/gadget/function/f_accessory.c +++ b/drivers/usb/gadget/function/f_accessory.c @@ -865,6 +865,14 @@ static struct hid_driver acc_hid_driver = { .probe = acc_hid_probe, }; +static void acc_complete_setup_noop(struct usb_ep *ep, struct usb_request *req) +{ + /* + * Default no-op function when nothing needs to be done for the + * setup request + */ +} + int acc_ctrlrequest(struct usb_composite_dev *cdev, const struct usb_ctrlrequest *ctrl) { @@ -892,6 +900,7 @@ int acc_ctrlrequest(struct usb_composite_dev *cdev, schedule_delayed_work( &dev->start_work, msecs_to_jiffies(10)); value = 0; + cdev->req->complete = acc_complete_setup_noop; } else if (b_request == ACCESSORY_SEND_STRING) { dev->string_index = w_index; cdev->gadget->ep0->driver_data = dev; @@ -900,10 +909,13 @@ int acc_ctrlrequest(struct usb_composite_dev *cdev, } else if (b_request == ACCESSORY_SET_AUDIO_MODE && w_index == 0 && w_length == 0) { dev->audio_mode = w_value; + cdev->req->complete = acc_complete_setup_noop; value = 0; } else if (b_request == ACCESSORY_REGISTER_HID) { + cdev->req->complete = acc_complete_setup_noop; value = acc_register_hid(dev, w_value, w_index); } else if (b_request == ACCESSORY_UNREGISTER_HID) { + cdev->req->complete = acc_complete_setup_noop; value = acc_unregister_hid(dev, w_value); } else if (b_request == ACCESSORY_SET_HID_REPORT_DESC) { spin_lock_irqsave(&dev->lock, flags); @@ -938,7 +950,7 @@ int acc_ctrlrequest(struct usb_composite_dev *cdev, if (b_request == ACCESSORY_GET_PROTOCOL) { *((u16 *)cdev->req->buf) = PROTOCOL_VERSION; value = sizeof(u16); - + cdev->req->complete = acc_complete_setup_noop; /* clear any string left over from a previous session */ memset(dev->manufacturer, 0, sizeof(dev->manufacturer)); memset(dev->model, 0, sizeof(dev->model)); diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index b6f4790ffc08..bb02b3be17f7 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -46,7 +46,8 @@ static void *ffs_ipc_log; #define ffs_log(fmt, ...) 
do { \ - ipc_log_string(ffs_ipc_log, "%s: " fmt, __func__, \ + if (ffs_ipc_log) \ + ipc_log_string(ffs_ipc_log, "%s: " fmt, __func__, \ ##__VA_ARGS__); \ pr_debug(fmt, ##__VA_ARGS__); \ } while (0) @@ -1610,10 +1611,6 @@ static int functionfs_init(void) else pr_err("failed registering file system (%d)\n", ret); - ffs_ipc_log = ipc_log_context_create(NUM_PAGES, "f_fs", 0); - if (IS_ERR_OR_NULL(ffs_ipc_log)) - ffs_ipc_log = NULL; - return ret; } @@ -1623,14 +1620,8 @@ static void functionfs_cleanup(void) pr_info("unloading\n"); unregister_filesystem(&ffs_fs_type); - - if (ffs_ipc_log) { - ipc_log_context_destroy(ffs_ipc_log); - ffs_ipc_log = NULL; - } } - /* ffs_data and ffs_function construction and destruction code **************/ static void ffs_data_clear(struct ffs_data *ffs); @@ -4063,5 +4054,28 @@ static char *ffs_prepare_buffer(const char __user *buf, size_t len) } DECLARE_USB_FUNCTION_INIT(ffs, ffs_alloc_inst, ffs_alloc); + +static int ffs_init(void) +{ + ffs_ipc_log = ipc_log_context_create(NUM_PAGES, "f_fs", 0); + if (IS_ERR_OR_NULL(ffs_ipc_log)) { + ffs_ipc_log = NULL; + pr_err("%s: Create IPC log context failure\n", + __func__); + } + + return 0; +} +module_init(ffs_init); + +static void __exit ffs_exit(void) +{ + if (ffs_ipc_log) { + ipc_log_context_destroy(ffs_ipc_log); + ffs_ipc_log = NULL; + } +} +module_exit(ffs_exit); + MODULE_LICENSE("GPL"); MODULE_AUTHOR("Michal Nazarewicz"); diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c index 19fe6c8cb25a..3b925d9b000e 100644 --- a/drivers/usb/gadget/function/f_gsi.c +++ b/drivers/usb/gadget/function/f_gsi.c @@ -885,8 +885,9 @@ static void gsi_ctrl_clear_cpkt_queues(struct f_gsi *gsi, bool skip_req_q) { struct gsi_ctrl_pkt *cpkt = NULL; struct list_head *act, *tmp; + unsigned long flags; - spin_lock(&gsi->c_port.lock); + spin_lock_irqsave(&gsi->c_port.lock, flags); if (skip_req_q) goto clean_resp_q; @@ -901,7 +902,7 @@ clean_resp_q: list_del(&cpkt->list); gsi_ctrl_pkt_free(cpkt); } - spin_unlock(&gsi->c_port.lock); + spin_unlock_irqrestore(&gsi->c_port.lock, flags); } static int gsi_ctrl_send_cpkt_tomodem(struct f_gsi *gsi, void *buf, size_t len) @@ -2507,6 +2508,16 @@ static int gsi_bind(struct usb_configuration *c, struct usb_function *f) DEFAULT_PKT_ALIGNMENT_FACTOR); rndis_set_pkt_alignment_factor(gsi->params, DEFAULT_PKT_ALIGNMENT_FACTOR); + if (gsi->rndis_use_wceis) { + info.iad_desc->bFunctionClass = + USB_CLASS_WIRELESS_CONTROLLER; + info.iad_desc->bFunctionSubClass = 0x01; + info.iad_desc->bFunctionProtocol = 0x03; + info.ctrl_desc->bInterfaceClass = + USB_CLASS_WIRELESS_CONTROLLER; + info.ctrl_desc->bInterfaceSubClass = 0x1; + info.ctrl_desc->bInterfaceProtocol = 0x03; + } break; case IPA_USB_MBIM: info.string_defs = mbim_gsi_string_defs; @@ -2986,6 +2997,42 @@ static ssize_t gsi_info_show(struct config_item *item, char *page) CONFIGFS_ATTR_RO(gsi_, info); +static ssize_t gsi_rndis_wceis_show(struct config_item *item, char *page) +{ + struct f_gsi *gsi = to_gsi_opts(item)->gsi; + + return snprintf(page, PAGE_SIZE, "%d\n", gsi->rndis_use_wceis); +} + +static ssize_t gsi_rndis_wceis_store(struct config_item *item, + const char *page, size_t len) +{ + struct f_gsi *gsi = to_gsi_opts(item)->gsi; + bool val; + + if (kstrtobool(page, &val)) + return -EINVAL; + + gsi->rndis_use_wceis = val; + + return len; +} + +CONFIGFS_ATTR(gsi_, rndis_wceis); + +static struct configfs_attribute *gsi_rndis_attrs[] = { + &gsi_attr_info, + &gsi_attr_rndis_wceis, + NULL, +}; + +static struct config_item_type 
gsi_func_rndis_type = { + .ct_item_ops = &gsi_item_ops, + .ct_attrs = gsi_rndis_attrs, + .ct_owner = THIS_MODULE, +}; + + static struct configfs_attribute *gsi_attrs[] = { &gsi_attr_info, NULL, @@ -3015,6 +3062,9 @@ static int gsi_set_inst_name(struct usb_function_instance *fi, return -EINVAL; } + if (ret == IPA_USB_RNDIS) + config_group_init_type_name(&opts->func_inst.group, "", + &gsi_func_rndis_type); gsi = gsi_function_init(ret); if (IS_ERR(gsi)) return PTR_ERR(gsi); diff --git a/drivers/usb/gadget/function/f_gsi.h b/drivers/usb/gadget/function/f_gsi.h index 262a60e8a450..14490ab296c2 100644 --- a/drivers/usb/gadget/function/f_gsi.h +++ b/drivers/usb/gadget/function/f_gsi.h @@ -237,6 +237,7 @@ struct f_gsi { struct rndis_params *params; atomic_t connected; bool data_interface_up; + bool rndis_use_wceis; const struct usb_endpoint_descriptor *in_ep_desc_backup; const struct usb_endpoint_descriptor *out_ep_desc_backup; @@ -470,9 +471,9 @@ static struct usb_interface_descriptor rndis_gsi_control_intf = { /* .bInterfaceNumber = DYNAMIC */ /* status endpoint is optional; this could be patched later */ .bNumEndpoints = 1, - .bInterfaceClass = USB_CLASS_WIRELESS_CONTROLLER, - .bInterfaceSubClass = 0x01, - .bInterfaceProtocol = 0x03, + .bInterfaceClass = USB_CLASS_MISC, + .bInterfaceSubClass = 0x04, + .bInterfaceProtocol = 0x01, /* RNDIS over Ethernet */ /* .iInterface = DYNAMIC */ }; @@ -530,9 +531,9 @@ rndis_gsi_iad_descriptor = { .bDescriptorType = USB_DT_INTERFACE_ASSOCIATION, .bFirstInterface = 0, /* XXX, hardcoded */ .bInterfaceCount = 2, /* control + data */ - .bFunctionClass = USB_CLASS_WIRELESS_CONTROLLER, - .bFunctionSubClass = 0x01, - .bFunctionProtocol = 0x03, + .bFunctionClass = USB_CLASS_MISC, + .bFunctionSubClass = 0x04, + .bFunctionProtocol = 0x01, /* RNDIS over Ethernet */ /* .iFunction = DYNAMIC */ }; diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c index 9123f1635843..1ffed7b74d3f 100644 --- a/drivers/usb/gadget/function/u_ether.c +++ b/drivers/usb/gadget/function/u_ether.c @@ -1073,6 +1073,9 @@ int gether_set_dev_addr(struct net_device *net, const char *dev_addr) struct eth_dev *dev; u8 new_addr[ETH_ALEN]; + if (!net) + return -ENODEV; + dev = netdev_priv(net); if (get_ether_addr(dev_addr, new_addr)) return -EINVAL; @@ -1085,6 +1088,9 @@ int gether_get_dev_addr(struct net_device *net, char *dev_addr, int len) { struct eth_dev *dev; + if (!net) + return -ENODEV; + dev = netdev_priv(net); return get_ether_addr_str(dev->dev_mac, dev_addr, len); } @@ -1095,6 +1101,9 @@ int gether_set_host_addr(struct net_device *net, const char *host_addr) struct eth_dev *dev; u8 new_addr[ETH_ALEN]; + if (!net) + return -ENODEV; + dev = netdev_priv(net); if (get_ether_addr(host_addr, new_addr)) return -EINVAL; @@ -1107,6 +1116,9 @@ int gether_get_host_addr(struct net_device *net, char *host_addr, int len) { struct eth_dev *dev; + if (!net) + return -ENODEV; + dev = netdev_priv(net); return get_ether_addr_str(dev->host_mac, host_addr, len); } @@ -1139,6 +1151,9 @@ void gether_set_qmult(struct net_device *net, unsigned qmult) { struct eth_dev *dev; + if (!net) + return; + dev = netdev_priv(net); dev->qmult = qmult; } @@ -1148,6 +1163,9 @@ unsigned gether_get_qmult(struct net_device *net) { struct eth_dev *dev; + if (!net) + return -ENODEV; + dev = netdev_priv(net); return dev->qmult; } @@ -1155,6 +1173,9 @@ EXPORT_SYMBOL_GPL(gether_get_qmult); int gether_get_ifname(struct net_device *net, char *name, int len) { + if (!net) + return -ENODEV; + rtnl_lock(); 
strlcpy(name, netdev_name(net), len); rtnl_unlock(); diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index f9400564cb72..03b9a372636f 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -89,6 +89,7 @@ enum amd_chipset_gen { AMD_CHIPSET_HUDSON2, AMD_CHIPSET_BOLTON, AMD_CHIPSET_YANGTZE, + AMD_CHIPSET_TAISHAN, AMD_CHIPSET_UNKNOWN, }; @@ -132,6 +133,11 @@ static int amd_chipset_sb_type_init(struct amd_chipset_info *pinfo) pinfo->sb_type.gen = AMD_CHIPSET_SB700; else if (rev >= 0x40 && rev <= 0x4f) pinfo->sb_type.gen = AMD_CHIPSET_SB800; + } + pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, + 0x145c, NULL); + if (pinfo->smbus_dev) { + pinfo->sb_type.gen = AMD_CHIPSET_TAISHAN; } else { pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL); @@ -251,11 +257,12 @@ int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev) { /* Make sure amd chipset type has already been initialized */ usb_amd_find_chipset_info(); - if (amd_chipset.sb_type.gen != AMD_CHIPSET_YANGTZE) - return 0; - - dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n"); - return 1; + if (amd_chipset.sb_type.gen == AMD_CHIPSET_YANGTZE || + amd_chipset.sb_type.gen == AMD_CHIPSET_TAISHAN) { + dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n"); + return 1; + } + return 0; } EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk); diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 56a9cd62f2c4..c6998f086e12 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -325,6 +325,34 @@ static int xhci_plat_remove(struct platform_device *dev) return 0; } +#ifdef CONFIG_PM_SLEEP +static int xhci_plat_suspend(struct device *dev) +{ + struct usb_hcd *hcd = dev_get_drvdata(dev); + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + + if (!xhci) + return 0; + + dev_dbg(dev, "xhci-plat PM suspend\n"); + + return xhci_suspend(xhci, true); +} + +static int xhci_plat_resume(struct device *dev) +{ + struct usb_hcd *hcd = dev_get_drvdata(dev); + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + + if (!xhci) + return 0; + + dev_dbg(dev, "xhci-plat PM resume\n"); + + return xhci_resume(xhci, false); +} +#endif + #ifdef CONFIG_PM static int xhci_plat_runtime_idle(struct device *dev) { @@ -373,7 +401,7 @@ static int xhci_plat_runtime_resume(struct device *dev) } static const struct dev_pm_ops xhci_plat_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(NULL, NULL) + SET_SYSTEM_SLEEP_PM_OPS(xhci_plat_suspend, xhci_plat_resume) SET_RUNTIME_PM_OPS(xhci_plat_runtime_suspend, xhci_plat_runtime_resume, xhci_plat_runtime_idle) }; diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index aab1c7903288..641e0280ad5a 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -918,7 +918,7 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup) struct usb_hcd *hcd = xhci_to_hcd(xhci); u32 command; - if (!hcd->state) + if (!hcd->state || xhci->suspended) return 0; if (hcd->state != HC_STATE_SUSPENDED || @@ -988,6 +988,7 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup) /* step 5: remove core well power */ /* synchronize irq when using MSI-X */ xhci_msix_sync_irqs(xhci); + xhci->suspended = true; return rc; } @@ -1007,7 +1008,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) int retval = 0; bool comp_timer_running = false; - if (!hcd->state) + if (!hcd->state || !xhci->suspended) return 0; /* Wait a bit if either of the roothubs need to settle from the @@ -1141,6 +1142,7 @@ int xhci_resume(struct 
xhci_hcd *xhci, bool hibernated) /* Re-enable port polling. */ xhci_dbg(xhci, "%s: starting port polling.\n", __func__); + xhci->suspended = false; set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); usb_hcd_poll_rh_status(xhci->shared_hcd); set_bit(HCD_FLAG_POLL_RH, &hcd->flags); diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 8fcec1be6b1a..7fc97d930657 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1667,6 +1667,7 @@ struct xhci_hcd { /* Compliance Mode Recovery Data */ struct timer_list comp_mode_recovery_timer; u32 port_status_u0; + bool suspended; /* Compliance Mode Timer Triggered every 2 seconds */ #define COMP_MODE_RCVRY_MSECS 2000 }; diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index 13d5614f37f1..0d843e0f8055 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c @@ -138,6 +138,7 @@ static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep) "Could not flush host TX%d fifo: csr: %04x\n", ep->epnum, csr)) return; + mdelay(1); } } diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c index f9f47da8a88b..3c0386ee5875 100644 --- a/drivers/usb/pd/policy_engine.c +++ b/drivers/usb/pd/policy_engine.c @@ -125,6 +125,12 @@ enum usbpd_control_msg_type { MSG_VCONN_SWAP, MSG_WAIT, MSG_SOFT_RESET, + MSG_NOT_SUPPORTED = 0x10, + MSG_GET_SOURCE_CAP_EXTENDED, + MSG_GET_STATUS, + MSG_FR_SWAP, + MSG_GET_PPS_STATUS, + MSG_GET_COUNTRY_CODES, }; enum usbpd_data_msg_type { @@ -132,9 +138,29 @@ enum usbpd_data_msg_type { MSG_REQUEST, MSG_BIST, MSG_SINK_CAPABILITIES, + MSG_BATTERY_STATUS, + MSG_ALERT, + MSG_GET_COUNTRY_INFO, MSG_VDM = 0xF, }; +enum usbpd_ext_msg_type { + MSG_SOURCE_CAPABILITIES_EXTENDED = 1, + MSG_STATUS, + MSG_GET_BATTERY_CAP, + MSG_GET_BATTERY_STATUS, + MSG_BATTERY_CAPABILITIES, + MSG_GET_MANUFACTURER_INFO, + MSG_MANUFACTURER_INFO, + MSG_SECURITY_REQUEST, + MSG_SECURITY_RESPONSE, + MSG_FIRMWARE_UPDATE_REQUEST, + MSG_FIRMWARE_UPDATE_RESPONSE, + MSG_PPS_STATUS, + MSG_COUNTRY_INFO, + MSG_COUNTRY_CODES, +}; + enum vdm_state { VDM_NONE, DISCOVERED_ID, @@ -198,13 +224,30 @@ static void *usbpd_ipc_log; #define PD_MAX_DATA_OBJ 7 +#define PD_SRC_CAP_EXT_DB_LEN 24 +#define PD_STATUS_DB_LEN 5 +#define PD_BATTERY_CAP_DB_LEN 9 + +#define PD_MAX_EXT_MSG_LEN 260 +#define PD_MAX_EXT_MSG_LEGACY_LEN 26 + #define PD_MSG_HDR(type, dr, pr, id, cnt, rev) \ - (((type) & 0xF) | ((dr) << 5) | (rev << 6) | \ + (((type) & 0x1F) | ((dr) << 5) | (rev << 6) | \ ((pr) << 8) | ((id) << 9) | ((cnt) << 12)) -#define PD_MSG_HDR_COUNT(hdr) (((hdr) >> 12) & 7) -#define PD_MSG_HDR_TYPE(hdr) ((hdr) & 0xF) -#define PD_MSG_HDR_ID(hdr) (((hdr) >> 9) & 7) -#define PD_MSG_HDR_REV(hdr) (((hdr) >> 6) & 3) +#define PD_MSG_HDR_COUNT(hdr) (((hdr) >> 12) & 7) +#define PD_MSG_HDR_TYPE(hdr) ((hdr) & 0x1F) +#define PD_MSG_HDR_ID(hdr) (((hdr) >> 9) & 7) +#define PD_MSG_HDR_REV(hdr) (((hdr) >> 6) & 3) +#define PD_MSG_HDR_EXTENDED BIT(15) +#define PD_MSG_HDR_IS_EXTENDED(hdr) ((hdr) & PD_MSG_HDR_EXTENDED) + +#define PD_MSG_EXT_HDR(chunked, num, req, size) \ + (((chunked) << 15) | (((num) & 0xF) << 11) | \ + ((req) << 10) | ((size) & 0x1FF)) +#define PD_MSG_EXT_HDR_IS_CHUNKED(ehdr) ((ehdr) & 0x8000) +#define PD_MSG_EXT_HDR_CHUNK_NUM(ehdr) (((ehdr) >> 11) & 0xF) +#define PD_MSG_EXT_HDR_REQ_CHUNK(ehdr) ((ehdr) & 0x400) +#define PD_MSG_EXT_HDR_DATA_SIZE(ehdr) ((ehdr) & 0x1FF) #define PD_RDO_FIXED(obj, gb, mismatch, usb_comm, no_usb_susp, curr1, curr2) \ (((obj) << 28) | ((gb) << 27) | ((mismatch) << 26) | \ @@ -291,19 +334,24 @@ 
static const u32 default_src_caps[] = { 0x36019096 }; /* VSafe5V @ 1.5A */ static const u32 default_snk_caps[] = { 0x2601912C }; /* VSafe5V @ 3A */ struct vdm_tx { - u32 data[7]; + u32 data[PD_MAX_DATA_OBJ]; int size; }; struct rx_msg { - u8 type; - u8 len; - u32 payload[7]; + u16 hdr; + u16 data_len; /* size of payload in bytes */ struct list_head entry; + u8 payload[]; }; -#define IS_DATA(m, t) ((m) && ((m)->len) && ((m)->type == (t))) -#define IS_CTRL(m, t) ((m) && !((m)->len) && ((m)->type == (t))) +#define IS_DATA(m, t) ((m) && !PD_MSG_HDR_IS_EXTENDED((m)->hdr) && \ + PD_MSG_HDR_COUNT((m)->hdr) && \ + (PD_MSG_HDR_TYPE((m)->hdr) == (t))) +#define IS_CTRL(m, t) ((m) && !PD_MSG_HDR_COUNT((m)->hdr) && \ + (PD_MSG_HDR_TYPE((m)->hdr) == (t))) +#define IS_EXT(m, t) ((m) && PD_MSG_HDR_IS_EXTENDED((m)->hdr) && \ + (PD_MSG_HDR_TYPE((m)->hdr) == (t))) struct usbpd { struct device dev; @@ -318,8 +366,10 @@ struct usbpd { bool hard_reset_recvd; struct list_head rx_q; spinlock_t rx_lock; + struct rx_msg *rx_ext_msg; u32 received_pdos[PD_MAX_DATA_OBJ]; + u32 received_ado; u16 src_cap_id; u8 selected_pdo; u8 requested_pdo; @@ -351,6 +401,8 @@ struct usbpd { bool pd_phy_opened; bool send_request; struct completion is_ready; + struct completion tx_chunk_request; + u8 next_tx_chunk; struct mutex swap_lock; struct dual_role_phy_instance *dual_role; @@ -377,6 +429,19 @@ struct usbpd { struct list_head svid_handlers; struct list_head instance; + + /* ext msg support */ + bool send_get_src_cap_ext; + u8 src_cap_ext_db[PD_SRC_CAP_EXT_DB_LEN]; + bool send_get_pps_status; + u32 pps_status_db; + u8 status_db[PD_STATUS_DB_LEN]; + bool send_get_battery_cap; + u8 get_battery_cap_db; + u8 battery_cap_db[PD_BATTERY_CAP_DB_LEN]; + u8 get_battery_status_db; + bool send_get_battery_status; + u32 battery_sts_dobj; }; static LIST_HEAD(_usbpd); /* useful for debugging */ @@ -498,6 +563,57 @@ static int pd_send_msg(struct usbpd *pd, u8 msg_type, const u32 *data, return 0; } +static int pd_send_ext_msg(struct usbpd *pd, u8 msg_type, + const u8 *data, size_t data_len, enum pd_sop_type sop) +{ + int ret; + size_t len_remain, chunk_len; + u8 chunked_payload[PD_MAX_DATA_OBJ * sizeof(u32)] = {0}; + u16 hdr; + u16 ext_hdr; + u8 num_objs; + + if (data_len > PD_MAX_EXT_MSG_LEN) { + usbpd_warn(&pd->dev, "Extended message length exceeds max, truncating...\n"); + data_len = PD_MAX_EXT_MSG_LEN; + } + + pd->next_tx_chunk = 0; + len_remain = data_len; + do { + ext_hdr = PD_MSG_EXT_HDR(1, pd->next_tx_chunk++, 0, data_len); + memcpy(chunked_payload, &ext_hdr, sizeof(ext_hdr)); + + chunk_len = min_t(size_t, len_remain, + PD_MAX_EXT_MSG_LEGACY_LEN); + memcpy(chunked_payload + sizeof(ext_hdr), data, chunk_len); + + num_objs = DIV_ROUND_UP(chunk_len + sizeof(u16), sizeof(u32)); + len_remain -= chunk_len; + + reinit_completion(&pd->tx_chunk_request); + hdr = PD_MSG_HDR(msg_type, pd->current_dr, pd->current_pr, + pd->tx_msgid, num_objs, pd->spec_rev) | + PD_MSG_HDR_EXTENDED; + ret = pd_phy_write(hdr, chunked_payload, + num_objs * sizeof(u32), sop); + if (ret) + return ret; + + pd->tx_msgid = (pd->tx_msgid + 1) & PD_MAX_MSG_ID; + + /* Wait for request chunk */ + if (len_remain && + !wait_for_completion_timeout(&pd->tx_chunk_request, + msecs_to_jiffies(SENDER_RESPONSE_TIME))) { + usbpd_err(&pd->dev, "Timed out waiting for chunk request\n"); + return -EPROTO; + } + } while (len_remain); + + return 0; +} + static int pd_select_pdo(struct usbpd *pd, int pdo_pos, int uv, int ua) { int curr; @@ -629,6 +745,150 @@ static void phy_sig_received(struct 
usbpd *pd, enum pd_sig_type sig) kick_sm(pd, 0); } +struct pd_request_chunk { + struct work_struct w; + struct usbpd *pd; + u8 msg_type; + u8 chunk_num; + enum pd_sop_type sop; +}; + +static void pd_request_chunk_work(struct work_struct *w) +{ + struct pd_request_chunk *req = + container_of(w, struct pd_request_chunk, w); + struct usbpd *pd = req->pd; + unsigned long flags; + int ret; + u8 payload[4] = {0}; /* ext_hdr + padding */ + u16 hdr = PD_MSG_HDR(req->msg_type, pd->current_dr, pd->current_pr, + pd->tx_msgid, 1, pd->spec_rev) | PD_MSG_HDR_EXTENDED; + + *(u16 *)payload = PD_MSG_EXT_HDR(1, req->chunk_num, 1, 0); + + ret = pd_phy_write(hdr, payload, sizeof(payload), req->sop); + if (!ret) { + pd->tx_msgid = (pd->tx_msgid + 1) & PD_MAX_MSG_ID; + } else { + usbpd_err(&pd->dev, "could not send chunk request\n"); + + /* queue what we have anyway */ + spin_lock_irqsave(&pd->rx_lock, flags); + list_add_tail(&pd->rx_ext_msg->entry, &pd->rx_q); + spin_unlock_irqrestore(&pd->rx_lock, flags); + + pd->rx_ext_msg = NULL; + } + + kfree(req); +} + +static struct rx_msg *pd_ext_msg_received(struct usbpd *pd, u16 header, u8 *buf, + size_t len, enum pd_sop_type sop) +{ + struct rx_msg *rx_msg; + u16 bytes_to_copy; + u16 ext_hdr = *(u16 *)buf; + u8 chunk_num; + + if (!PD_MSG_EXT_HDR_IS_CHUNKED(ext_hdr)) { + usbpd_err(&pd->dev, "unchunked extended messages unsupported\n"); + return NULL; + } + + /* request for next Tx chunk */ + if (PD_MSG_EXT_HDR_REQ_CHUNK(ext_hdr)) { + if (PD_MSG_EXT_HDR_DATA_SIZE(ext_hdr) || + PD_MSG_EXT_HDR_CHUNK_NUM(ext_hdr) != + pd->next_tx_chunk) { + usbpd_err(&pd->dev, "invalid request chunk ext header 0x%02x\n", + ext_hdr); + return NULL; + } + + if (!completion_done(&pd->tx_chunk_request)) + complete(&pd->tx_chunk_request); + + return NULL; + } + + chunk_num = PD_MSG_EXT_HDR_CHUNK_NUM(ext_hdr); + if (!chunk_num) { + /* allocate new message if first chunk */ + rx_msg = kzalloc(sizeof(*rx_msg) + + PD_MSG_EXT_HDR_DATA_SIZE(ext_hdr), + GFP_KERNEL); + if (!rx_msg) + return NULL; + + rx_msg->hdr = header; + rx_msg->data_len = PD_MSG_EXT_HDR_DATA_SIZE(ext_hdr); + + if (rx_msg->data_len > PD_MAX_EXT_MSG_LEN) { + usbpd_warn(&pd->dev, "Extended message length exceeds max, truncating...\n"); + rx_msg->data_len = PD_MAX_EXT_MSG_LEN; + } + } else { + if (!pd->rx_ext_msg) { + usbpd_err(&pd->dev, "missing first rx_ext_msg chunk\n"); + return NULL; + } + + rx_msg = pd->rx_ext_msg; + } + + /* + * The amount to copy is derived as follows: + * + * - if extended data_len < 26, then copy data_len bytes + * - for chunks 0..N-2, copy 26 bytes + * - for the last chunk (N-1), copy the remainder + */ + bytes_to_copy = + min((rx_msg->data_len - chunk_num * PD_MAX_EXT_MSG_LEGACY_LEN), + PD_MAX_EXT_MSG_LEGACY_LEN); + + /* check against received length to avoid overrun */ + if (bytes_to_copy > len - sizeof(ext_hdr)) { + usbpd_warn(&pd->dev, "not enough bytes in chunk, expected:%u received:%zu\n", + bytes_to_copy, len - sizeof(ext_hdr)); + bytes_to_copy = len - sizeof(ext_hdr); + } + + memcpy(rx_msg->payload + chunk_num * PD_MAX_EXT_MSG_LEGACY_LEN, buf + 2, + bytes_to_copy); + + /* request next chunk? 
*/ + if ((rx_msg->data_len - chunk_num * PD_MAX_EXT_MSG_LEGACY_LEN) > + PD_MAX_EXT_MSG_LEGACY_LEN) { + struct pd_request_chunk *req; + + if (pd->rx_ext_msg && pd->rx_ext_msg != rx_msg) { + usbpd_dbg(&pd->dev, "stale previous rx_ext_msg?\n"); + kfree(pd->rx_ext_msg); + } + + pd->rx_ext_msg = rx_msg; + + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) + goto queue_rx; /* return what we have anyway */ + + INIT_WORK(&req->w, pd_request_chunk_work); + req->pd = pd; + req->msg_type = PD_MSG_HDR_TYPE(header); + req->chunk_num = chunk_num + 1; + req->sop = sop; + queue_work(pd->wq, &req->w); + + return NULL; + } + +queue_rx: + pd->rx_ext_msg = NULL; + return rx_msg; /* queue it for usbpd_sm */ +} + static void phy_msg_received(struct usbpd *pd, enum pd_sop_type sop, u8 *buf, size_t len) { @@ -676,21 +936,31 @@ static void phy_msg_received(struct usbpd *pd, enum pd_sop_type sop, return; } - rx_msg = kzalloc(sizeof(*rx_msg), GFP_KERNEL); - if (!rx_msg) - return; + /* if spec rev differs (i.e. is older), update PHY */ + if (PD_MSG_HDR_REV(header) < pd->spec_rev) + pd->spec_rev = PD_MSG_HDR_REV(header); + + usbpd_dbg(&pd->dev, "received message: type(%d) num_objs(%d)\n", + PD_MSG_HDR_TYPE(header), PD_MSG_HDR_COUNT(header)); + + if (!PD_MSG_HDR_IS_EXTENDED(header)) { + rx_msg = kzalloc(sizeof(*rx_msg) + len, GFP_KERNEL); + if (!rx_msg) + return; - rx_msg->type = PD_MSG_HDR_TYPE(header); - rx_msg->len = PD_MSG_HDR_COUNT(header); - memcpy(&rx_msg->payload, buf, min(len, sizeof(rx_msg->payload))); + rx_msg->hdr = header; + rx_msg->data_len = len; + memcpy(rx_msg->payload, buf, len); + } else { + rx_msg = pd_ext_msg_received(pd, header, buf, len, sop); + if (!rx_msg) + return; + } spin_lock_irqsave(&pd->rx_lock, flags); list_add_tail(&rx_msg->entry, &pd->rx_q); spin_unlock_irqrestore(&pd->rx_lock, flags); - usbpd_dbg(&pd->dev, "received message: type(%d) len(%d)\n", - rx_msg->type, rx_msg->len); - kick_sm(pd, 0); } @@ -1140,11 +1410,13 @@ EXPORT_SYMBOL(usbpd_send_svdm); static void handle_vdm_rx(struct usbpd *pd, struct rx_msg *rx_msg) { - u32 vdm_hdr = rx_msg->payload[0]; - u32 *vdos = &rx_msg->payload[1]; + u32 vdm_hdr = + rx_msg->data_len >= sizeof(u32) ? 
((u32 *)rx_msg->payload)[0] : 0; + + u32 *vdos = (u32 *)&rx_msg->payload[sizeof(u32)]; u16 svid = VDM_HDR_SVID(vdm_hdr); u16 *psvid; - u8 i, num_vdos = rx_msg->len - 1; /* num objects minus header */ + u8 i, num_vdos = PD_MSG_HDR_COUNT(rx_msg->hdr) - 1; u8 cmd = SVDM_HDR_CMD(vdm_hdr); u8 cmd_type = SVDM_HDR_CMD_TYPE(vdm_hdr); bool has_dp = false; @@ -1757,7 +2029,7 @@ static void usbpd_sm(struct work_struct *w) case PE_SRC_SEND_CAPABILITIES_WAIT: if (IS_DATA(rx_msg, MSG_REQUEST)) { - pd->rdo = rx_msg->payload[0]; + pd->rdo = *(u32 *)rx_msg->payload; usbpd_set_state(pd, PE_SRC_NEGOTIATE_CAPABILITY); } else if (rx_msg) { usbpd_err(&pd->dev, "Unexpected message received\n"); @@ -1780,7 +2052,7 @@ static void usbpd_sm(struct work_struct *w) usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET); } } else if (IS_DATA(rx_msg, MSG_REQUEST)) { - pd->rdo = rx_msg->payload[0]; + pd->rdo = *(u32 *)rx_msg->payload; usbpd_set_state(pd, PE_SRC_NEGOTIATE_CAPABILITY); } else if (IS_CTRL(rx_msg, MSG_DR_SWAP)) { if (pd->vdm_state == MODE_ENTERED) { @@ -1822,6 +2094,15 @@ static void usbpd_sm(struct work_struct *w) vconn_swap(pd); } else if (IS_DATA(rx_msg, MSG_VDM)) { handle_vdm_rx(pd, rx_msg); + } else if (rx_msg && pd->spec_rev == USBPD_REV_30) { + /* unhandled messages */ + ret = pd_send_msg(pd, MSG_NOT_SUPPORTED, NULL, 0, + SOP_MSG); + if (ret) { + usbpd_err(&pd->dev, "Error sending Not supported\n"); + usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET); + } + break; } else if (pd->send_pr_swap) { pd->send_pr_swap = false; ret = pd_send_msg(pd, MSG_PR_SWAP, NULL, 0, SOP_MSG); @@ -2062,7 +2343,8 @@ static void usbpd_sm(struct work_struct *w) usbpd_err(&pd->dev, "Error sending Sink Caps\n"); usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET); } - } else if (IS_CTRL(rx_msg, MSG_GET_SOURCE_CAP)) { + } else if (IS_CTRL(rx_msg, MSG_GET_SOURCE_CAP) && + pd->spec_rev == USBPD_REV_20) { ret = pd_send_msg(pd, MSG_SOURCE_CAPABILITIES, default_src_caps, ARRAY_SIZE(default_src_caps), SOP_MSG); @@ -2085,7 +2367,8 @@ static void usbpd_sm(struct work_struct *w) } dr_swap(pd); - } else if (IS_CTRL(rx_msg, MSG_PR_SWAP)) { + } else if (IS_CTRL(rx_msg, MSG_PR_SWAP) && + pd->spec_rev == USBPD_REV_20) { /* lock in current mode */ set_power_role(pd, pd->current_pr); @@ -2103,7 +2386,8 @@ static void usbpd_sm(struct work_struct *w) POWER_SUPPLY_PROP_PR_SWAP, &val); usbpd_set_state(pd, PE_PRS_SNK_SRC_TRANSITION_TO_OFF); break; - } else if (IS_CTRL(rx_msg, MSG_VCONN_SWAP)) { + } else if (IS_CTRL(rx_msg, MSG_VCONN_SWAP) && + pd->spec_rev == USBPD_REV_20) { /* * if VCONN is connected to VBUS, make sure we are * not in high voltage contract, otherwise reject. 
@@ -2131,6 +2415,120 @@ static void usbpd_sm(struct work_struct *w) vconn_swap(pd); } else if (IS_DATA(rx_msg, MSG_VDM)) { handle_vdm_rx(pd, rx_msg); + } else if (pd->send_get_src_cap_ext && is_sink_tx_ok(pd)) { + pd->send_get_src_cap_ext = false; + ret = pd_send_msg(pd, MSG_GET_SOURCE_CAP_EXTENDED, NULL, + 0, SOP_MSG); + if (ret) { + dev_err(&pd->dev, + "Error sending get_src_cap_ext\n"); + usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET); + break; + } + kick_sm(pd, SENDER_RESPONSE_TIME); + } else if (rx_msg && + IS_EXT(rx_msg, MSG_SOURCE_CAPABILITIES_EXTENDED)) { + if (rx_msg->data_len != PD_SRC_CAP_EXT_DB_LEN) { + usbpd_err(&pd->dev, "Invalid src cap ext db\n"); + break; + } + memcpy(&pd->src_cap_ext_db, rx_msg->payload, + sizeof(pd->src_cap_ext_db)); + complete(&pd->is_ready); + } else if (pd->send_get_pps_status && is_sink_tx_ok(pd)) { + pd->send_get_pps_status = false; + ret = pd_send_msg(pd, MSG_GET_PPS_STATUS, NULL, + 0, SOP_MSG); + if (ret) { + dev_err(&pd->dev, + "Error sending get_pps_status\n"); + usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET); + break; + } + kick_sm(pd, SENDER_RESPONSE_TIME); + } else if (rx_msg && + IS_EXT(rx_msg, MSG_PPS_STATUS)) { + if (rx_msg->data_len != sizeof(pd->pps_status_db)) { + usbpd_err(&pd->dev, "Invalid pps status db\n"); + break; + } + memcpy(&pd->pps_status_db, rx_msg->payload, + sizeof(pd->pps_status_db)); + complete(&pd->is_ready); + } else if (IS_DATA(rx_msg, MSG_ALERT)) { + if (rx_msg->data_len != sizeof(pd->received_ado)) { + usbpd_err(&pd->dev, "Invalid ado\n"); + break; + } + memcpy(&pd->received_ado, rx_msg->payload, + sizeof(pd->received_ado)); + ret = pd_send_msg(pd, MSG_GET_STATUS, NULL, + 0, SOP_MSG); + if (ret) { + dev_err(&pd->dev, + "Error sending get_status\n"); + usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET); + break; + } + kick_sm(pd, SENDER_RESPONSE_TIME); + } else if (rx_msg && + IS_EXT(rx_msg, MSG_STATUS)) { + if (rx_msg->data_len != PD_STATUS_DB_LEN) { + usbpd_err(&pd->dev, "Invalid status db\n"); + break; + } + memcpy(&pd->status_db, rx_msg->payload, + sizeof(pd->status_db)); + kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE); + } else if (pd->send_get_battery_cap && is_sink_tx_ok(pd)) { + pd->send_get_battery_cap = false; + ret = pd_send_ext_msg(pd, MSG_GET_BATTERY_CAP, + &pd->get_battery_cap_db, 1, SOP_MSG); + if (ret) { + dev_err(&pd->dev, + "Error sending get_battery_cap\n"); + usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET); + break; + } + kick_sm(pd, SENDER_RESPONSE_TIME); + } else if (rx_msg && + IS_EXT(rx_msg, MSG_BATTERY_CAPABILITIES)) { + if (rx_msg->data_len != PD_BATTERY_CAP_DB_LEN) { + usbpd_err(&pd->dev, "Invalid battery cap db\n"); + break; + } + memcpy(&pd->battery_cap_db, rx_msg->payload, + sizeof(pd->battery_cap_db)); + complete(&pd->is_ready); + } else if (pd->send_get_battery_status && is_sink_tx_ok(pd)) { + pd->send_get_battery_status = false; + ret = pd_send_ext_msg(pd, MSG_GET_BATTERY_STATUS, + &pd->get_battery_status_db, 1, SOP_MSG); + if (ret) { + dev_err(&pd->dev, + "Error sending get_battery_status\n"); + usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET); + break; + } + kick_sm(pd, SENDER_RESPONSE_TIME); + } else if (rx_msg && + IS_EXT(rx_msg, MSG_BATTERY_STATUS)) { + if (rx_msg->data_len != sizeof(pd->battery_sts_dobj)) { + usbpd_err(&pd->dev, "Invalid bat sts dobj\n"); + break; + } + memcpy(&pd->battery_sts_dobj, rx_msg->payload, + sizeof(pd->battery_sts_dobj)); + complete(&pd->is_ready); + } else if (rx_msg && pd->spec_rev == USBPD_REV_30) { + /* unhandled messages */ + ret = pd_send_msg(pd, MSG_NOT_SUPPORTED, NULL, 
0, + SOP_MSG); + if (ret) { + usbpd_err(&pd->dev, "Error sending Not supported\n"); + usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET); + } + break; } else if (pd->send_request) { pd->send_request = false; usbpd_set_state(pd, PE_SNK_SELECT_CAPABILITY); @@ -2779,6 +3177,10 @@ static int usbpd_uevent(struct device *dev, struct kobj_uevent_env *env) "explicit" : "implicit"); add_uevent_var(env, "ALT_MODE=%d", pd->vdm_state == MODE_ENTERED); + add_uevent_var(env, "ADO=%08x", pd->received_ado); + for (i = 0; i < PD_STATUS_DB_LEN; i++) + add_uevent_var(env, "SDB%d=%08x", i, pd->status_db[i]); + return 0; } @@ -3126,6 +3528,145 @@ static ssize_t hard_reset_store(struct device *dev, } static DEVICE_ATTR_WO(hard_reset); +static int trigger_tx_msg(struct usbpd *pd, bool *msg_tx_flag) +{ + int ret = 0; + + /* Only allowed if we are already in explicit sink contract */ + if (pd->current_state != PE_SNK_READY || !is_sink_tx_ok(pd)) { + usbpd_err(&pd->dev, "%s: Cannot send msg\n", __func__); + ret = -EBUSY; + goto out; + } + + reinit_completion(&pd->is_ready); + *msg_tx_flag = true; + kick_sm(pd, 0); + + /* wait for operation to complete */ + if (!wait_for_completion_timeout(&pd->is_ready, + msecs_to_jiffies(1000))) { + usbpd_err(&pd->dev, "%s: request timed out\n", __func__); + ret = -ETIMEDOUT; + } + +out: + *msg_tx_flag = false; + return ret; + +} + +static ssize_t get_src_cap_ext_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int i, ret, len = 0; + struct usbpd *pd = dev_get_drvdata(dev); + + if (pd->spec_rev == USBPD_REV_20) + return -EINVAL; + + ret = trigger_tx_msg(pd, &pd->send_get_src_cap_ext); + if (ret) + return ret; + + for (i = 0; i < PD_SRC_CAP_EXT_DB_LEN; i++) + len += snprintf(buf + len, PAGE_SIZE - len, "%d\n", + pd->src_cap_ext_db[i]); + return len; +} +static DEVICE_ATTR_RO(get_src_cap_ext); + +static ssize_t get_pps_status_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int ret; + struct usbpd *pd = dev_get_drvdata(dev); + + if (pd->spec_rev == USBPD_REV_20) + return -EINVAL; + + ret = trigger_tx_msg(pd, &pd->send_get_pps_status); + if (ret) + return ret; + + return snprintf(buf, PAGE_SIZE, "%d\n", pd->pps_status_db); +} +static DEVICE_ATTR_RO(get_pps_status); + +static ssize_t rx_ado_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct usbpd *pd = dev_get_drvdata(dev); + + /* dump the ADO as a hex string */ + return snprintf(buf, PAGE_SIZE, "%08x\n", pd->received_ado); +} +static DEVICE_ATTR_RO(rx_ado); + +static ssize_t get_battery_cap_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t size) +{ + struct usbpd *pd = dev_get_drvdata(dev); + int val, ret; + + if (pd->spec_rev == USBPD_REV_20 || sscanf(buf, "%d\n", &val) != 1) { + pd->get_battery_cap_db = -EINVAL; + return -EINVAL; + } + + pd->get_battery_cap_db = val; + + ret = trigger_tx_msg(pd, &pd->send_get_battery_cap); + + return ret ? 
ret : size; +} + +static ssize_t get_battery_cap_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int i, len = 0; + struct usbpd *pd = dev_get_drvdata(dev); + + if (pd->get_battery_cap_db == -EINVAL) + return -EINVAL; + + for (i = 0; i < PD_BATTERY_CAP_DB_LEN; i++) + len += snprintf(buf + len, PAGE_SIZE - len, "%d\n", + pd->battery_cap_db[i]); + return len; +} +static DEVICE_ATTR_RW(get_battery_cap); + +static ssize_t get_battery_status_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t size) +{ + struct usbpd *pd = dev_get_drvdata(dev); + int val, ret; + + if (pd->spec_rev == USBPD_REV_20 || sscanf(buf, "%d\n", &val) != 1) { + pd->get_battery_status_db = -EINVAL; + return -EINVAL; + } + + pd->get_battery_status_db = val; + + ret = trigger_tx_msg(pd, &pd->send_get_battery_status); + + return ret ? ret : size; +} + +static ssize_t get_battery_status_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct usbpd *pd = dev_get_drvdata(dev); + + if (pd->get_battery_status_db == -EINVAL) + return -EINVAL; + + return snprintf(buf, PAGE_SIZE, "%d\n", pd->battery_sts_dobj); +} +static DEVICE_ATTR_RW(get_battery_status); + static struct attribute *usbpd_attrs[] = { &dev_attr_contract.attr, &dev_attr_initial_pr.attr, @@ -3145,6 +3686,11 @@ static struct attribute *usbpd_attrs[] = { &dev_attr_rdo.attr, &dev_attr_rdo_h.attr, &dev_attr_hard_reset.attr, + &dev_attr_get_src_cap_ext.attr, + &dev_attr_get_pps_status.attr, + &dev_attr_rx_ado.attr, + &dev_attr_get_battery_cap.attr, + &dev_attr_get_battery_status.attr, NULL, }; ATTRIBUTE_GROUPS(usbpd); @@ -3375,6 +3921,7 @@ struct usbpd *usbpd_create(struct device *parent) INIT_LIST_HEAD(&pd->rx_q); INIT_LIST_HEAD(&pd->svid_handlers); init_completion(&pd->is_ready); + init_completion(&pd->tx_chunk_request); pd->psy_nb.notifier_call = psy_changed; ret = power_supply_reg_notifier(&pd->psy_nb); diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index b0dc6da3d970..41a6513646de 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -135,6 +135,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */ { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */ { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */ + { USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */ { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */ diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index ebe51f11105d..fe123153b1a5 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -2025,6 +2025,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) }, /* D-Link DWM-158 */ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */ .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */ + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */ diff --git 
a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index 1db4b61bdf7b..a51b28379850 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -49,6 +49,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) }, { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) }, { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) }, + { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_UC485) }, { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) }, { USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) }, { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) }, diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index 09d9be88209e..3b5a15d1dc0d 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h @@ -27,6 +27,7 @@ #define ATEN_VENDOR_ID 0x0557 #define ATEN_VENDOR_ID2 0x0547 #define ATEN_PRODUCT_ID 0x2008 +#define ATEN_PRODUCT_UC485 0x2021 #define ATEN_PRODUCT_ID2 0x2118 #define IODATA_VENDOR_ID 0x04bb diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index 53341a77d89f..a37ed1e59e99 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h @@ -123,9 +123,9 @@ UNUSUAL_DEV(0x0bc2, 0xab2a, 0x0000, 0x9999, /* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */ UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999, "Initio Corporation", - "", + "INIC-3069", USB_SC_DEVICE, USB_PR_DEVICE, NULL, - US_FL_NO_ATA_1X), + US_FL_NO_ATA_1X | US_FL_IGNORE_RESIDUE), /* Reported-by: Tom Arild Naess <tanaess@gmail.com> */ UNUSUAL_DEV(0x152d, 0x0539, 0x0000, 0x9999, diff --git a/drivers/video/fbdev/msm/mdss_cec_core.c b/drivers/video/fbdev/msm/mdss_cec_core.c index 4b53b01be709..1d9950494d65 100644 --- a/drivers/video/fbdev/msm/mdss_cec_core.c +++ b/drivers/video/fbdev/msm/mdss_cec_core.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -681,7 +681,7 @@ static ssize_t cec_wta_msg(struct device *dev, } spin_unlock_irqrestore(&ctl->lock, flags); - if (msg->frame_size > MAX_OPERAND_SIZE) { + if (msg->frame_size > MAX_CEC_FRAME_SIZE) { pr_err("msg frame too big!\n"); ret = -EINVAL; goto end; diff --git a/drivers/video/fbdev/msm/mdss_dp.c b/drivers/video/fbdev/msm/mdss_dp.c index a98e5a9007bd..bc325a91a9bf 100644 --- a/drivers/video/fbdev/msm/mdss_dp.c +++ b/drivers/video/fbdev/msm/mdss_dp.c @@ -68,6 +68,7 @@ static int mdss_dp_process_phy_test_pattern_request( struct mdss_dp_drv_pdata *dp); static int mdss_dp_send_audio_notification( struct mdss_dp_drv_pdata *dp, int val); +static void mdss_dp_reset_sw_state(struct mdss_dp_drv_pdata *dp); static inline void mdss_dp_reset_sink_count(struct mdss_dp_drv_pdata *dp) { @@ -1489,7 +1490,12 @@ static int mdss_dp_setup_main_link(struct mdss_dp_drv_pdata *dp, bool train) pr_debug("enter\n"); mdss_dp_mainlink_ctrl(&dp->ctrl_io, true); - mdss_dp_aux_set_sink_power_state(dp, SINK_POWER_ON); + ret = mdss_dp_aux_send_psm_request(dp, false); + if (ret) { + pr_err("Failed to exit low power mode, rc=%d\n", ret); + goto end; + } + reinit_completion(&dp->video_comp); if (mdss_dp_is_phy_test_pattern_requested(dp)) @@ -1576,15 +1582,6 @@ static int mdss_dp_on_irq(struct mdss_dp_drv_pdata *dp_drv, bool lt_needed) dp_drv->power_on = true; - if (dp_drv->psm_enabled) { - ret = mdss_dp_aux_send_psm_request(dp_drv, false); - if (ret) { - pr_err("Failed to exit low power mode, rc=%d\n", - ret); - goto exit_loop; - } - } - ret = mdss_dp_setup_main_link(dp_drv, lt_needed); exit_loop: @@ -1637,6 +1634,7 @@ int mdss_dp_on_hpd(struct mdss_dp_drv_pdata *dp_drv) dp_drv->link_rate = mdss_dp_gen_link_clk(dp_drv); if (!dp_drv->link_rate) { pr_err("Unable to configure required link rate\n"); + mdss_dp_clk_ctrl(dp_drv, DP_CORE_PM, false); ret = -EINVAL; goto exit; } @@ -1652,15 +1650,6 @@ int mdss_dp_on_hpd(struct mdss_dp_drv_pdata *dp_drv) mdss_dp_configure_source_params(dp_drv, ln_map); - if (dp_drv->psm_enabled) { - ret = mdss_dp_aux_send_psm_request(dp_drv, false); - if (ret) { - pr_err("Failed to exit low power mode, rc=%d\n", ret); - goto exit; - } - } - - link_training: dp_drv->power_on = true; @@ -2116,6 +2105,12 @@ static int mdss_dp_notify_clients(struct mdss_dp_drv_pdata *dp, goto invalid_request; if (dp->hpd_notification_status == NOTIFY_DISCONNECT_IRQ_HPD) { /* + * Just in case if NOTIFY_DISCONNECT_IRQ_HPD is timedout + */ + if (dp->power_on) + mdss_dp_state_ctrl(&dp->ctrl_io, ST_PUSH_IDLE); + + /* * user modules already turned off. Need to explicitly * turn off DP core here. 
*/ @@ -2982,6 +2977,7 @@ static int mdss_dp_sysfs_create(struct mdss_dp_drv_pdata *dp, static void mdss_dp_mainlink_push_idle(struct mdss_panel_data *pdata) { + bool cable_connected; struct mdss_dp_drv_pdata *dp_drv = NULL; const int idle_pattern_completion_timeout_ms = 3 * HZ / 100; @@ -2996,6 +2992,20 @@ static void mdss_dp_mainlink_push_idle(struct mdss_panel_data *pdata) /* wait until link training is completed */ mutex_lock(&dp_drv->train_mutex); + if (!dp_drv->power_on) { + pr_err("DP Controller not powered on\n"); + mutex_unlock(&dp_drv->train_mutex); + return; + } + + /* power down the sink if cable is still connected */ + mutex_lock(&dp_drv->attention_lock); + cable_connected = dp_drv->cable_connected; + mutex_unlock(&dp_drv->attention_lock); + if (cable_connected && dp_drv->alt_mode.dp_status.hpd_high) { + if (mdss_dp_aux_send_psm_request(dp_drv, true)) + pr_err("Failed to enter low power mode\n"); + } reinit_completion(&dp_drv->idle_comp); mdss_dp_state_ctrl(&dp_drv->ctrl_io, ST_PUSH_IDLE); if (!wait_for_completion_timeout(&dp_drv->idle_comp, @@ -3116,6 +3126,10 @@ static int mdss_dp_event_handler(struct mdss_panel_data *pdata, pr_err("DP Controller not powered on\n"); break; } + if (!atomic_read(&dp->notification_pending)) { + pr_debug("blank when cable is connected\n"); + kthread_park(dp->ev_thread); + } if (dp_is_hdcp_enabled(dp)) { dp->hdcp_status = HDCP_STATE_INACTIVE; @@ -3155,8 +3169,10 @@ static int mdss_dp_event_handler(struct mdss_panel_data *pdata, * when you connect DP sink while the * device is in suspend state. */ - if ((!dp->power_on) && (dp->dp_initialized)) + if ((!dp->power_on) && (dp->dp_initialized)) { rc = mdss_dp_host_deinit(dp); + kthread_park(dp->ev_thread); + } /* * For DP suspend/resume use case, CHECK_PARAMS is @@ -3168,8 +3184,11 @@ static int mdss_dp_event_handler(struct mdss_panel_data *pdata, dp->suspend_vic = dp->vic; break; case MDSS_EVENT_RESUME: - if (dp->suspend_vic != HDMI_VFRMT_UNKNOWN) + if (dp->suspend_vic != HDMI_VFRMT_UNKNOWN) { dp_init_panel_info(dp, dp->suspend_vic); + mdss_dp_reset_sw_state(dp); + kthread_unpark(dp->ev_thread); + } break; default: pr_debug("unhandled event=%d\n", event); diff --git a/drivers/video/fbdev/msm/mdss_dp.h b/drivers/video/fbdev/msm/mdss_dp.h index afa8e3db590f..983f5e34a515 100644 --- a/drivers/video/fbdev/msm/mdss_dp.h +++ b/drivers/video/fbdev/msm/mdss_dp.h @@ -218,10 +218,6 @@ struct dp_alt_mode { #define ST_SEND_VIDEO BIT(7) #define ST_PUSH_IDLE BIT(8) -/* sink power state */ -#define SINK_POWER_ON 1 -#define SINK_POWER_OFF 2 - #define DP_LINK_RATE_162 6 /* 1.62G = 270M * 6 */ #define DP_LINK_RATE_270 10 /* 2.70G = 270M * 10 */ #define DP_LINK_RATE_540 20 /* 5.40G = 270M * 20 */ @@ -1181,11 +1177,9 @@ void dp_aux_native_handler(struct mdss_dp_drv_pdata *dp, u32 isr); void mdss_dp_aux_init(struct mdss_dp_drv_pdata *ep); void mdss_dp_fill_link_cfg(struct mdss_dp_drv_pdata *ep); -void mdss_dp_sink_power_down(struct mdss_dp_drv_pdata *ep); void mdss_dp_lane_power_ctrl(struct mdss_dp_drv_pdata *ep, int up); void mdss_dp_config_ctrl(struct mdss_dp_drv_pdata *ep); char mdss_dp_gen_link_clk(struct mdss_dp_drv_pdata *dp); -int mdss_dp_aux_set_sink_power_state(struct mdss_dp_drv_pdata *ep, char state); int mdss_dp_aux_send_psm_request(struct mdss_dp_drv_pdata *dp, bool enable); void mdss_dp_aux_send_test_response(struct mdss_dp_drv_pdata *ep); void *mdss_dp_get_hdcp_data(struct device *dev); diff --git a/drivers/video/fbdev/msm/mdss_dp_aux.c b/drivers/video/fbdev/msm/mdss_dp_aux.c index 786fe10055da..86946adfeeb0 
100644 --- a/drivers/video/fbdev/msm/mdss_dp_aux.c +++ b/drivers/video/fbdev/msm/mdss_dp_aux.c @@ -684,6 +684,11 @@ char mdss_dp_gen_link_clk(struct mdss_dp_drv_pdata *dp) pr_debug("clk_rate=%llu, bpp= %d, lane_cnt=%d\n", pinfo->clk_rate, pinfo->bpp, lane_cnt); + if (lane_cnt == 0) { + pr_warn("Invalid max lane count\n"); + return 0; + } + /* * The max pixel clock supported is 675Mhz. The * current calculations below will make sure @@ -2551,15 +2556,6 @@ static int dp_link_rate_down_shift(struct mdss_dp_drv_pdata *ep) return ret; } -int mdss_dp_aux_set_sink_power_state(struct mdss_dp_drv_pdata *ep, char state) -{ - int ret; - - ret = dp_aux_write_buf(ep, 0x600, &state, 1, 0); - pr_debug("state=%d ret=%d\n", state, ret); - return ret; -} - static void dp_clear_training_pattern(struct mdss_dp_drv_pdata *ep) { int usleep_time; diff --git a/drivers/video/fbdev/msm/mdss_dsi.c b/drivers/video/fbdev/msm/mdss_dsi.c index 5f7e7c6bcde0..7b6153503af5 100644 --- a/drivers/video/fbdev/msm/mdss_dsi.c +++ b/drivers/video/fbdev/msm/mdss_dsi.c @@ -968,7 +968,7 @@ static int mdss_dsi_cmd_flush(struct file *file, fl_owner_t id) while (len >= sizeof(*dchdr)) { dchdr = (struct dsi_ctrl_hdr *)bp; dchdr->dlen = ntohs(dchdr->dlen); - if (dchdr->dlen > len) { + if (dchdr->dlen > len || dchdr->dlen < 0) { pr_err("%s: dtsi cmd=%x error, len=%d\n", __func__, dchdr->dtype, dchdr->dlen); kfree(buf); diff --git a/drivers/video/fbdev/msm/mdss_fb.c b/drivers/video/fbdev/msm/mdss_fb.c index fe1289633291..d35858137191 100644 --- a/drivers/video/fbdev/msm/mdss_fb.c +++ b/drivers/video/fbdev/msm/mdss_fb.c @@ -4697,6 +4697,7 @@ static int mdss_fb_atomic_commit_ioctl(struct fb_info *info, input_layer_list = commit.commit_v1.input_layers; if (layer_count > MAX_LAYER_COUNT) { + pr_err("invalid layer count :%d\n", layer_count); ret = -EINVAL; goto err; } else if (layer_count) { diff --git a/drivers/video/fbdev/msm/mdss_hdmi_edid.c b/drivers/video/fbdev/msm/mdss_hdmi_edid.c index 2143c2bdb84b..a49c5290753c 100644 --- a/drivers/video/fbdev/msm/mdss_hdmi_edid.c +++ b/drivers/video/fbdev/msm/mdss_hdmi_edid.c @@ -2322,6 +2322,13 @@ int hdmi_edid_parser(void *input) goto bail; } + /* Find out if CEA extension blocks exceeding max limit */ + if (num_of_cea_blocks >= MAX_EDID_BLOCKS) { + DEV_WARN("%s: HDMI EDID exceeded max CEA blocks limit\n", + __func__); + num_of_cea_blocks = MAX_EDID_BLOCKS - 1; + } + /* check for valid CEA block */ if (edid_buf[EDID_BLOCK_SIZE] != 2) { DEV_ERR("%s: Invalid CEA block\n", __func__); diff --git a/drivers/video/fbdev/msm/mdss_hdmi_tx.c b/drivers/video/fbdev/msm/mdss_hdmi_tx.c index 837147dc5036..a5a407708334 100644 --- a/drivers/video/fbdev/msm/mdss_hdmi_tx.c +++ b/drivers/video/fbdev/msm/mdss_hdmi_tx.c @@ -113,6 +113,7 @@ static void hdmi_tx_fps_work(struct work_struct *work); static int hdmi_tx_pinctrl_set_state(struct hdmi_tx_ctrl *hdmi_ctrl, enum hdmi_tx_power_module_type module, bool active); static void hdmi_panel_set_hdr_infoframe(struct hdmi_tx_ctrl *hdmi_ctrl); +static void hdmi_panel_clear_hdr_infoframe(struct hdmi_tx_ctrl *hdmi_ctrl); static int hdmi_tx_audio_info_setup(struct platform_device *pdev, struct msm_ext_disp_audio_setup_params *params); static int hdmi_tx_get_audio_edid_blk(struct platform_device *pdev, @@ -410,29 +411,33 @@ static inline void hdmi_tx_cec_device_suspend(struct hdmi_tx_ctrl *hdmi_ctrl) hdmi_cec_device_suspend(fd, hdmi_ctrl->panel_suspend); } -static inline void hdmi_tx_send_cable_notification( +static inline void hdmi_tx_send_audio_notification( + struct hdmi_tx_ctrl 
*hdmi_ctrl, int val) +{ + if (hdmi_ctrl && hdmi_ctrl->ext_audio_data.intf_ops.hpd) { + u32 flags = 0; + + if (!hdmi_tx_is_dvi_mode(hdmi_ctrl)) + flags |= MSM_EXT_DISP_HPD_AUDIO; + + if (flags) + hdmi_ctrl->ext_audio_data.intf_ops.hpd( + hdmi_ctrl->ext_pdev, + hdmi_ctrl->ext_audio_data.type, val, flags); + } +} + +static inline void hdmi_tx_send_video_notification( struct hdmi_tx_ctrl *hdmi_ctrl, int val, bool async) { if (hdmi_ctrl && hdmi_ctrl->ext_audio_data.intf_ops.hpd) { u32 flags = 0; - if (async || hdmi_tx_is_in_splash(hdmi_ctrl)) { + if (async || hdmi_tx_is_in_splash(hdmi_ctrl)) flags |= MSM_EXT_DISP_HPD_ASYNC_VIDEO; - - if (async) { - if (!hdmi_tx_is_dvi_mode(hdmi_ctrl)) - flags |= MSM_EXT_DISP_HPD_ASYNC_AUDIO; - } else - if (!hdmi_tx_is_dvi_mode(hdmi_ctrl)) - flags |= MSM_EXT_DISP_HPD_AUDIO; - - } else { + else flags |= MSM_EXT_DISP_HPD_VIDEO; - if (!hdmi_tx_is_dvi_mode(hdmi_ctrl)) - flags |= MSM_EXT_DISP_HPD_AUDIO; - } - hdmi_ctrl->ext_audio_data.intf_ops.hpd(hdmi_ctrl->ext_pdev, hdmi_ctrl->ext_audio_data.type, val, flags); } @@ -445,6 +450,8 @@ static inline void hdmi_tx_ack_state( !hdmi_tx_is_dvi_mode(hdmi_ctrl)) hdmi_ctrl->ext_audio_data.intf_ops.notify(hdmi_ctrl->ext_pdev, val); + + hdmi_tx_send_audio_notification(hdmi_ctrl, val); } static struct hdmi_tx_ctrl *hdmi_tx_get_drvdata_from_panel_data( @@ -876,7 +883,8 @@ static ssize_t hdmi_tx_sysfs_wta_hpd(struct device *dev, * No need to blocking wait for display/audio in this * case since HAL is not up so no ACK can be expected. */ - hdmi_tx_send_cable_notification(hdmi_ctrl, 0, true); + hdmi_tx_send_audio_notification(hdmi_ctrl, 0); + hdmi_tx_send_video_notification(hdmi_ctrl, 0, true); } break; @@ -1269,6 +1277,7 @@ static ssize_t hdmi_tx_sysfs_wta_hdr_stream(struct device *dev, { int ret = 0; struct hdmi_tx_ctrl *ctrl = NULL; + u8 hdr_op; ctrl = hdmi_tx_get_drvdata_from_sysfs_dev(dev); if (!ctrl) { @@ -1289,36 +1298,43 @@ static ssize_t hdmi_tx_sysfs_wta_hdr_stream(struct device *dev, goto end; } - memcpy(&ctrl->hdr_data, buf, sizeof(struct mdp_hdr_stream)); + memcpy(&ctrl->hdr_ctrl, buf, sizeof(struct mdp_hdr_stream_ctrl)); pr_debug("%s: 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n", __func__, - ctrl->hdr_data.eotf, - ctrl->hdr_data.display_primaries_x[0], - ctrl->hdr_data.display_primaries_y[0], - ctrl->hdr_data.display_primaries_x[1], - ctrl->hdr_data.display_primaries_y[1], - ctrl->hdr_data.display_primaries_x[2], - ctrl->hdr_data.display_primaries_y[2]); + ctrl->hdr_ctrl.hdr_stream.eotf, + ctrl->hdr_ctrl.hdr_stream.display_primaries_x[0], + ctrl->hdr_ctrl.hdr_stream.display_primaries_y[0], + ctrl->hdr_ctrl.hdr_stream.display_primaries_x[1], + ctrl->hdr_ctrl.hdr_stream.display_primaries_y[1], + ctrl->hdr_ctrl.hdr_stream.display_primaries_x[2], + ctrl->hdr_ctrl.hdr_stream.display_primaries_y[2]); pr_debug("%s: 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n", __func__, - ctrl->hdr_data.white_point_x, - ctrl->hdr_data.white_point_y, - ctrl->hdr_data.max_luminance, - ctrl->hdr_data.min_luminance, - ctrl->hdr_data.max_content_light_level, - ctrl->hdr_data.max_average_light_level); + ctrl->hdr_ctrl.hdr_stream.white_point_x, + ctrl->hdr_ctrl.hdr_stream.white_point_y, + ctrl->hdr_ctrl.hdr_stream.max_luminance, + ctrl->hdr_ctrl.hdr_stream.min_luminance, + ctrl->hdr_ctrl.hdr_stream.max_content_light_level, + ctrl->hdr_ctrl.hdr_stream.max_average_light_level); pr_debug("%s: 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n", __func__, - ctrl->hdr_data.pixel_encoding, - ctrl->hdr_data.colorimetry, - ctrl->hdr_data.range, - 
ctrl->hdr_data.bits_per_component, - ctrl->hdr_data.content_type); + ctrl->hdr_ctrl.hdr_stream.pixel_encoding, + ctrl->hdr_ctrl.hdr_stream.colorimetry, + ctrl->hdr_ctrl.hdr_stream.range, + ctrl->hdr_ctrl.hdr_stream.bits_per_component, + ctrl->hdr_ctrl.hdr_stream.content_type); + hdr_op = hdmi_hdr_get_ops(ctrl->curr_hdr_state, + ctrl->hdr_ctrl.hdr_state); + + if (hdr_op == HDR_SEND_INFO) + hdmi_panel_set_hdr_infoframe(ctrl); + else if (hdr_op == HDR_CLEAR_INFO) + hdmi_panel_clear_hdr_infoframe(ctrl); - hdmi_panel_set_hdr_infoframe(ctrl); + ctrl->curr_hdr_state = ctrl->hdr_ctrl.hdr_state; ret = strnlen(buf, PAGE_SIZE); end: @@ -2106,6 +2122,8 @@ static int hdmi_tx_init_features(struct hdmi_tx_ctrl *hdmi_ctrl, goto err; } + /* reset HDR state */ + hdmi_ctrl->curr_hdr_state = HDR_DISABLE; return 0; err: hdmi_tx_deinit_features(hdmi_ctrl, deinit_features); @@ -2373,7 +2391,15 @@ static void hdmi_tx_hpd_int_work(struct work_struct *work) mutex_unlock(&hdmi_ctrl->tx_lock); - hdmi_tx_send_cable_notification(hdmi_ctrl, hdmi_ctrl->hpd_state, false); + if (hdmi_ctrl->hpd_state) + hdmi_tx_send_video_notification(hdmi_ctrl, + hdmi_ctrl->hpd_state, true); + else { + hdmi_tx_send_audio_notification(hdmi_ctrl, + hdmi_ctrl->hpd_state); + hdmi_tx_send_video_notification(hdmi_ctrl, + hdmi_ctrl->hpd_state, true); + } } /* hdmi_tx_hpd_int_work */ static int hdmi_tx_check_capability(struct hdmi_tx_ctrl *hdmi_ctrl) @@ -2863,11 +2889,12 @@ static void hdmi_panel_set_hdr_infoframe(struct hdmi_tx_ctrl *ctrl) packet_header = type_code | (version << 8) | (length << 16); DSS_REG_W(io, HDMI_GENERIC0_HDR, packet_header); - packet_payload = (ctrl->hdr_data.eotf << 8); + packet_payload = (ctrl->hdr_ctrl.hdr_stream.eotf << 8); if (hdmi_tx_metadata_type_one(ctrl)) { - packet_payload |= (descriptor_id << 16) - | (HDMI_GET_LSB(ctrl->hdr_data.display_primaries_x[0]) - << 24); + packet_payload |= + (descriptor_id << 16) + | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream. + display_primaries_x[0]) << 24); DSS_REG_W(io, HDMI_GENERIC0_0, packet_payload); } else { pr_debug("%s: Metadata Type 1 not supported\n", __func__); @@ -2876,44 +2903,56 @@ static void hdmi_panel_set_hdr_infoframe(struct hdmi_tx_ctrl *ctrl) } packet_payload = - (HDMI_GET_MSB(ctrl->hdr_data.display_primaries_x[0])) - | (HDMI_GET_LSB(ctrl->hdr_data.display_primaries_y[0]) << 8) - | (HDMI_GET_MSB(ctrl->hdr_data.display_primaries_y[0]) << 16) - | (HDMI_GET_LSB(ctrl->hdr_data.display_primaries_x[1]) << 24); + (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream.display_primaries_x[0])) + | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream. + display_primaries_y[0]) << 8) + | (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream. + display_primaries_y[0]) << 16) + | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream. + display_primaries_x[1]) << 24); DSS_REG_W(io, HDMI_GENERIC0_1, packet_payload); packet_payload = - (HDMI_GET_MSB(ctrl->hdr_data.display_primaries_x[1])) - | (HDMI_GET_LSB(ctrl->hdr_data.display_primaries_y[1]) << 8) - | (HDMI_GET_MSB(ctrl->hdr_data.display_primaries_y[1]) << 16) - | (HDMI_GET_LSB(ctrl->hdr_data.display_primaries_x[2]) << 24); + (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream.display_primaries_x[1])) + | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream. + display_primaries_y[1]) << 8) + | (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream. + display_primaries_y[1]) << 16) + | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream. 
+ display_primaries_x[2]) << 24); DSS_REG_W(io, HDMI_GENERIC0_2, packet_payload); packet_payload = - (HDMI_GET_MSB(ctrl->hdr_data.display_primaries_x[2])) - | (HDMI_GET_LSB(ctrl->hdr_data.display_primaries_y[2]) << 8) - | (HDMI_GET_MSB(ctrl->hdr_data.display_primaries_y[2]) << 16) - | (HDMI_GET_LSB(ctrl->hdr_data.white_point_x) << 24); + (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream.display_primaries_x[2])) + | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream. + display_primaries_y[2]) << 8) + | (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream. + display_primaries_y[2]) << 16) + | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream.white_point_x) << 24); DSS_REG_W(io, HDMI_GENERIC0_3, packet_payload); packet_payload = - (HDMI_GET_MSB(ctrl->hdr_data.white_point_x)) - | (HDMI_GET_LSB(ctrl->hdr_data.white_point_y) << 8) - | (HDMI_GET_MSB(ctrl->hdr_data.white_point_y) << 16) - | (HDMI_GET_LSB(ctrl->hdr_data.max_luminance) << 24); + (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream.white_point_x)) + | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream.white_point_y) << 8) + | (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream.white_point_y) << 16) + | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream.max_luminance) << 24); DSS_REG_W(io, HDMI_GENERIC0_4, packet_payload); packet_payload = - (HDMI_GET_MSB(ctrl->hdr_data.max_luminance)) - | (HDMI_GET_LSB(ctrl->hdr_data.min_luminance) << 8) - | (HDMI_GET_MSB(ctrl->hdr_data.min_luminance) << 16) - | (HDMI_GET_LSB(ctrl->hdr_data.max_content_light_level) << 24); + (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream.max_luminance)) + | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream.min_luminance) << 8) + | (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream.min_luminance) << 16) + | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream. + max_content_light_level) << 24); DSS_REG_W(io, HDMI_GENERIC0_5, packet_payload); packet_payload = - (HDMI_GET_MSB(ctrl->hdr_data.max_content_light_level)) - | (HDMI_GET_LSB(ctrl->hdr_data.max_average_light_level) << 8) - | (HDMI_GET_MSB(ctrl->hdr_data.max_average_light_level) << 16); + (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream. + max_content_light_level)) + | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream. + max_average_light_level) << 8) + | (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream. 
+ max_average_light_level) << 16); DSS_REG_W(io, HDMI_GENERIC0_6, packet_payload); enable_packet_control: @@ -2928,6 +2967,32 @@ enable_packet_control: DSS_REG_W(io, HDMI_GEN_PKT_CTRL, packet_control); } +static void hdmi_panel_clear_hdr_infoframe(struct hdmi_tx_ctrl *ctrl) +{ + u32 packet_control = 0; + struct dss_io_data *io = NULL; + + if (!ctrl) { + pr_err("%s: invalid input\n", __func__); + return; + } + + if (!hdmi_tx_is_hdr_supported(ctrl)) { + pr_err("%s: Sink does not support HDR\n", __func__); + return; + } + + io = &ctrl->pdata.io[HDMI_TX_CORE_IO]; + if (!io->base) { + pr_err("%s: core io not inititalized\n", __func__); + return; + } + + packet_control = DSS_REG_R_ND(io, HDMI_GEN_PKT_CTRL); + packet_control &= ~HDMI_GEN_PKT_CTRL_CLR_MASK; + DSS_REG_W(io, HDMI_GEN_PKT_CTRL, packet_control); +} + static int hdmi_tx_audio_info_setup(struct platform_device *pdev, struct msm_ext_disp_audio_setup_params *params) { @@ -3145,6 +3210,10 @@ static int hdmi_tx_power_off(struct hdmi_tx_ctrl *hdmi_ctrl) if (hdmi_ctrl->panel_ops.off) hdmi_ctrl->panel_ops.off(pdata); + hdmi_tx_set_mode(hdmi_ctrl, false); + hdmi_tx_phy_reset(hdmi_ctrl); + hdmi_tx_set_mode(hdmi_ctrl, true); + hdmi_tx_core_off(hdmi_ctrl); hdmi_ctrl->panel_power_on = false; @@ -3169,6 +3238,7 @@ static int hdmi_tx_power_on(struct hdmi_tx_ctrl *hdmi_ctrl) void *pdata = hdmi_tx_get_fd(HDMI_TX_FEAT_PANEL); void *edata = hdmi_tx_get_fd(HDMI_TX_FEAT_EDID); + hdmi_ctrl->hdcp_feature_on = hdcp_feature_on; hdmi_ctrl->vic = hdmi_panel_get_vic(&panel_data->panel_info, &hdmi_ctrl->ds_data); @@ -3988,7 +4058,8 @@ static int hdmi_tx_post_evt_handle_resume(struct hdmi_tx_ctrl *hdmi_ctrl) &hdmi_ctrl->hpd_int_done, HZ/10); if (!timeout) { pr_debug("cable removed during suspend\n"); - hdmi_tx_send_cable_notification(hdmi_ctrl, 0, false); + hdmi_tx_send_audio_notification(hdmi_ctrl, 0); + hdmi_tx_send_video_notification(hdmi_ctrl, 0, true); } } @@ -3999,7 +4070,8 @@ static int hdmi_tx_post_evt_handle_panel_on(struct hdmi_tx_ctrl *hdmi_ctrl) { if (hdmi_ctrl->panel_suspend) { pr_debug("panel suspend has triggered\n"); - hdmi_tx_send_cable_notification(hdmi_ctrl, 0, false); + hdmi_tx_send_audio_notification(hdmi_ctrl, 0); + hdmi_tx_send_video_notification(hdmi_ctrl, 0, true); } return 0; diff --git a/drivers/video/fbdev/msm/mdss_hdmi_tx.h b/drivers/video/fbdev/msm/mdss_hdmi_tx.h index 3469b8a5819f..ad02003631f6 100644 --- a/drivers/video/fbdev/msm/mdss_hdmi_tx.h +++ b/drivers/video/fbdev/msm/mdss_hdmi_tx.h @@ -21,6 +21,7 @@ #include "mdss_hdmi_audio.h" #define MAX_SWITCH_NAME_SIZE 5 +#define HDMI_GEN_PKT_CTRL_CLR_MASK 0x7 enum hdmi_tx_io_type { HDMI_TX_CORE_IO, @@ -90,7 +91,7 @@ struct hdmi_tx_ctrl { struct msm_ext_disp_audio_setup_params audio_params; struct msm_ext_disp_init_data ext_audio_data; struct work_struct fps_work; - struct mdp_hdr_stream hdr_data; + struct mdp_hdr_stream_ctrl hdr_ctrl; spinlock_t hpd_state_lock; @@ -116,6 +117,7 @@ struct hdmi_tx_ctrl { u8 hdcp_status; u8 spd_vendor_name[9]; u8 spd_product_description[17]; + u8 curr_hdr_state; bool hdcp_feature_on; bool hpd_disabled; diff --git a/drivers/video/fbdev/msm/mdss_hdmi_util.c b/drivers/video/fbdev/msm/mdss_hdmi_util.c index 827013d06412..5bc46d8c8f92 100644 --- a/drivers/video/fbdev/msm/mdss_hdmi_util.c +++ b/drivers/video/fbdev/msm/mdss_hdmi_util.c @@ -16,6 +16,7 @@ #include <linux/io.h> #include <linux/delay.h> #include <linux/msm_mdp.h> +#include <linux/msm_mdp_ext.h> #include "mdss_hdmi_util.h" #define RESOLUTION_NAME_STR_LEN 30 @@ -1811,3 +1812,51 @@ int 
hdmi_hdcp2p2_ddc_read_rxstatus(struct hdmi_tx_ddc_ctrl *ctrl) return rc; } + +u8 hdmi_hdr_get_ops(u8 curr_state, u8 new_state) +{ + + /** There could be 3 valid state transitions: + * 1. HDR_DISABLE -> HDR_ENABLE + * + * In this transition, we shall start sending + * HDR metadata with metadata from the HDR clip + * + * 2. HDR_ENABLE -> HDR_RESET + * + * In this transition, we will keep sending + * HDR metadata but with EOTF and metadata as 0 + * + * 3. HDR_RESET -> HDR_ENABLE + * + * In this transition, we will start sending + * HDR metadata with metadata from the HDR clip + * + * 4. HDR_RESET -> HDR_DISABLE + * + * In this transition, we will stop sending + * metadata to the sink and clear PKT_CTRL register + * bits. + */ + + if ((curr_state == HDR_DISABLE) + && (new_state == HDR_ENABLE)) { + pr_debug("State changed HDR_DISABLE ---> HDR_ENABLE\n"); + return HDR_SEND_INFO; + } else if ((curr_state == HDR_ENABLE) + && (new_state == HDR_RESET)) { + pr_debug("State changed HDR_ENABLE ---> HDR_RESET\n"); + return HDR_SEND_INFO; + } else if ((curr_state == HDR_RESET) + && (new_state == HDR_ENABLE)) { + pr_debug("State changed HDR_RESET ---> HDR_ENABLE\n"); + return HDR_SEND_INFO; + } else if ((curr_state == HDR_RESET) + && (new_state == HDR_DISABLE)) { + pr_debug("State changed HDR_RESET ---> HDR_DISABLE\n"); + return HDR_CLEAR_INFO; + } + + pr_debug("Unsupported OR no state change\n"); + return HDR_UNSUPPORTED_OP; +} diff --git a/drivers/video/fbdev/msm/mdss_hdmi_util.h b/drivers/video/fbdev/msm/mdss_hdmi_util.h index 4fd659616bcc..fe554f8e9e67 100644 --- a/drivers/video/fbdev/msm/mdss_hdmi_util.h +++ b/drivers/video/fbdev/msm/mdss_hdmi_util.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -425,6 +425,12 @@ enum hdmi_tx_hdcp2p2_rxstatus_intr_mask { RXSTATUS_REAUTH_REQ = BIT(14), }; +enum hdmi_hdr_op { + HDR_UNSUPPORTED_OP, + HDR_SEND_INFO, + HDR_CLEAR_INFO +}; + struct hdmi_tx_hdcp2p2_ddc_data { enum hdmi_tx_hdcp2p2_rxstatus_intr_mask intr_mask; u32 timeout_ms; @@ -518,5 +524,5 @@ void hdmi_hdcp2p2_ddc_disable(struct hdmi_tx_ddc_ctrl *ctrl); int hdmi_hdcp2p2_ddc_read_rxstatus(struct hdmi_tx_ddc_ctrl *ctrl); int hdmi_utils_get_timeout_in_hysnc(struct msm_hdmi_mode_timing_info *timing, u32 timeout_ms); - +u8 hdmi_hdr_get_ops(u8 curr_state, u8 new_state); #endif /* __HDMI_UTIL_H__ */ diff --git a/drivers/video/fbdev/msm/mdss_mdp.h b/drivers/video/fbdev/msm/mdss_mdp.h index feea8986af91..54b792305eb5 100644 --- a/drivers/video/fbdev/msm/mdss_mdp.h +++ b/drivers/video/fbdev/msm/mdss_mdp.h @@ -86,7 +86,7 @@ #define XIN_HALT_TIMEOUT_US 0x4000 -#define MAX_LAYER_COUNT 0xC +#define MAX_LAYER_COUNT 0xD /* For SRC QSEED3, when user space does not send the scaler information, * this flag allows pixel _extension to be programmed when scaler is disabled @@ -995,6 +995,8 @@ struct mdss_overlay_private { struct task_struct *thread; u8 secure_transition_state; + + bool cache_null_commit; /* Cache if preceding commit was NULL */ }; struct mdss_mdp_set_ot_params { diff --git a/drivers/video/fbdev/msm/mdss_mdp_layer.c b/drivers/video/fbdev/msm/mdss_mdp_layer.c index ff93c343d41f..9e9f37ce0b23 100644 --- a/drivers/video/fbdev/msm/mdss_mdp_layer.c +++ b/drivers/video/fbdev/msm/mdss_mdp_layer.c @@ -1853,9 +1853,15 @@ static int __validate_secure_session(struct mdss_overlay_private *mdp5_data) pr_err("secure-camera cnt:%d secure video:%d secure display:%d\n", secure_cam_pipes, secure_vid_pipes, sd_pipes); return -EINVAL; - } else { - return 0; + } else if (mdp5_data->ctl->is_video_mode && + ((sd_pipes && !mdp5_data->sd_enabled) || + (!sd_pipes && mdp5_data->sd_enabled)) && + !mdp5_data->cache_null_commit) { + pr_err("NULL commit missing before display secure session entry/exit\n"); + return -EINVAL; } + + return 0; } /* @@ -3191,11 +3197,14 @@ int mdss_mdp_layer_atomic_validate_wfd(struct msm_fb_data_type *mfd, goto validate_failed; } + mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON); rc = mdss_mdp_wfd_setup(wfd, output_layer); if (rc) { pr_err("fail to prepare wfd = %d\n", rc); + mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF); goto validate_failed; } + mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF); rc = mdss_mdp_layer_atomic_validate(mfd, file, commit); if (rc) { diff --git a/drivers/video/fbdev/msm/mdss_mdp_overlay.c b/drivers/video/fbdev/msm/mdss_mdp_overlay.c index 305fff6b5695..11c159630747 100644 --- a/drivers/video/fbdev/msm/mdss_mdp_overlay.c +++ b/drivers/video/fbdev/msm/mdss_mdp_overlay.c @@ -2375,6 +2375,8 @@ static void __overlay_set_secure_transition_state(struct msm_fb_data_type *mfd) /* Reset the secure transition state */ mdp5_data->secure_transition_state = SECURE_TRANSITION_NONE; + mdp5_data->cache_null_commit = list_empty(&mdp5_data->pipes_used); + /* * Secure transition would be NONE in two conditions: * 1. 
All the features are already disabled and state remains @@ -2584,6 +2586,7 @@ int mdss_mdp_overlay_kickoff(struct msm_fb_data_type *mfd, ATRACE_BEGIN("sspp_programming"); ret = __overlay_queue_pipes(mfd); ATRACE_END("sspp_programming"); + mutex_unlock(&mdp5_data->list_lock); mdp5_data->kickoff_released = false; diff --git a/drivers/video/fbdev/msm/mdss_rotator.c b/drivers/video/fbdev/msm/mdss_rotator.c index 2028222748c3..78bccdbfee3b 100644 --- a/drivers/video/fbdev/msm/mdss_rotator.c +++ b/drivers/video/fbdev/msm/mdss_rotator.c @@ -1124,6 +1124,7 @@ static void mdss_rotator_release_from_work_distribution( bool free_perf = false; u32 wb_idx = entry->queue->hw->wb_id; + mutex_lock(&mgr->lock); mutex_lock(&entry->perf->work_dis_lock); if (entry->perf->work_distribution[wb_idx]) entry->perf->work_distribution[wb_idx]--; @@ -1147,6 +1148,7 @@ static void mdss_rotator_release_from_work_distribution( mdss_rotator_clk_ctrl(mgr, false); entry->perf = NULL; } + mutex_unlock(&mgr->lock); } } @@ -2043,7 +2045,6 @@ static int mdss_rotator_close_session(struct mdss_rot_mgr *mgr, list_del_init(&perf->list); mutex_unlock(&perf->work_dis_lock); mutex_unlock(&private->perf_lock); - mutex_unlock(&mgr->lock); if (offload_release_work) goto done; @@ -2056,6 +2057,7 @@ static int mdss_rotator_close_session(struct mdss_rot_mgr *mgr, done: pr_debug("Closed session id:%u", id); ATRACE_END(__func__); + mutex_unlock(&mgr->lock); return 0; } diff --git a/drivers/xen/biomerge.c b/drivers/xen/biomerge.c index 4da69dbf7dca..1bdd02a6d6ac 100644 --- a/drivers/xen/biomerge.c +++ b/drivers/xen/biomerge.c @@ -10,8 +10,7 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1, unsigned long bfn1 = pfn_to_bfn(page_to_pfn(vec1->bv_page)); unsigned long bfn2 = pfn_to_bfn(page_to_pfn(vec2->bv_page)); - return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) && - ((bfn1 == bfn2) || ((bfn1+1) == bfn2)); + return bfn1 + PFN_DOWN(vec1->bv_offset + vec1->bv_len) == bfn2; #else /* * XXX: Add support for merging bio_vec when using different page diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index 26a3b389a265..fa8df3fef6fc 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -183,15 +183,20 @@ cifs_bp_rename_retry: } /* + * Don't allow path components longer than the server max. * Don't allow the separator character in a path component. * The VFS will not allow "/", but "\" is allowed by posix. 
*/ static int -check_name(struct dentry *direntry) +check_name(struct dentry *direntry, struct cifs_tcon *tcon) { struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); int i; + if (unlikely(direntry->d_name.len > + tcon->fsAttrInfo.MaxPathNameComponentLength)) + return -ENAMETOOLONG; + if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) { for (i = 0; i < direntry->d_name.len; i++) { if (direntry->d_name.name[i] == '\\') { @@ -489,10 +494,6 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry, return finish_no_open(file, res); } - rc = check_name(direntry); - if (rc) - return rc; - xid = get_xid(); cifs_dbg(FYI, "parent inode = 0x%p name is: %pd and dentry = 0x%p\n", @@ -505,6 +506,11 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry, } tcon = tlink_tcon(tlink); + + rc = check_name(direntry, tcon); + if (rc) + goto out_free_xid; + server = tcon->ses->server; if (server->ops->new_lease_key) @@ -765,7 +771,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, } pTcon = tlink_tcon(tlink); - rc = check_name(direntry); + rc = check_name(direntry, pTcon); if (rc) goto lookup_out; diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index f4afa3b1cc56..6c484ddf26a9 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -2768,8 +2768,8 @@ copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf, kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) * le32_to_cpu(pfs_inf->SectorsPerAllocationUnit); kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits); - kst->f_bfree = le64_to_cpu(pfs_inf->ActualAvailableAllocationUnits); - kst->f_bavail = le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits); + kst->f_bfree = kst->f_bavail = + le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits); return; } diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 8772bfc3415b..45ef9975caec 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -500,6 +500,8 @@ static int ext4_find_unwritten_pgoff(struct inode *inode, lastoff = page_offset(page); bh = head = page_buffers(page); do { + if (lastoff + bh->b_size <= startoff) + goto next; if (buffer_uptodate(bh) || buffer_unwritten(bh)) { if (whence == SEEK_DATA) @@ -514,6 +516,7 @@ static int ext4_find_unwritten_pgoff(struct inode *inode, unlock_page(page); goto out; } +next: lastoff += bh->b_size; bh = bh->b_this_page; } while (bh != head); diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index 34038e3598d5..74516efd874c 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c @@ -1926,7 +1926,8 @@ retry: n_desc_blocks = o_desc_blocks + le16_to_cpu(es->s_reserved_gdt_blocks); n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb); - n_blocks_count = n_group * EXT4_BLOCKS_PER_GROUP(sb); + n_blocks_count = (ext4_fsblk_t)n_group * + EXT4_BLOCKS_PER_GROUP(sb); n_group--; /* set to last group number */ } diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 66b34b14cb6b..f0de8fe294f4 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -55,7 +55,7 @@ struct fuse_file *fuse_file_alloc(struct fuse_conn *fc) { struct fuse_file *ff; - ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL); + ff = kzalloc(sizeof(struct fuse_file), GFP_KERNEL); if (unlikely(!ff)) return NULL; diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig index f31fd0dd92c6..b1daeafbea92 100644 --- a/fs/nfs/Kconfig +++ b/fs/nfs/Kconfig @@ -121,6 +121,7 @@ config PNFS_FILE_LAYOUT config PNFS_BLOCK tristate depends on NFS_V4_1 && BLK_DEV_DM + depends on 64BIT || LBDAF default NFS_V4 config PNFS_OBJLAYOUT diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c 
b/fs/nfs/flexfilelayout/flexfilelayoutdev.c index e125e55de86d..2603d7589946 100644 --- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c +++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c @@ -30,6 +30,7 @@ void nfs4_ff_layout_free_deviceid(struct nfs4_ff_layout_ds *mirror_ds) { nfs4_print_deviceid(&mirror_ds->id_node.deviceid); nfs4_pnfs_ds_put(mirror_ds->ds); + kfree(mirror_ds->ds_versions); kfree_rcu(mirror_ds, id_node.rcu); } diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 3f68a25f2169..544672b440de 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -129,7 +129,7 @@ static void next_decode_page(struct nfsd4_compoundargs *argp) argp->p = page_address(argp->pagelist[0]); argp->pagelist++; if (argp->pagelen < PAGE_SIZE) { - argp->end = argp->p + (argp->pagelen>>2); + argp->end = argp->p + XDR_QUADLEN(argp->pagelen); argp->pagelen = 0; } else { argp->end = argp->p + (PAGE_SIZE>>2); @@ -1246,9 +1246,7 @@ nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write) argp->pagelen -= pages * PAGE_SIZE; len -= pages * PAGE_SIZE; - argp->p = (__be32 *)page_address(argp->pagelist[0]); - argp->pagelist++; - argp->end = argp->p + XDR_QUADLEN(PAGE_SIZE); + next_decode_page(argp); } argp->p += XDR_QUADLEN(len); diff --git a/fs/sdcardfs/file.c b/fs/sdcardfs/file.c index 6076c342dae6..5ac0b0bbb0ec 100644 --- a/fs/sdcardfs/file.c +++ b/fs/sdcardfs/file.c @@ -104,12 +104,19 @@ static long sdcardfs_unlocked_ioctl(struct file *file, unsigned int cmd, { long err = -ENOTTY; struct file *lower_file; + const struct cred *saved_cred = NULL; + struct dentry *dentry = file->f_path.dentry; + struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb); lower_file = sdcardfs_lower_file(file); /* XXX: use vfs_ioctl if/when VFS exports it */ if (!lower_file || !lower_file->f_op) goto out; + + /* save current_cred and override it */ + OVERRIDE_CRED(sbi, saved_cred, SDCARDFS_I(file_inode(file))); + if (lower_file->f_op->unlocked_ioctl) err = lower_file->f_op->unlocked_ioctl(lower_file, cmd, arg); @@ -117,6 +124,7 @@ static long sdcardfs_unlocked_ioctl(struct file *file, unsigned int cmd, if (!err) sdcardfs_copy_and_fix_attrs(file_inode(file), file_inode(lower_file)); + REVERT_CRED(saved_cred); out: return err; } @@ -127,15 +135,23 @@ static long sdcardfs_compat_ioctl(struct file *file, unsigned int cmd, { long err = -ENOTTY; struct file *lower_file; + const struct cred *saved_cred = NULL; + struct dentry *dentry = file->f_path.dentry; + struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb); lower_file = sdcardfs_lower_file(file); /* XXX: use vfs_ioctl if/when VFS exports it */ if (!lower_file || !lower_file->f_op) goto out; + + /* save current_cred and override it */ + OVERRIDE_CRED(sbi, saved_cred, SDCARDFS_I(file_inode(file))); + if (lower_file->f_op->compat_ioctl) err = lower_file->f_op->compat_ioctl(lower_file, cmd, arg); + REVERT_CRED(saved_cred); out: return err; } diff --git a/include/linux/cpu.h b/include/linux/cpu.h index a157a69097b5..a3bcdfbef9ca 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -234,6 +234,7 @@ extern struct bus_type cpu_subsys; extern void cpu_hotplug_begin(void); extern void cpu_hotplug_done(void); extern void get_online_cpus(void); +extern void cpu_hotplug_mutex_held(void); extern void put_online_cpus(void); extern void cpu_hotplug_disable(void); extern void cpu_hotplug_enable(void); @@ -256,6 +257,7 @@ static inline void cpu_hotplug_done(void) {} #define cpu_hotplug_enable() do { } while (0) #define hotcpu_notifier(fn, pri) do { (void)(fn); } while 
(0) #define __hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) +#define cpu_hotplug_mutex_held() do { } while (0) /* These aren't inline functions due to a GCC bug. */ #define register_hotcpu_notifier(nb) ({ (void)(nb); 0; }) #define __register_hotcpu_notifier(nb) ({ (void)(nb); 0; }) diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 85a868ccb493..8397dc235e84 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -16,6 +16,7 @@ #ifdef CONFIG_CPUSETS +extern struct static_key cpusets_pre_enable_key; extern struct static_key cpusets_enabled_key; static inline bool cpusets_enabled(void) { @@ -30,12 +31,14 @@ static inline int nr_cpusets(void) static inline void cpuset_inc(void) { + static_key_slow_inc(&cpusets_pre_enable_key); static_key_slow_inc(&cpusets_enabled_key); } static inline void cpuset_dec(void) { static_key_slow_dec(&cpusets_enabled_key); + static_key_slow_dec(&cpusets_pre_enable_key); } extern int cpuset_init(void); @@ -104,7 +107,7 @@ extern void cpuset_print_current_mems_allowed(void); */ static inline unsigned int read_mems_allowed_begin(void) { - if (!cpusets_enabled()) + if (!static_key_false(&cpusets_pre_enable_key)) return 0; return read_seqcount_begin(¤t->mems_allowed_seq); @@ -118,7 +121,7 @@ static inline unsigned int read_mems_allowed_begin(void) */ static inline bool read_mems_allowed_retry(unsigned int seq) { - if (!cpusets_enabled()) + if (!static_key_false(&cpusets_enabled_key)) return false; return read_seqcount_retry(¤t->mems_allowed_seq, seq); diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h index fc481037478a..f3422440c45f 100644 --- a/include/linux/dma-iommu.h +++ b/include/linux/dma-iommu.h @@ -59,7 +59,6 @@ void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs); void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, struct dma_attrs *attrs); -int iommu_dma_supported(struct device *dev, u64 mask); int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr); #else diff --git a/include/linux/habmm.h b/include/linux/habmm.h new file mode 100644 index 000000000000..4d3a81f536d9 --- /dev/null +++ b/include/linux/habmm.h @@ -0,0 +1,38 @@ +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#include <uapi/linux/habmm.h> + +#ifndef _HABMM_H +#define _HABMM_H + +int32_t habmm_socket_open(int32_t *handle, uint32_t mm_ip_id, + uint32_t timeout, uint32_t flags); +int32_t habmm_socket_close(int32_t handle); +int32_t habmm_socket_send(int32_t handle, void *src_buff, uint32_t size_bytes, + uint32_t flags); +int32_t habmm_socket_recv(int32_t handle, void *dst_buff, uint32_t *size_bytes, + uint32_t timeout, uint32_t flags); +int32_t habmm_socket_sendto(int32_t handle, void *src_buff, uint32_t size_bytes, + int32_t remote_handle, uint32_t flags); +int32_t habmm_socket_recvfrom(int32_t handle, void *dst_buff, + uint32_t *size_bytes, uint32_t timeout, + int32_t *remote_handle, uint32_t flags); +int32_t habmm_export(int32_t handle, void *buff_to_share, uint32_t size_bytes, + uint32_t *export_id, uint32_t flags); +int32_t habmm_unexport(int32_t handle, uint32_t export_id, uint32_t flags); +int32_t habmm_import(int32_t handle, void **buff_shared, uint32_t size_bytes, + uint32_t export_id, uint32_t flags); +int32_t habmm_unimport(int32_t handle, uint32_t export_id, void *buff_shared, + uint32_t flags); + +#endif diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 1c1ff7e4faa4..021b1e9ff6cd 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -15,6 +15,8 @@ #include <net/net_namespace.h> #include <linux/sched/rt.h> +#include <asm/thread_info.h> + #ifdef CONFIG_SMP # define INIT_PUSHABLE_TASKS(tsk) \ .pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO), @@ -183,14 +185,21 @@ extern struct task_group root_task_group; # define INIT_KASAN(tsk) #endif +#ifdef CONFIG_THREAD_INFO_IN_TASK +# define INIT_TASK_TI(tsk) .thread_info = INIT_THREAD_INFO(tsk), +#else +# define INIT_TASK_TI(tsk) +#endif + /* * INIT_TASK is used to set up the first task table, touch at * your own risk!. Base=0, limit=0x1fffff (=2MB) */ #define INIT_TASK(tsk) \ { \ + INIT_TASK_TI(tsk) \ .state = 0, \ - .stack = &init_thread_info, \ + .stack = init_stack, \ .usage = ATOMIC_INIT(2), \ .flags = PF_KTHREAD, \ .prio = MAX_PRIO-20, \ diff --git a/include/linux/ipa_uc_offload.h b/include/linux/ipa_uc_offload.h index 0277e87a2570..85d0ce92e6f6 100644 --- a/include/linux/ipa_uc_offload.h +++ b/include/linux/ipa_uc_offload.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -163,6 +163,20 @@ struct ipa_perf_profile { u32 max_supported_bw_mbps; }; +/** + * struct ipa_uc_ready_params - uC ready CB parameters + * @is_uC_ready: uC loaded or not + * @priv : callback cookie + * @notify: callback + * @proto: uC offload protocol type + */ +struct ipa_uc_ready_params { + bool is_uC_ready; + void *priv; + ipa_uc_ready_cb notify; + enum ipa_uc_offload_proto proto; +}; + #if defined CONFIG_IPA || defined CONFIG_IPA3 /** @@ -223,6 +237,19 @@ int ipa_uc_offload_disconn_pipes(u32 clnt_hdl); */ int ipa_set_perf_profile(struct ipa_perf_profile *profile); + +/* + * To register uC ready callback if uC not ready + * and also check uC readiness + * if uC not ready only, register callback + */ +int ipa_uc_offload_reg_rdyCB(struct ipa_uc_ready_params *param); + +/* + * To de-register uC ready callback + */ +void ipa_uc_offload_dereg_rdyCB(enum ipa_uc_offload_proto proto); + #else /* (CONFIG_IPA || CONFIG_IPA3) */ static inline int ipa_uc_offload_reg_intf( @@ -254,6 +281,15 @@ static inline int ipa_set_perf_profile(struct ipa_perf_profile *profile) return -EPERM; } +static inline int ipa_uc_offload_reg_rdyCB(struct ipa_uc_ready_params *param) +{ + return -EPERM; +} + +static void ipa_uc_offload_dereg_rdyCB(enum ipa_uc_offload_proto proto) +{ +} + #endif /* CONFIG_IPA3 */ #endif /* _IPA_UC_OFFLOAD_H_ */ diff --git a/include/linux/kdb.h b/include/linux/kdb.h index a19bcf9e762e..410decacff8f 100644 --- a/include/linux/kdb.h +++ b/include/linux/kdb.h @@ -177,7 +177,7 @@ extern int kdb_get_kbd_char(void); static inline int kdb_process_cpu(const struct task_struct *p) { - unsigned int cpu = task_thread_info(p)->cpu; + unsigned int cpu = task_cpu(p); if (cpu > num_possible_cpus()) cpu = 0; return cpu; diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index e4937bbeae2c..33316a1ae98f 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -515,6 +515,10 @@ struct mm_struct { */ bool tlb_flush_pending; #endif +#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH + /* See flush_tlb_batched_pending() */ + bool tlb_flush_batched; +#endif struct uprobes_state uprobes_state; #ifdef CONFIG_X86_INTEL_MPX /* address of the bounds directory */ diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index 0065ffc9322b..08b3b8348fd7 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h @@ -170,6 +170,7 @@ extern int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error); extern int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd); extern int mmc_set_auto_bkops(struct mmc_card *card, bool enable); extern int mmc_suspend_clk_scaling(struct mmc_host *host); +extern void mmc_flush_detect_work(struct mmc_host *); #define MMC_ERASE_ARG 0x00000000 #define MMC_SECURE_ERASE_ARG 0x80000000 diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index aea4c0f2ef5f..65a188eeeeb6 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -519,6 +519,7 @@ struct mmc_host { unsigned int bus_resume_flags; #define MMC_BUSRESUME_MANUAL_RESUME (1 << 0) #define MMC_BUSRESUME_NEEDS_RESUME (1 << 1) + bool ignore_bus_resume_flags; unsigned int sdio_irqs; struct task_struct *sdio_irq_thread; diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 982b93ccfbe4..8b8a46ce32d0 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -199,6 +199,8 @@ 
enum power_supply_property { POWER_SUPPLY_PROP_LOW_POWER, POWER_SUPPLY_PROP_COOL_TEMP, POWER_SUPPLY_PROP_WARM_TEMP, + POWER_SUPPLY_PROP_COLD_TEMP, + POWER_SUPPLY_PROP_HOT_TEMP, POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL, POWER_SUPPLY_PROP_RESISTANCE, POWER_SUPPLY_PROP_RESISTANCE_CAPACITIVE, diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 60d15a080d7c..9d3eda39bcd2 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -37,7 +37,7 @@ void rcu_cpu_stall_reset(void); /* * Note a virtualization-based context switch. This is simply a * wrapper around rcu_note_context_switch(), which allows TINY_RCU - * to save a few bytes. + * to save a few bytes. The caller must have disabled interrupts. */ static inline void rcu_virt_note_context_switch(int cpu) { diff --git a/include/linux/restart_block.h b/include/linux/restart_block.h new file mode 100644 index 000000000000..0d905d8ec553 --- /dev/null +++ b/include/linux/restart_block.h @@ -0,0 +1,51 @@ +/* + * Common syscall restarting data + */ +#ifndef __LINUX_RESTART_BLOCK_H +#define __LINUX_RESTART_BLOCK_H + +#include <linux/compiler.h> +#include <linux/types.h> + +struct timespec; +struct compat_timespec; +struct pollfd; + +/* + * System call restart block. + */ +struct restart_block { + long (*fn)(struct restart_block *); + union { + /* For futex_wait and futex_wait_requeue_pi */ + struct { + u32 __user *uaddr; + u32 val; + u32 flags; + u32 bitset; + u64 time; + u32 __user *uaddr2; + } futex; + /* For nanosleep */ + struct { + clockid_t clockid; + struct timespec __user *rmtp; +#ifdef CONFIG_COMPAT + struct compat_timespec __user *compat_rmtp; +#endif + u64 expires; + } nanosleep; + /* For poll */ + struct { + struct pollfd __user *ufds; + int nfds; + int has_timeout; + unsigned long tv_sec; + unsigned long tv_nsec; + } poll; + }; +}; + +extern long do_no_restart_syscall(struct restart_block *parm); + +#endif /* __LINUX_RESTART_BLOCK_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index baf107821d9a..2716faadc618 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -882,6 +882,16 @@ struct signal_struct { #define SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */ +#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \ + SIGNAL_STOP_CONTINUED) + +static inline void signal_set_stop_flags(struct signal_struct *sig, + unsigned int flags) +{ + WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP)); + sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags; +} + /* If true, all threads except ->group_exit_task have pending SIGKILL */ static inline int signal_group_exit(const struct signal_struct *sig) { @@ -1601,6 +1611,13 @@ struct tlbflush_unmap_batch { }; struct task_struct { +#ifdef CONFIG_THREAD_INFO_IN_TASK + /* + * For reasons of header soup (see current_thread_info()), this + * must be the first element of task_struct. 
+ */ + struct thread_info thread_info; +#endif volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ void *stack; atomic_t usage; @@ -1610,6 +1627,9 @@ struct task_struct { #ifdef CONFIG_SMP struct llist_node wake_entry; int on_cpu; +#ifdef CONFIG_THREAD_INFO_IN_TASK + unsigned int cpu; /* current CPU */ +#endif unsigned int wakee_flips; unsigned long wakee_flip_decay_ts; struct task_struct *last_wakee; @@ -2181,22 +2201,6 @@ static inline pid_t task_tgid_nr(struct task_struct *tsk) static inline int pid_alive(const struct task_struct *p); static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns); -static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns) -{ - pid_t pid = 0; - - rcu_read_lock(); - if (pid_alive(tsk)) - pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns); - rcu_read_unlock(); - - return pid; -} - -static inline pid_t task_ppid_nr(const struct task_struct *tsk) -{ - return task_ppid_nr_ns(tsk, &init_pid_ns); -} static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) @@ -2231,6 +2235,23 @@ static inline pid_t task_tgid_vnr(struct task_struct *tsk) return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, NULL); } +static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns) +{ + pid_t pid = 0; + + rcu_read_lock(); + if (pid_alive(tsk)) + pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns); + rcu_read_unlock(); + + return pid; +} + +static inline pid_t task_ppid_nr(const struct task_struct *tsk) +{ + return task_ppid_nr_ns(tsk, &init_pid_ns); +} + /* obsolete, do not use */ static inline pid_t task_pgrp_nr(struct task_struct *tsk) { @@ -2758,7 +2779,9 @@ extern void set_curr_task(int cpu, struct task_struct *p); void yield(void); union thread_union { +#ifndef CONFIG_THREAD_INFO_IN_TASK struct thread_info thread_info; +#endif unsigned long stack[THREAD_SIZE/sizeof(long)]; }; @@ -3154,10 +3177,34 @@ static inline void threadgroup_change_end(struct task_struct *tsk) cgroup_threadgroup_change_end(tsk); } -#ifndef __HAVE_THREAD_FUNCTIONS +#ifdef CONFIG_THREAD_INFO_IN_TASK + +static inline struct thread_info *task_thread_info(struct task_struct *task) +{ + return &task->thread_info; +} + +/* + * When accessing the stack of a non-current task that might exit, use + * try_get_task_stack() instead. task_stack_page will return a pointer + * that could get freed out from under you. 
+ */ +static inline void *task_stack_page(const struct task_struct *task) +{ + return task->stack; +} + +#define setup_thread_stack(new,old) do { } while(0) + +static inline unsigned long *end_of_stack(const struct task_struct *task) +{ + return task->stack; +} + +#elif !defined(__HAVE_THREAD_FUNCTIONS) #define task_thread_info(task) ((struct thread_info *)(task)->stack) -#define task_stack_page(task) ((task)->stack) +#define task_stack_page(task) ((void *)(task)->stack) static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org) { @@ -3184,6 +3231,14 @@ static inline unsigned long *end_of_stack(struct task_struct *p) } #endif + +static inline void *try_get_task_stack(struct task_struct *tsk) +{ + return task_stack_page(tsk); +} + +static inline void put_task_stack(struct task_struct *tsk) {} + #define task_stack_end_corrupted(task) \ (*(end_of_stack(task)) != STACK_END_MAGIC) @@ -3194,7 +3249,7 @@ static inline int object_is_on_stack(void *obj) return (obj >= stack) && (obj < (stack + THREAD_SIZE)); } -extern void thread_info_cache_init(void); +extern void thread_stack_cache_init(void); #ifdef CONFIG_DEBUG_STACK_USAGE static inline unsigned long stack_not_used(struct task_struct *p) @@ -3458,7 +3513,11 @@ static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume) static inline unsigned int task_cpu(const struct task_struct *p) { +#ifdef CONFIG_THREAD_INFO_IN_TASK + return p->cpu; +#else return task_thread_info(p)->cpu; +#endif } static inline int task_node(const struct task_struct *p) diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index b2c1ea2a4739..f276869a945e 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -402,7 +402,7 @@ int uart_resume_port(struct uart_driver *reg, struct uart_port *port); static inline int uart_tx_stopped(struct uart_port *port) { struct tty_struct *tty = port->state->port.tty; - if (tty->stopped || port->hw_stopped) + if ((tty && tty->stopped) || port->hw_stopped) return 1; return 0; } diff --git a/include/linux/slab.h b/include/linux/slab.h index 4ef384b172e0..b4e739f04ee6 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -215,7 +215,7 @@ static inline const char *__check_heap_object(const void *ptr, * (PAGE_SIZE*2). Larger requests are passed to the page allocator. */ #define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1) -#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT) +#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1) #ifndef KMALLOC_SHIFT_LOW #define KMALLOC_SHIFT_LOW 3 #endif @@ -228,7 +228,7 @@ static inline const char *__check_heap_object(const void *ptr, * be allocated from the same page. 
*/ #define KMALLOC_SHIFT_HIGH PAGE_SHIFT -#define KMALLOC_SHIFT_MAX 30 +#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1) #ifndef KMALLOC_SHIFT_LOW #define KMALLOC_SHIFT_LOW 3 #endif diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h index 0a34489a46b6..17a33f31bfa2 100644 --- a/include/linux/stacktrace.h +++ b/include/linux/stacktrace.h @@ -23,6 +23,8 @@ extern void print_stack_trace(struct stack_trace *trace, int spaces); extern int snprint_stack_trace(char *buf, size_t size, struct stack_trace *trace, int spaces); +#define BACKPORTED_EXPORT_SAVE_STACK_TRACE_TSK_ARM + #ifdef CONFIG_USER_STACKTRACE_SUPPORT extern void save_stack_trace_user(struct stack_trace *trace); #else diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index 4cf89517783a..8933ecc2bc9f 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h @@ -9,46 +9,17 @@ #include <linux/types.h> #include <linux/bug.h> +#include <linux/restart_block.h> -struct timespec; -struct compat_timespec; - +#ifdef CONFIG_THREAD_INFO_IN_TASK /* - * System call restart block. + * For CONFIG_THREAD_INFO_IN_TASK kernels we need <asm/current.h> for the + * definition of current, but for !CONFIG_THREAD_INFO_IN_TASK kernels, + * including <asm/current.h> can cause a circular dependency on some platforms. */ -struct restart_block { - long (*fn)(struct restart_block *); - union { - /* For futex_wait and futex_wait_requeue_pi */ - struct { - u32 __user *uaddr; - u32 val; - u32 flags; - u32 bitset; - u64 time; - u32 __user *uaddr2; - } futex; - /* For nanosleep */ - struct { - clockid_t clockid; - struct timespec __user *rmtp; -#ifdef CONFIG_COMPAT - struct compat_timespec __user *compat_rmtp; +#include <asm/current.h> +#define current_thread_info() ((struct thread_info *)current) #endif - u64 expires; - } nanosleep; - /* For poll */ - struct { - struct pollfd __user *ufds; - int nfds; - int has_timeout; - unsigned long tv_sec; - unsigned long tv_nsec; - } poll; - }; -}; - -extern long do_no_restart_syscall(struct restart_block *parm); #include <linux/bitops.h> #include <asm/thread_info.h> diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 0e32bc71245e..2e04fa5a5b58 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -311,6 +311,7 @@ enum { __WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */ __WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */ + __WQ_ORDERED_EXPLICIT = 1 << 18, /* internal: alloc_ordered_workqueue() */ WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */ WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */ @@ -408,7 +409,8 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active, * Pointer to the allocated workqueue on success, %NULL on failure. */ #define alloc_ordered_workqueue(fmt, flags, args...) 
\ - alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args) + alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | \ + __WQ_ORDERED_EXPLICIT | (flags), 1, ##args) #define create_workqueue(name) \ alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, (name)) diff --git a/include/media/msm_cam_sensor.h b/include/media/msm_cam_sensor.h index 762f1c51620c..2c8b651147e0 100644 --- a/include/media/msm_cam_sensor.h +++ b/include/media/msm_cam_sensor.h @@ -84,6 +84,15 @@ struct msm_ir_cut_cfg_data_t32 { enum msm_ir_cut_cfg_type_t cfg_type; }; +struct msm_laser_led_cfg_data_t32 { + enum msm_laser_led_cfg_type_t cfg_type; + compat_uptr_t setting; + compat_uptr_t debug_reg; + uint32_t debug_reg_size; + uint16_t i2c_addr; + enum i2c_freq_mode_t i2c_freq_mode; +}; + struct eeprom_read_t32 { compat_uptr_t dbuffer; uint32_t num_bytes; @@ -276,7 +285,10 @@ struct msm_flash_cfg_data_t32 { #define VIDIOC_MSM_IR_CUT_CFG32 \ _IOWR('V', BASE_VIDIOC_PRIVATE + 15, struct msm_ir_cut_cfg_data_t32) -#endif + +#define VIDIOC_MSM_LASER_LED_CFG32 \ + _IOWR('V', BASE_VIDIOC_PRIVATE + 16, struct msm_laser_led_cfg_data_t32) #endif +#endif diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 3d11c7d26686..1997eed64c68 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -79,6 +79,9 @@ struct wiphy; /* Indicate backport support for FILS SK offload in cfg80211 */ #define CFG80211_FILS_SK_OFFLOAD_SUPPORT 1 +/* Indicate support for including KEK length in rekey data */ +#define CFG80211_REKEY_DATA_KEK_LEN 1 + /* * wireless hardware capability structures */ @@ -2059,9 +2062,14 @@ struct cfg80211_connect_params { * have to be updated as part of update_connect_params() call. * * @UPDATE_ASSOC_IES: Indicates whether association request IEs are updated + * @UPDATE_FILS_ERP_INFO: Indicates that FILS connection parameters (realm, + * username, erp sequence number and rrk) are updated + * @UPDATE_AUTH_TYPE: Indicates that Authentication type is updated */ enum cfg80211_connect_params_changed { UPDATE_ASSOC_IES = BIT(0), + UPDATE_FILS_ERP_INFO = BIT(1), + UPDATE_AUTH_TYPE = BIT(2), }; /** @@ -2283,12 +2291,14 @@ struct cfg80211_wowlan_wakeup { /** * struct cfg80211_gtk_rekey_data - rekey data - * @kek: key encryption key (NL80211_KEK_LEN bytes) + * @kek: key encryption key * @kck: key confirmation key (NL80211_KCK_LEN bytes) * @replay_ctr: replay counter (NL80211_REPLAY_CTR_LEN bytes) + * @kek_len: Length of @kek in octets */ struct cfg80211_gtk_rekey_data { const u8 *kek, *kck, *replay_ctr; + size_t kek_len; }; /** diff --git a/include/net/cnss_nl.h b/include/net/cnss_nl.h index 86c2fccc930e..b8a7cfdb7966 100644 --- a/include/net/cnss_nl.h +++ b/include/net/cnss_nl.h @@ -23,12 +23,16 @@ * @CLD80211_ATTR_VENDOR_DATA: Embed all other attributes in this nested * attribute. * @CLD80211_ATTR_DATA: Embed complete data in this attribute + * @CLD80211_ATTR_META_DATA: Embed meta data for above data. This will help + * wlan driver to peek into request message packet without opening up definition + * of complete request message. 
* * Any new message in future can be added as another attribute */ enum cld80211_attr { CLD80211_ATTR_VENDOR_DATA = 1, CLD80211_ATTR_DATA, + CLD80211_ATTR_META_DATA, /* add new attributes above here */ __CLD80211_ATTR_AFTER_LAST, diff --git a/include/net/ip.h b/include/net/ip.h index 8a8376239eac..c10f73803845 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -317,7 +317,7 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst, !forwarding) return dst_mtu(dst); - return min(dst->dev->mtu, IP_MAX_MTU); + return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU); } static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb) @@ -330,7 +330,7 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb) return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding); } - return min(skb_dst(skb)->dev->mtu, IP_MAX_MTU); + return min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU); } u32 ip_idents_reserve(u32 hash, int segs); diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h index e0f4109e64c6..c2aa73e5e6bb 100644 --- a/include/net/iw_handler.h +++ b/include/net/iw_handler.h @@ -556,7 +556,8 @@ iwe_stream_add_point(struct iw_request_info *info, char *stream, char *ends, memcpy(stream + lcp_len, ((char *) &iwe->u) + IW_EV_POINT_OFF, IW_EV_POINT_PK_LEN - IW_EV_LCP_PK_LEN); - memcpy(stream + point_len, extra, iwe->u.data.length); + if (iwe->u.data.length && extra) + memcpy(stream + point_len, extra, iwe->u.data.length); stream += event_len; } return stream; diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index e5bba897d206..7a5d6a073165 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -717,8 +717,11 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new, old = *pold; *pold = new; if (old != NULL) { - qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog); + unsigned int qlen = old->q.qlen; + unsigned int backlog = old->qstats.backlog; + qdisc_reset(old); + qdisc_tree_reduce_backlog(old, qlen, backlog); } sch_tree_unlock(sch); diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index ce13cf20f625..d33b17ba51d2 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -444,6 +444,8 @@ _sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length), member) #define _sctp_walk_params(pos, chunk, end, member)\ for (pos.v = chunk->member;\ + (pos.v + offsetof(struct sctp_paramhdr, length) + sizeof(pos.p->length) <=\ + (void *)chunk + end) &&\ pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\ ntohs(pos.p->length) >= sizeof(sctp_paramhdr_t);\ pos.v += WORD_ROUND(ntohs(pos.p->length))) @@ -454,6 +456,8 @@ _sctp_walk_errors((err), (chunk_hdr), ntohs((chunk_hdr)->length)) #define _sctp_walk_errors(err, chunk_hdr, end)\ for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \ sizeof(sctp_chunkhdr_t));\ + ((void *)err + offsetof(sctp_errhdr_t, length) + sizeof(err->length) <=\ + (void *)chunk_hdr + end) &&\ (void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\ ntohs(err->length) >= sizeof(sctp_errhdr_t); \ err = (sctp_errhdr_t *)((void *)err + WORD_ROUND(ntohs(err->length)))) diff --git a/include/soc/qcom/minidump.h b/include/soc/qcom/minidump.h index 5eb18cb1a365..d5970cfef643 100644 --- a/include/soc/qcom/minidump.h +++ b/include/soc/qcom/minidump.h @@ -39,11 +39,16 @@ struct md_region { extern int msm_minidump_add_region(const struct md_region *entry); /* Sets to true, if minidump table is initialized */ extern bool minidump_enabled; +extern 
void dump_stack_minidump(u64 sp); #else static inline int msm_minidump_add_region(const struct md_region *entry) { /* Return quietly, if minidump is not supported */ return 0; } + +static inline void dump_stack_minidump(u64 sp) {} #endif + + #endif diff --git a/include/soc/qcom/ramdump.h b/include/soc/qcom/ramdump.h index 50a17c8ad605..4e23ccf269a7 100644 --- a/include/soc/qcom/ramdump.h +++ b/include/soc/qcom/ramdump.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved. +/* Copyright (c) 2011-2014, 2017 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -16,6 +16,7 @@ struct device; struct ramdump_segment { + char *name; unsigned long address; void *v_address; unsigned long size; @@ -28,6 +29,8 @@ extern int do_ramdump(void *handle, struct ramdump_segment *segments, int nsegments); extern int do_elf_ramdump(void *handle, struct ramdump_segment *segments, int nsegments); +extern int do_minidump(void *handle, struct ramdump_segment *segments, + int nsegments); #else static inline void *create_ramdump_device(const char *dev_name, diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h index e0efe3fcf739..22f442ab85f9 100644 --- a/include/target/iscsi/iscsi_target_core.h +++ b/include/target/iscsi/iscsi_target_core.h @@ -562,6 +562,7 @@ struct iscsi_conn { #define LOGIN_FLAGS_READ_ACTIVE 1 #define LOGIN_FLAGS_CLOSED 2 #define LOGIN_FLAGS_READY 4 +#define LOGIN_FLAGS_INITIAL_PDU 8 unsigned long login_flags; struct delayed_work login_work; struct delayed_work login_cleanup_work; @@ -783,6 +784,7 @@ struct iscsi_np { int np_sock_type; enum np_thread_state_table np_thread_state; bool enabled; + atomic_t np_reset_count; enum iscsi_timer_flags_table np_login_timer_flags; u32 np_exports; enum np_flags_table np_flags; diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index ed66414b91f0..1adf8739980c 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -714,6 +714,7 @@ struct se_lun { #define SE_LUN_LINK_MAGIC 0xffff7771 u32 lun_link_magic; u32 lun_access; + bool lun_shutdown; u32 lun_index; /* RELATIVE TARGET PORT IDENTIFER */ diff --git a/include/uapi/drm/sde_drm.h b/include/uapi/drm/sde_drm.h index bef841446247..71159cb377d8 100644 --- a/include/uapi/drm/sde_drm.h +++ b/include/uapi/drm/sde_drm.h @@ -337,4 +337,14 @@ struct sde_drm_wb_cfg { uint64_t modes; }; +/** + * Define extended power modes supported by the SDE connectors. 
+ */ +#define SDE_MODE_DPMS_ON 0 +#define SDE_MODE_DPMS_LP1 1 +#define SDE_MODE_DPMS_LP2 2 +#define SDE_MODE_DPMS_STANDBY 3 +#define SDE_MODE_DPMS_SUSPEND 4 +#define SDE_MODE_DPMS_OFF 5 + #endif /* _SDE_DRM_H_ */ diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index 3d912dd57c08..b041334338f9 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild @@ -528,3 +528,4 @@ header-y += ipa_qmi_service_v01.h header-y += rmnet_ipa_fd_ioctl.h header-y += msm_ipa.h header-y += smcinvoke.h +header-y += habmm.h diff --git a/include/uapi/linux/habmm.h b/include/uapi/linux/habmm.h new file mode 100644 index 000000000000..902bd35ee474 --- /dev/null +++ b/include/uapi/linux/habmm.h @@ -0,0 +1,143 @@ +#ifndef HABMM_H +#define HABMM_H + +#include <linux/types.h> + +struct hab_send { + __u64 data; + __s32 vcid; + __u32 sizebytes; + __u32 flags; +}; + +struct hab_recv { + __u64 data; + __s32 vcid; + __u32 sizebytes; + __u32 flags; +}; + +struct hab_open { + __s32 vcid; + __u32 mmid; + __u32 timeout; + __u32 flags; +}; + +struct hab_close { + __s32 vcid; + __u32 flags; +}; + +struct hab_export { + __u64 buffer; + __s32 vcid; + __u32 sizebytes; + __u32 exportid; + __u32 flags; +}; + +struct hab_import { + __u64 index; + __u64 kva; + __s32 vcid; + __u32 sizebytes; + __u32 exportid; + __u32 flags; +}; + +struct hab_unexport { + __s32 vcid; + __u32 exportid; + __u32 flags; +}; + +struct hab_unimport { + __s32 vcid; + __u32 exportid; + __u64 kva; + __u32 flags; +}; + +#define HAB_IOC_TYPE 0x0A +#define HAB_MAX_MSG_SIZEBYTES 0x1000 +#define HAB_MAX_EXPORT_SIZE 0x8000000 + +#define HAB_MMID_CREATE(major, minor) ((major&0xFFFF) | ((minor&0xFF)<<16)) + +#define MM_AUD_START 100 +#define MM_AUD_1 101 +#define MM_AUD_2 102 +#define MM_AUD_3 103 +#define MM_AUD_4 104 +#define MM_AUD_END 105 + +#define MM_CAM_START 200 +#define MM_CAM 201 +#define MM_CAM_END 202 + +#define MM_DISP_START 300 +#define MM_DISP_1 301 +#define MM_DISP_2 302 +#define MM_DISP_3 303 +#define MM_DISP_4 304 +#define MM_DISP_5 305 +#define MM_DISP_END 306 + +#define MM_GFX_START 400 +#define MM_GFX 401 +#define MM_GFX_END 402 + +#define MM_VID_START 500 +#define MM_VID 501 +#define MM_VID_END 502 + +#define MM_MISC_START 600 +#define MM_MISC 601 +#define MM_MISC_END 602 + +#define MM_QCPE_START 700 +#define MM_QCPE_VM1 701 +#define MM_QCPE_VM2 702 +#define MM_QCPE_VM3 703 +#define MM_QCPE_VM4 704 +#define MM_QCPE_END 705 +#define MM_ID_MAX 706 + +#define HABMM_SOCKET_OPEN_FLAGS_SINGLE_BE_SINGLE_FE 0x00000000 +#define HABMM_SOCKET_OPEN_FLAGS_SINGLE_BE_SINGLE_DOMU 0x00000001 +#define HABMM_SOCKET_OPEN_FLAGS_SINGLE_BE_MULTI_DOMUS 0x00000002 + +#define HABMM_SOCKET_SEND_FLAGS_NON_BLOCKING 0x00000001 + +#define HABMM_SOCKET_RECV_FLAGS_NON_BLOCKING 0x00000001 + +#define HABMM_EXP_MEM_TYPE_DMA 0x00000001 + +#define HABMM_IMPORT_FLAGS_CACHED 0x00000001 + +#define IOCTL_HAB_SEND \ + _IOW(HAB_IOC_TYPE, 0x2, struct hab_send) + +#define IOCTL_HAB_RECV \ + _IOWR(HAB_IOC_TYPE, 0x3, struct hab_recv) + +#define IOCTL_HAB_VC_OPEN \ + _IOWR(HAB_IOC_TYPE, 0x4, struct hab_open) + +#define IOCTL_HAB_VC_CLOSE \ + _IOW(HAB_IOC_TYPE, 0x5, struct hab_close) + +#define IOCTL_HAB_VC_EXPORT \ + _IOWR(HAB_IOC_TYPE, 0x6, struct hab_export) + +#define IOCTL_HAB_VC_IMPORT \ + _IOWR(HAB_IOC_TYPE, 0x7, struct hab_import) + +#define IOCTL_HAB_VC_UNEXPORT \ + _IOW(HAB_IOC_TYPE, 0x8, struct hab_unexport) + +#define IOCTL_HAB_VC_UNIMPORT \ + _IOW(HAB_IOC_TYPE, 0x9, struct hab_unimport) + +#endif /* HABMM_H */ diff --git a/include/uapi/linux/msm_ipa.h 
b/include/uapi/linux/msm_ipa.h index 4d0b992d0ba6..0bdfc9741d19 100644 --- a/include/uapi/linux/msm_ipa.h +++ b/include/uapi/linux/msm_ipa.h @@ -91,7 +91,11 @@ #define IPA_IOCTL_ALLOC_IPV6CT_TABLE 49 #define IPA_IOCTL_DEL_NAT_TABLE 50 #define IPA_IOCTL_DEL_IPV6CT_TABLE 51 -#define IPA_IOCTL_MAX 52 +#define IPA_IOCTL_ADD_VLAN_IFACE 52 +#define IPA_IOCTL_DEL_VLAN_IFACE 53 +#define IPA_IOCTL_ADD_L2TP_VLAN_MAPPING 54 +#define IPA_IOCTL_DEL_L2TP_VLAN_MAPPING 55 +#define IPA_IOCTL_MAX 56 /** * max size of the header to be inserted @@ -124,6 +128,17 @@ #define IPA_WAN_MSG_IPv6_ADDR_GW_LEN 4 /** + * max number of lan clients supported per device type + * for LAN stats via HW. + */ +#define IPA_MAX_NUM_HW_PATH_CLIENTS 16 + +/** + * max number of destination pipes possible for a client. + */ +#define QMI_IPA_MAX_CLIENT_DST_PIPES 4 + +/** * the attributes of the rule (routing or filtering) */ #define IPA_FLT_TOS (1ul << 0) @@ -435,7 +450,23 @@ enum ipa_ssr_event { IPA_SSR_EVENT_MAX }; -#define IPA_EVENT_MAX_NUM ((int)IPA_SSR_EVENT_MAX) +enum ipa_vlan_l2tp_event { + ADD_VLAN_IFACE = IPA_SSR_EVENT_MAX, + DEL_VLAN_IFACE, + ADD_L2TP_VLAN_MAPPING, + DEL_L2TP_VLAN_MAPPING, + IPA_VLAN_L2TP_EVENT_MAX, +}; + +enum ipa_per_client_stats_event { + IPA_PER_CLIENT_STATS_CONNECT_EVENT = IPA_VLAN_L2TP_EVENT_MAX, + IPA_PER_CLIENT_STATS_DISCONNECT_EVENT, + IPA_PER_CLIENT_STATS_EVENT_MAX, + IPA_EVENT_MAX_NUM = IPA_PER_CLIENT_STATS_EVENT_MAX, +}; + +#define IPA_EVENT_MAX_NUM ((int)IPA_PER_CLIENT_STATS_EVENT_MAX) +#define IPA_EVENT_MAX ((int)IPA_EVENT_MAX_NUM) /** * enum ipa_rm_resource_name - IPA RM clients identification names @@ -1048,6 +1079,48 @@ struct ipa_rt_rule_del { }; /** + * struct ipa_rt_rule_add_ext - routing rule descriptor includes in + * and out parameters + * @rule: actual rule to be added + * @at_rear: add at back of routing table, it is NOT possible to add rules at + * the rear of the "default" routing tables + * @rt_rule_hdl: output parameter, handle to rule, valid when status is 0 + * @status: output parameter, status of routing rule add operation, + * @rule_id: rule_id to be assigned to the routing rule. In case client + * specifies rule_id as 0 the driver will assign a new rule_id + * 0 for success, + * -1 for failure + */ +struct ipa_rt_rule_add_ext { + struct ipa_rt_rule rule; + uint8_t at_rear; + uint32_t rt_rule_hdl; + int status; + uint16_t rule_id; +}; + +/** + * struct ipa_ioc_add_rt_rule - routing rule addition parameters (supports + * multiple rules and commit with rule_id); + * + * all rules MUST be added to same table + * @commit: should rules be written to IPA HW also? + * @ip: IP family of rule + * @rt_tbl_name: name of routing table resource + * @num_rules: number of routing rules that follow + * @ipa_rt_rule_add_ext rules: all rules need to go back to back here, + * no pointers + */ +struct ipa_ioc_add_rt_rule_ext { + uint8_t commit; + enum ipa_ip_type ip; + char rt_tbl_name[IPA_RESOURCE_NAME_MAX]; + uint8_t num_rules; + struct ipa_rt_rule_add_ext rules[0]; +}; + + +/** * struct ipa_ioc_del_rt_rule - routing rule deletion parameters (supports * multiple headers and commit) * @commit: should rules be removed from IPA HW also? 
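Editor's note: struct ipa_ioc_add_rt_rule_ext above ends in a zero-length rules[] array, so userspace has to size the ioctl payload as the header plus num_rules entries laid out back to back. Below is a minimal sketch of that sizing and of issuing the IPA_IOC_ADD_RT_RULE_EXT ioctl defined later in this patch. It is illustrative only: the already-open IPA device descriptor, the <linux/msm_ipa.h> include path, the IPA_IP_v4 enumerator, and the "rt_tbl_example" table name are assumptions for the example, not taken from the patch.

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/msm_ipa.h>   /* assumed install path of this UAPI header */

    static int add_rt_rules_ext(int ipa_fd,
                                const struct ipa_rt_rule_add_ext *rules,
                                uint8_t num_rules)
    {
            struct ipa_ioc_add_rt_rule_ext *req;
            /* Header followed by num_rules entries, back to back, no pointers. */
            size_t payload = sizeof(*req) +
                             (size_t)num_rules * sizeof(req->rules[0]);
            int ret;

            req = calloc(1, payload);
            if (!req)
                    return -1;

            req->commit = 1;                 /* also program the IPA HW */
            req->ip = IPA_IP_v4;             /* assumed IPv4 enumerator  */
            strncpy(req->rt_tbl_name, "rt_tbl_example",   /* illustrative name */
                    IPA_RESOURCE_NAME_MAX - 1);
            req->num_rules = num_rules;
            memcpy(req->rules, rules,
                   (size_t)num_rules * sizeof(req->rules[0]));

            ret = ioctl(ipa_fd, IPA_IOC_ADD_RT_RULE_EXT, req);

            /* On success the driver fills rt_rule_hdl and status per entry;
             * callers would copy the handles out of req->rules[] here. */
            free(req);
            return ret;
    }

Keeping the rules contiguous with the header, rather than behind pointers, means the driver can pull the whole request in one copy sized from num_rules, which is presumably why the structure comment insists the rules go back to back.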
@@ -1448,6 +1521,30 @@ struct ipa_ioc_nat_pdn_entry { }; /** + * struct ipa_ioc_vlan_iface_info - add vlan interface + * @name: interface name + * @vlan_id: VLAN ID + */ +struct ipa_ioc_vlan_iface_info { + char name[IPA_RESOURCE_NAME_MAX]; + uint8_t vlan_id; +}; + +/** + * struct ipa_ioc_l2tp_vlan_mapping_info - l2tp->vlan mapping info + * @iptype: l2tp tunnel IP type + * @l2tp_iface_name: l2tp interface name + * @l2tp_session_id: l2tp session id + * @vlan_iface_name: vlan interface name + */ +struct ipa_ioc_l2tp_vlan_mapping_info { + enum ipa_ip_type iptype; + char l2tp_iface_name[IPA_RESOURCE_NAME_MAX]; + uint8_t l2tp_session_id; + char vlan_iface_name[IPA_RESOURCE_NAME_MAX]; +}; + +/** * struct ipa_msg_meta - Format of the message meta-data. * @msg_type: the type of the message * @rsvd: reserved bits for future use. @@ -1582,6 +1679,52 @@ enum ipacm_client_enum { IPACM_CLIENT_WLAN, IPACM_CLIENT_MAX }; + +enum ipacm_per_client_device_type { + IPACM_CLIENT_DEVICE_TYPE_USB = 0, + IPACM_CLIENT_DEVICE_TYPE_WLAN = 1, + IPACM_CLIENT_DEVICE_TYPE_ETH = 2 +}; + +/** + * max number of device types supported. + */ +#define IPACM_MAX_CLIENT_DEVICE_TYPES 3 + +/** + * @lanIface - Name of the lan interface + * @mac: Mac address of the client. + */ +struct ipa_lan_client_msg { + char lanIface[IPA_RESOURCE_NAME_MAX]; + uint8_t mac[IPA_MAC_ADDR_SIZE]; +}; + +/** + * struct ipa_lan_client - lan client data + * @mac: MAC Address of the client. + * @client_idx: Client Index. + * @inited: Bool to indicate whether client info is set. + */ +struct ipa_lan_client { + uint8_t mac[IPA_MAC_ADDR_SIZE]; + int8_t client_idx; + uint8_t inited; +}; + +/** + * struct ipa_tether_device_info - tether device info indicated from IPACM + * @ul_src_pipe: Source pipe of the lan client. + * @hdr_len: Header length of the client. + * @num_clients: Number of clients connected. 
+ */ +struct ipa_tether_device_info { + int32_t ul_src_pipe; + uint8_t hdr_len; + uint32_t num_clients; + struct ipa_lan_client lan_client[IPA_MAX_NUM_HW_PATH_CLIENTS]; +}; + /** * actual IOCTLs supported by IPA driver */ @@ -1594,6 +1737,9 @@ enum ipacm_client_enum { #define IPA_IOC_ADD_RT_RULE _IOWR(IPA_IOC_MAGIC, \ IPA_IOCTL_ADD_RT_RULE, \ struct ipa_ioc_add_rt_rule *) +#define IPA_IOC_ADD_RT_RULE_EXT _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_ADD_RT_RULE_EXT, \ + struct ipa_ioc_add_rt_rule_ext *) #define IPA_IOC_ADD_RT_RULE_AFTER _IOWR(IPA_IOC_MAGIC, \ IPA_IOCTL_ADD_RT_RULE_AFTER, \ struct ipa_ioc_add_rt_rule_after *) @@ -1742,6 +1888,21 @@ enum ipacm_client_enum { IPA_IOCTL_GET_HW_VERSION, \ enum ipa_hw_type *) +#define IPA_IOC_ADD_VLAN_IFACE _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_ADD_VLAN_IFACE, \ + struct ipa_ioc_vlan_iface_info *) + +#define IPA_IOC_DEL_VLAN_IFACE _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_DEL_VLAN_IFACE, \ + struct ipa_ioc_vlan_iface_info *) + +#define IPA_IOC_ADD_L2TP_VLAN_MAPPING _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_ADD_L2TP_VLAN_MAPPING, \ + struct ipa_ioc_l2tp_vlan_mapping_info *) + +#define IPA_IOC_DEL_L2TP_VLAN_MAPPING _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_DEL_L2TP_VLAN_MAPPING, \ + struct ipa_ioc_l2tp_vlan_mapping_info *) /* * unique magic number of the Tethering bridge ioctls */ diff --git a/include/uapi/linux/msm_mdp_ext.h b/include/uapi/linux/msm_mdp_ext.h index da9ee3bcc525..61b5f8eaa7f9 100644 --- a/include/uapi/linux/msm_mdp_ext.h +++ b/include/uapi/linux/msm_mdp_ext.h @@ -821,4 +821,26 @@ struct mdp_hdr_stream { uint32_t content_type; uint32_t reserved[5]; }; + +/* hdr hdmi state takes possible values of 1, 2 and 4 respectively */ +#define HDR_ENABLE (1 << 0) +#define HDR_DISABLE (1 << 1) +#define HDR_RESET (1 << 2) + +/* + * HDR Control + * This encapsulates the HDR metadata as well as a state control + * for the HDR metadata as required by the HDMI spec to send the + * relevant metadata depending on the state of the HDR playback. + * hdr_state: Controls HDR state, takes values HDR_ENABLE, HDR_DISABLE + * and HDR_RESET. + * hdr_meta: Metadata sent by the userspace for the HDR clip. + */ + +#define DRM_MSM_EXT_PANEL_HDR_CTRL +struct mdp_hdr_stream_ctrl { + __u8 hdr_state; /* HDR state */ + struct mdp_hdr_stream hdr_stream; /* HDR metadata */ +}; + #endif diff --git a/include/uapi/linux/rmnet_ipa_fd_ioctl.h b/include/uapi/linux/rmnet_ipa_fd_ioctl.h index f04ac495a5c0..13dac9a1526d 100644 --- a/include/uapi/linux/rmnet_ipa_fd_ioctl.h +++ b/include/uapi/linux/rmnet_ipa_fd_ioctl.h @@ -33,6 +33,12 @@ #define WAN_IOCTL_QUERY_DL_FILTER_STATS 8 #define WAN_IOCTL_ADD_FLT_RULE_EX 9 #define WAN_IOCTL_QUERY_TETHER_STATS_ALL 10 +#define WAN_IOCTL_ADD_UL_FLT_RULE 11 +#define WAN_IOCTL_ENABLE_PER_CLIENT_STATS 12 +#define WAN_IOCTL_QUERY_PER_CLIENT_STATS 13 +#define WAN_IOCTL_SET_LAN_CLIENT_INFO 14 +#define WAN_IOCTL_CLEAR_LAN_CLIENT_INFO 15 +#define WAN_IOCTL_SEND_LAN_CLIENT_MSG 16 /* User space may not have this defined. */ #ifndef IFNAMSIZ @@ -126,6 +132,57 @@ struct wan_ioctl_query_dl_filter_stats { uint32_t index; }; +struct wan_ioctl_send_lan_client_msg { + /* Lan client info. */ + struct ipa_lan_client_msg lan_client; + /* Event to indicate whether client is + * connected or disconnected. + */ + enum ipa_per_client_stats_event client_event; +}; + +struct wan_ioctl_lan_client_info { + /* Device type of the client. */ + enum ipacm_per_client_device_type device_type; + /* MAC Address of the client. */ + uint8_t mac[IPA_MAC_ADDR_SIZE]; + /* Init client. 
*/ + uint8_t client_init; + /* Client Index */ + int8_t client_idx; + /* Header length of the client. */ + uint8_t hdr_len; + /* Source pipe of the lan client. */ + enum ipa_client_type ul_src_pipe; +}; + +struct wan_ioctl_per_client_info { + /* MAC Address of the client. */ + uint8_t mac[IPA_MAC_ADDR_SIZE]; + /* Ipv4 UL traffic bytes. */ + uint64_t ipv4_tx_bytes; + /* Ipv4 DL traffic bytes. */ + uint64_t ipv4_rx_bytes; + /* Ipv6 UL traffic bytes. */ + uint64_t ipv6_tx_bytes; + /* Ipv6 DL traffic bytes. */ + uint64_t ipv6_rx_bytes; +}; + +struct wan_ioctl_query_per_client_stats { + /* Device type of the client. */ + enum ipacm_per_client_device_type device_type; + /* Indicate whether to reset the stats (use 1) or not */ + uint8_t reset_stats; + /* Indicates whether client is disconnected. */ + uint8_t disconnect_clnt; + /* Number of clients. */ + uint8_t num_clients; + /* Client information. */ + struct wan_ioctl_per_client_info + client_info[IPA_MAX_NUM_HW_PATH_CLIENTS]; +}; + #define WAN_IOC_ADD_FLT_RULE _IOWR(WAN_IOC_MAGIC, \ WAN_IOCTL_ADD_FLT_RULE, \ struct ipa_install_fltr_rule_req_msg_v01 *) @@ -170,4 +227,27 @@ struct wan_ioctl_query_dl_filter_stats { WAN_IOCTL_QUERY_TETHER_STATS_ALL, \ struct wan_ioctl_query_tether_stats_all *) +#define WAN_IOC_ADD_UL_FLT_RULE _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_ADD_UL_FLT_RULE, \ + struct ipa_configure_ul_firewall_rules_req_msg_v01 *) + +#define WAN_IOC_ENABLE_PER_CLIENT_STATS _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_ENABLE_PER_CLIENT_STATS, \ + bool *) + +#define WAN_IOC_QUERY_PER_CLIENT_STATS _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_QUERY_PER_CLIENT_STATS, \ + struct wan_ioctl_query_per_client_stats *) + +#define WAN_IOC_SET_LAN_CLIENT_INFO _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_SET_LAN_CLIENT_INFO, \ + struct wan_ioctl_lan_client_info *) + +#define WAN_IOC_SEND_LAN_CLIENT_MSG _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_SEND_LAN_CLIENT_MSG, \ + struct wan_ioctl_send_lan_client_msg *) + +#define WAN_IOC_CLEAR_LAN_CLIENT_INFO _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_CLEAR_LAN_CLIENT_INFO, \ + struct wan_ioctl_lan_client_info *) #endif /* _RMNET_IPA_FD_IOCTL_H */ diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h index 0d87fa1e253c..25cb17ca6bf3 100644 --- a/include/uapi/linux/v4l2-controls.h +++ b/include/uapi/linux/v4l2-controls.h @@ -1225,6 +1225,13 @@ enum v4l2_mpeg_vidc_video_au_delimiter { V4L2_MPEG_VIDC_VIDEO_AU_DELIMITER_ENABLED = 1 }; +#define V4L2_CID_MPEG_VIDC_VIDEO_SEND_SKIPPED_FRAME \ + (V4L2_CID_MPEG_MSM_VIDC_BASE + 103) +enum v4l2_mpeg_vidc_video_venc_send_skipped_frame { + V4L2_MPEG_VIDC_VIDEO_SEND_SKIPPED_FRAME_DISABLE = 0, + V4L2_MPEG_VIDC_VIDEO_SEND_SKIPPED_FRAME_ENABLE = 1 +}; + /* Camera class control IDs */ diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index fa930a91b4aa..36e94588d1d9 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -2,7 +2,7 @@ * Video for Linux Two header file * * Copyright (C) 1999-2012 the contributors - * Copyright (c) 2016, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -590,6 +590,11 @@ struct v4l2_pix_format { #define V4L2_PIX_FMT_SGBRG10DPCM6 v4l2_fourcc('b', 'G', 'A', '6') #define V4L2_PIX_FMT_SGRBG10DPCM6 v4l2_fourcc('B', 'D', '1', '6') #define V4L2_PIX_FMT_SRGGB10DPCM6 v4l2_fourcc('b', 'R', 'A', '6') + /* 10bit raw bayer, plain16 packed */ +#define V4L2_PIX_FMT_SBGGRPLAIN16 v4l2_fourcc('B', 'G', '1', '6') +#define V4L2_PIX_FMT_SGBRGPLAIN16 v4l2_fourcc('G', 'B', '1', '6') +#define V4L2_PIX_FMT_SGRBGPLAIN16 v4l2_fourcc('G', 'R', '1', '6') +#define V4L2_PIX_FMT_SRGGBPLAIN16 v4l2_fourcc('R', 'G', '1', '6') /* compressed formats */ #define V4L2_PIX_FMT_MJPEG v4l2_fourcc('M', 'J', 'P', 'G') /* Motion-JPEG */ diff --git a/include/uapi/media/ais/msm_ais_sensor.h b/include/uapi/media/ais/msm_ais_sensor.h index f8b98def850a..eb9c24024383 100644 --- a/include/uapi/media/ais/msm_ais_sensor.h +++ b/include/uapi/media/ais/msm_ais_sensor.h @@ -178,6 +178,27 @@ enum cci_i2c_master_t { MASTER_MAX, }; +struct msm_sensor_event_data { + uint16_t sensor_slave_addr; +}; + +enum msm_sensor_event_mask_index { + SENSOR_EVENT_MASK_INDEX_SIGNAL_STATUS = 2, +}; + +#define SENSOR_EVENT_SUBS_MASK_NONE 0 + +#define SENSOR_EVENT_SUBS_MASK_SIGNAL_STATUS \ + (1 << SENSOR_EVENT_MASK_INDEX_SIGNAL_STATUS) + +enum msm_sensor_event_idx { + SENSOR_SIGNAL_STATUS = 2, + SENSOR_EVENT_MAX = 15 +}; + +#define SENSOR_EVENT_BASE (V4L2_EVENT_PRIVATE_START) +#define SENSOR_EVENT_SIGNAL_STATUS (SENSOR_EVENT_BASE + SENSOR_SIGNAL_STATUS) + struct msm_camera_i2c_array_write_config { struct msm_camera_i2c_reg_setting conf_array; uint16_t slave_addr; diff --git a/include/uapi/media/ais/msm_ais_sensor_sdk.h b/include/uapi/media/ais/msm_ais_sensor_sdk.h index c2a93a51a985..3f63bde39cf3 100644 --- a/include/uapi/media/ais/msm_ais_sensor_sdk.h +++ b/include/uapi/media/ais/msm_ais_sensor_sdk.h @@ -285,6 +285,11 @@ struct msm_sensor_id_info_t { unsigned short sensor_id_mask; }; +struct msm_camera_sensor_gpio_intr_config { + int gpio_num; + uint32_t gpio_trigger; +}; + struct msm_camera_sensor_slave_info { char sensor_name[32]; char eeprom_name[32]; @@ -300,6 +305,9 @@ struct msm_camera_sensor_slave_info { unsigned char is_init_params_valid; struct msm_sensor_init_params sensor_init_params; enum msm_sensor_output_format_t output_format; + struct msm_camera_sensor_gpio_intr_config + gpio_intr_config; + unsigned int camera_sensor_device_id; }; struct msm_camera_i2c_reg_array { diff --git a/include/uapi/media/msm_cam_sensor.h b/include/uapi/media/msm_cam_sensor.h index c6144cd8f355..0ec18d663cff 100644 --- a/include/uapi/media/msm_cam_sensor.h +++ b/include/uapi/media/msm_cam_sensor.h @@ -88,6 +88,7 @@ enum sensor_sub_module_t { SUB_MODULE_EXT, SUB_MODULE_IR_LED, SUB_MODULE_IR_CUT, + SUB_MODULE_LASER_LED, SUB_MODULE_MAX, }; @@ -301,6 +302,15 @@ struct msm_ir_cut_cfg_data_t { enum msm_ir_cut_cfg_type_t cfg_type; }; +struct msm_laser_led_cfg_data_t { + enum msm_laser_led_cfg_type_t cfg_type; + void __user *setting; + void __user *debug_reg; + uint32_t debug_reg_size; + uint16_t i2c_addr; + enum i2c_freq_mode_t i2c_freq_mode; +}; + struct msm_eeprom_cfg_data { enum eeprom_cfg_type_t cfgtype; uint8_t is_supported; @@ -381,7 +391,9 @@ enum msm_ois_cfg_download_type_t { enum msm_ois_i2c_operation { MSM_OIS_WRITE = 0, MSM_OIS_POLL, + MSM_OIS_READ, }; +#define MSM_OIS_READ MSM_OIS_READ struct reg_settings_ois_t { uint16_t reg_addr; @@ -616,5 +628,8 @@ struct 
sensor_init_cfg_data { #define VIDIOC_MSM_IR_CUT_CFG \ _IOWR('V', BASE_VIDIOC_PRIVATE + 15, struct msm_ir_cut_cfg_data_t) +#define VIDIOC_MSM_LASER_LED_CFG \ + _IOWR('V', BASE_VIDIOC_PRIVATE + 16, struct msm_laser_led_cfg_data_t) + #endif diff --git a/include/uapi/media/msm_camera.h b/include/uapi/media/msm_camera.h index 10ee4b7c9390..39e6927d9b7e 100644 --- a/include/uapi/media/msm_camera.h +++ b/include/uapi/media/msm_camera.h @@ -1541,7 +1541,9 @@ enum msm_camera_i2c_reg_addr_type { MSM_CAMERA_I2C_BYTE_ADDR = 1, MSM_CAMERA_I2C_WORD_ADDR, MSM_CAMERA_I2C_3B_ADDR, + MSM_CAMERA_I2C_DWORD_ADDR, }; +#define MSM_CAMERA_I2C_DWORD_ADDR MSM_CAMERA_I2C_DWORD_ADDR struct msm_camera_i2c_reg_array { uint16_t reg_addr; diff --git a/include/uapi/media/msm_camsensor_sdk.h b/include/uapi/media/msm_camsensor_sdk.h index a92c144f712e..08605aca474d 100644 --- a/include/uapi/media/msm_camsensor_sdk.h +++ b/include/uapi/media/msm_camsensor_sdk.h @@ -85,8 +85,10 @@ enum msm_camera_i2c_reg_addr_type { MSM_CAMERA_I2C_BYTE_ADDR = 1, MSM_CAMERA_I2C_WORD_ADDR, MSM_CAMERA_I2C_3B_ADDR, + MSM_CAMERA_I2C_DWORD_ADDR, MSM_CAMERA_I2C_ADDR_TYPE_MAX, }; +#define MSM_CAMERA_I2C_DWORD_ADDR MSM_CAMERA_I2C_DWORD_ADDR enum msm_camera_i2c_data_type { MSM_CAMERA_I2C_BYTE_DATA = 1, @@ -206,6 +208,13 @@ enum msm_ir_led_cfg_type_t { #define CFG_IR_LED_OFF CFG_IR_LED_OFF #define CFG_IR_LED_ON CFG_IR_LED_ON +enum msm_laser_led_cfg_type_t { + CFG_LASER_LED_INIT, + CFG_LASER_LED_CONTROL, +}; +#define CFG_LASER_LED_INIT CFG_LASER_LED_INIT +#define CFG_LASER_LED_CONTROL CFG_LASER_LED_CONTROL + enum msm_ir_cut_cfg_type_t { CFG_IR_CUT_INIT = 0, CFG_IR_CUT_RELEASE, diff --git a/include/uapi/media/msmb_camera.h b/include/uapi/media/msmb_camera.h index df9807e72e47..4b23806071d4 100644 --- a/include/uapi/media/msmb_camera.h +++ b/include/uapi/media/msmb_camera.h @@ -52,6 +52,7 @@ #define MSM_CAMERA_SUBDEV_IR_CUT 18 #define MSM_CAMERA_SUBDEV_EXT 19 #define MSM_CAMERA_SUBDEV_TOF 20 +#define MSM_CAMERA_SUBDEV_LASER_LED 21 #define MSM_MAX_CAMERA_SENSORS 5 /* The below macro is defined to put an upper limit on maximum diff --git a/init/Kconfig b/init/Kconfig index 7b8b4171ce00..1473414ec8fe 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -26,6 +26,16 @@ config IRQ_WORK config BUILDTIME_EXTABLE_SORT bool +config THREAD_INFO_IN_TASK + bool + help + Select this to move thread_info off the stack into task_struct. To + make this work, an arch will need to remove all thread_info fields + except flags and fix any runtime bugs. + + One subtle change that will be needed is to use try_get_task_stack() + and put_task_stack() in save_thread_stack_tsk() and get_wchan(). + menu "General setup" config BROKEN diff --git a/init/init_task.c b/init/init_task.c index ba0a7f362d9e..11f83be1fa79 100644 --- a/init/init_task.c +++ b/init/init_task.c @@ -22,5 +22,8 @@ EXPORT_SYMBOL(init_task); * Initial thread structure. Alignment of this is handled by a special * linker map entry. 
*/ -union thread_union init_thread_union __init_task_data = - { INIT_THREAD_INFO(init_task) }; +union thread_union init_thread_union __init_task_data = { +#ifndef CONFIG_THREAD_INFO_IN_TASK + INIT_THREAD_INFO(init_task) +#endif +}; diff --git a/init/main.c b/init/main.c index 7d4532bff5da..8c72af285838 100644 --- a/init/main.c +++ b/init/main.c @@ -468,7 +468,7 @@ void __init __weak smp_setup_processor_id(void) } # if THREAD_SIZE >= PAGE_SIZE -void __init __weak thread_info_cache_init(void) +void __init __weak thread_stack_cache_init(void) { } #endif @@ -644,7 +644,7 @@ asmlinkage __visible void __init start_kernel(void) /* Should be run before the first non-init thread is created */ init_espfix_bsp(); #endif - thread_info_cache_init(); + thread_stack_cache_init(); cred_init(); fork_init(); proc_caches_init(); diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c index 939945a5649c..a162661c9d60 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c @@ -457,13 +457,15 @@ void audit_remove_watch_rule(struct audit_krule *krule) list_del(&krule->rlist); if (list_empty(&watch->rules)) { + /* + * audit_remove_watch() drops our reference to 'parent' which + * can get freed. Grab our own reference to be safe. + */ + audit_get_parent(parent); audit_remove_watch(watch); - - if (list_empty(&parent->watches)) { - audit_get_parent(parent); + if (list_empty(&parent->watches)) fsnotify_destroy_mark(&parent->mark, audit_watch_group); - audit_put_parent(parent); - } + audit_put_parent(parent); } } diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 077bb52e2d47..3fdb7545852e 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -2799,6 +2799,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk) int retval = 0; mutex_lock(&cgroup_mutex); + percpu_down_write(&cgroup_threadgroup_rwsem); for_each_root(root) { struct cgroup *from_cgrp; @@ -2813,6 +2814,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk) if (retval) break; } + percpu_up_write(&cgroup_threadgroup_rwsem); mutex_unlock(&cgroup_mutex); return retval; @@ -4072,6 +4074,8 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from) mutex_lock(&cgroup_mutex); + percpu_down_write(&cgroup_threadgroup_rwsem); + /* all tasks in @from are being moved, all csets are source */ spin_lock_irq(&css_set_lock); list_for_each_entry(link, &from->cset_links, cset_link) @@ -4100,6 +4104,7 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from) } while (task && !ret); out_err: cgroup_migrate_finish(&preloaded_csets); + percpu_up_write(&cgroup_threadgroup_rwsem); mutex_unlock(&cgroup_mutex); return ret; } diff --git a/kernel/cpu.c b/kernel/cpu.c index 1d6b0a209bc0..5b4440d57f89 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -91,6 +91,11 @@ static struct { #define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map) #define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map) +void cpu_hotplug_mutex_held(void) +{ + lockdep_assert_held(&cpu_hotplug.lock); +} +EXPORT_SYMBOL(cpu_hotplug_mutex_held); void get_online_cpus(void) { diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 03dbc231a4a0..1656a48d5bee 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -60,6 +60,7 @@ #include <linux/cgroup.h> #include <linux/wait.h> +struct static_key cpusets_pre_enable_key __read_mostly = STATIC_KEY_INIT_FALSE; struct static_key cpusets_enabled_key __read_mostly = STATIC_KEY_INIT_FALSE; /* See "Frequency meter" comments, below. 
*/ @@ -806,16 +807,15 @@ done: * 'cpus' is removed, then call this routine to rebuild the * scheduler's dynamic sched domains. * - * Call with cpuset_mutex held. Takes get_online_cpus(). */ -static void rebuild_sched_domains_locked(void) +static void rebuild_sched_domains_unlocked(void) { struct sched_domain_attr *attr; cpumask_var_t *doms; int ndoms; + cpu_hotplug_mutex_held(); lockdep_assert_held(&cpuset_mutex); - get_online_cpus(); /* * We have raced with CPU hotplug. Don't do anything to avoid @@ -823,27 +823,27 @@ static void rebuild_sched_domains_locked(void) * Anyways, hotplug work item will rebuild sched domains. */ if (!cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask)) - goto out; + return; /* Generate domain masks and attrs */ ndoms = generate_sched_domains(&doms, &attr); /* Have scheduler rebuild the domains */ partition_sched_domains(ndoms, doms, attr); -out: - put_online_cpus(); } #else /* !CONFIG_SMP */ -static void rebuild_sched_domains_locked(void) +static void rebuild_sched_domains_unlocked(void) { } #endif /* CONFIG_SMP */ void rebuild_sched_domains(void) { + get_online_cpus(); mutex_lock(&cpuset_mutex); - rebuild_sched_domains_locked(); + rebuild_sched_domains_unlocked(); mutex_unlock(&cpuset_mutex); + put_online_cpus(); } /** @@ -875,7 +875,6 @@ static void update_tasks_cpumask(struct cpuset *cs) * * On legacy hierachy, effective_cpus will be the same with cpu_allowed. * - * Called with cpuset_mutex held */ static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus) { @@ -930,7 +929,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus) rcu_read_unlock(); if (need_rebuild_sched_domains) - rebuild_sched_domains_locked(); + rebuild_sched_domains_unlocked(); } /** @@ -1289,7 +1288,7 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val) cs->relax_domain_level = val; if (!cpumask_empty(cs->cpus_allowed) && is_sched_load_balance(cs)) - rebuild_sched_domains_locked(); + rebuild_sched_domains_unlocked(); } return 0; @@ -1320,7 +1319,6 @@ static void update_tasks_flags(struct cpuset *cs) * cs: the cpuset to update * turning_on: whether the flag is being set or cleared * - * Call with cpuset_mutex held. 
*/ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, @@ -1355,7 +1353,7 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, spin_unlock_irq(&callback_lock); if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) - rebuild_sched_domains_locked(); + rebuild_sched_domains_unlocked(); if (spread_flag_changed) update_tasks_flags(cs); @@ -1620,6 +1618,7 @@ static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft, cpuset_filetype_t type = cft->private; int retval = 0; + get_online_cpus(); mutex_lock(&cpuset_mutex); if (!is_cpuset_online(cs)) { retval = -ENODEV; @@ -1657,6 +1656,7 @@ static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft, } out_unlock: mutex_unlock(&cpuset_mutex); + put_online_cpus(); return retval; } @@ -1667,6 +1667,7 @@ static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft, cpuset_filetype_t type = cft->private; int retval = -ENODEV; + get_online_cpus(); mutex_lock(&cpuset_mutex); if (!is_cpuset_online(cs)) goto out_unlock; @@ -1681,6 +1682,7 @@ static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft, } out_unlock: mutex_unlock(&cpuset_mutex); + put_online_cpus(); return retval; } @@ -1719,6 +1721,7 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of, kernfs_break_active_protection(of->kn); flush_work(&cpuset_hotplug_work); + get_online_cpus(); mutex_lock(&cpuset_mutex); if (!is_cpuset_online(cs)) goto out_unlock; @@ -1744,6 +1747,7 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of, free_trial_cpuset(trialcs); out_unlock: mutex_unlock(&cpuset_mutex); + put_online_cpus(); kernfs_unbreak_active_protection(of->kn); css_put(&cs->css); flush_workqueue(cpuset_migrate_mm_wq); @@ -2049,13 +2053,14 @@ out_unlock: /* * If the cpuset being removed has its flag 'sched_load_balance' * enabled, then simulate turning sched_load_balance off, which - * will call rebuild_sched_domains_locked(). + * will call rebuild_sched_domains_unlocked(). */ static void cpuset_css_offline(struct cgroup_subsys_state *css) { struct cpuset *cs = css_cs(css); + get_online_cpus(); mutex_lock(&cpuset_mutex); if (is_sched_load_balance(cs)) @@ -2065,6 +2070,7 @@ static void cpuset_css_offline(struct cgroup_subsys_state *css) clear_bit(CS_ONLINE, &cs->flags); mutex_unlock(&cpuset_mutex); + put_online_cpus(); } static void cpuset_css_free(struct cgroup_subsys_state *css) diff --git a/kernel/events/core.c b/kernel/events/core.c index 87f5d841f796..98928fb7fecc 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -8698,28 +8698,27 @@ SYSCALL_DEFINE5(perf_event_open, goto err_context; /* - * Do not allow to attach to a group in a different - * task or CPU context: + * Make sure we're both events for the same CPU; + * grouping events for different CPUs is broken; since + * you can never concurrently schedule them anyhow. */ - if (move_group) { - /* - * Make sure we're both on the same task, or both - * per-cpu events. - */ - if (group_leader->ctx->task != ctx->task) - goto err_context; + if (group_leader->cpu != event->cpu) + goto err_context; - /* - * Make sure we're both events for the same CPU; - * grouping events for different CPUs is broken; since - * you can never concurrently schedule them anyhow. - */ - if (group_leader->cpu != event->cpu) - goto err_context; - } else { - if (group_leader->ctx != ctx) - goto err_context; - } + /* + * Make sure we're both on the same task, or both + * per-CPU events. 
+ */ + if (group_leader->ctx->task != ctx->task) + goto err_context; + + /* + * Do not allow to attach to a group in a different task + * or CPU context. If we're moving SW events, we'll fix + * this up later, so allow that. + */ + if (!move_group && group_leader->ctx != ctx) + goto err_context; /* * Only a group leader can be exclusive or pinned diff --git a/kernel/fork.c b/kernel/fork.c index fef4df444f47..07cd0d68ee02 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -148,18 +148,18 @@ static inline void free_task_struct(struct task_struct *tsk) } #endif -void __weak arch_release_thread_info(struct thread_info *ti) +void __weak arch_release_thread_stack(unsigned long *stack) { } -#ifndef CONFIG_ARCH_THREAD_INFO_ALLOCATOR +#ifndef CONFIG_ARCH_THREAD_STACK_ALLOCATOR /* * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a * kmemcache based allocator. */ # if THREAD_SIZE >= PAGE_SIZE -static struct thread_info *alloc_thread_info_node(struct task_struct *tsk, +static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node) { struct page *page = alloc_kmem_pages_node(node, THREADINFO_GFP, @@ -168,30 +168,32 @@ static struct thread_info *alloc_thread_info_node(struct task_struct *tsk, return page ? page_address(page) : NULL; } -static inline void free_thread_info(struct thread_info *ti) +static inline void free_thread_stack(unsigned long *stack) { - kasan_alloc_pages(virt_to_page(ti), THREAD_SIZE_ORDER); - free_kmem_pages((unsigned long)ti, THREAD_SIZE_ORDER); + struct page *page = virt_to_page(stack); + + kasan_alloc_pages(page, THREAD_SIZE_ORDER); + __free_kmem_pages(page, THREAD_SIZE_ORDER); } # else -static struct kmem_cache *thread_info_cache; +static struct kmem_cache *thread_stack_cache; -static struct thread_info *alloc_thread_info_node(struct task_struct *tsk, +static struct thread_info *alloc_thread_stack_node(struct task_struct *tsk, int node) { - return kmem_cache_alloc_node(thread_info_cache, THREADINFO_GFP, node); + return kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node); } -static void free_thread_info(struct thread_info *ti) +static void free_stack(unsigned long *stack) { - kmem_cache_free(thread_info_cache, ti); + kmem_cache_free(thread_stack_cache, stack); } -void thread_info_cache_init(void) +void thread_stack_cache_init(void) { - thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE, + thread_stack_cache = kmem_cache_create("thread_stack", THREAD_SIZE, THREAD_SIZE, 0, NULL); - BUG_ON(thread_info_cache == NULL); + BUG_ON(thread_stack_cache == NULL); } # endif #endif @@ -214,9 +216,9 @@ struct kmem_cache *vm_area_cachep; /* SLAB cache for mm_struct structures (tsk->mm) */ static struct kmem_cache *mm_cachep; -static void account_kernel_stack(struct thread_info *ti, int account) +static void account_kernel_stack(unsigned long *stack, int account) { - struct zone *zone = page_zone(virt_to_page(ti)); + struct zone *zone = page_zone(virt_to_page(stack)); mod_zone_page_state(zone, NR_KERNEL_STACK, account); } @@ -224,8 +226,8 @@ static void account_kernel_stack(struct thread_info *ti, int account) void free_task(struct task_struct *tsk) { account_kernel_stack(tsk->stack, -1); - arch_release_thread_info(tsk->stack); - free_thread_info(tsk->stack); + arch_release_thread_stack(tsk->stack); + free_thread_stack(tsk->stack); rt_mutex_debug_task_free(tsk); ftrace_graph_exit_task(tsk); put_seccomp_filter(tsk); @@ -336,7 +338,7 @@ void set_task_stack_end_magic(struct task_struct *tsk) static struct task_struct *dup_task_struct(struct 
task_struct *orig, int node) { struct task_struct *tsk; - struct thread_info *ti; + unsigned long *stack; int err; if (node == NUMA_NO_NODE) @@ -345,15 +347,15 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) if (!tsk) return NULL; - ti = alloc_thread_info_node(tsk, node); - if (!ti) + stack = alloc_thread_stack_node(tsk, node); + if (!stack) goto free_tsk; err = arch_dup_task_struct(tsk, orig); if (err) - goto free_ti; + goto free_stack; - tsk->stack = ti; + tsk->stack = stack; #ifdef CONFIG_SECCOMP /* * We must handle setting up seccomp filters once we're under @@ -385,12 +387,12 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) tsk->task_frag.page = NULL; tsk->wake_q.next = NULL; - account_kernel_stack(ti, 1); + account_kernel_stack(stack, 1); return tsk; -free_ti: - free_thread_info(ti); +free_stack: + free_thread_stack(stack); free_tsk: free_task_struct(tsk); return NULL; diff --git a/kernel/kthread.c b/kernel/kthread.c index 698b8dec3074..d9b0be5c6a5f 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -65,7 +65,7 @@ static inline struct kthread *to_kthread(struct task_struct *k) static struct kthread *to_live_kthread(struct task_struct *k) { struct completion *vfork = ACCESS_ONCE(k->vfork_done); - if (likely(vfork)) + if (likely(vfork) && try_get_task_stack(k)) return __to_kthread(vfork); return NULL; } @@ -427,8 +427,10 @@ void kthread_unpark(struct task_struct *k) { struct kthread *kthread = to_live_kthread(k); - if (kthread) + if (kthread) { __kthread_unpark(k, kthread); + put_task_stack(k); + } } EXPORT_SYMBOL_GPL(kthread_unpark); @@ -457,6 +459,7 @@ int kthread_park(struct task_struct *k) wait_for_completion(&kthread->parked); } } + put_task_stack(k); ret = 0; } return ret; @@ -492,6 +495,7 @@ int kthread_stop(struct task_struct *k) __kthread_unpark(k, kthread); wake_up_process(k); wait_for_completion(&kthread->exited); + put_task_stack(k); } ret = k->exit_code; put_task_struct(k); diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c index 3ef3736002d8..9c951fade415 100644 --- a/kernel/locking/mutex-debug.c +++ b/kernel/locking/mutex-debug.c @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter) } void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter, - struct thread_info *ti) + struct task_struct *task) { SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock)); /* Mark the current thread as blocked on the lock: */ - ti->task->blocked_on = waiter; + task->blocked_on = waiter; } void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter, - struct thread_info *ti) + struct task_struct *task) { DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list)); - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task); - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter); - ti->task->blocked_on = NULL; + DEBUG_LOCKS_WARN_ON(waiter->task != task); + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter); + task->blocked_on = NULL; list_del_init(&waiter->list); waiter->task = NULL; diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h index 0799fd3e4cfa..d06ae3bb46c5 100644 --- a/kernel/locking/mutex-debug.h +++ b/kernel/locking/mutex-debug.h @@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock, extern void debug_mutex_free_waiter(struct mutex_waiter *waiter); extern void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter, - struct thread_info *ti); + struct task_struct *task); extern void 
mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter, - struct thread_info *ti); + struct task_struct *task); extern void debug_mutex_unlock(struct mutex *lock); extern void debug_mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key); diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c index 14b9cca36b05..bf5277ee11d3 100644 --- a/kernel/locking/mutex.c +++ b/kernel/locking/mutex.c @@ -549,7 +549,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, goto skip_wait; debug_mutex_lock_common(lock, &waiter); - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task)); + debug_mutex_add_waiter(lock, &waiter, task); /* add waiting tasks to the end of the waitqueue (FIFO): */ list_add_tail(&waiter.list, &lock->wait_list); @@ -596,7 +596,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, } __set_task_state(task, TASK_RUNNING); - mutex_remove_waiter(lock, &waiter, current_thread_info()); + mutex_remove_waiter(lock, &waiter, task); /* set it to 0 if there are no waiters left: */ if (likely(list_empty(&lock->wait_list))) atomic_set(&lock->count, 0); @@ -617,7 +617,7 @@ skip_wait: return 0; err: - mutex_remove_waiter(lock, &waiter, task_thread_info(task)); + mutex_remove_waiter(lock, &waiter, task); spin_unlock_mutex(&lock->wait_lock, flags); debug_mutex_free_waiter(&waiter); mutex_release(&lock->dep_map, 1, ip); diff --git a/kernel/locking/mutex.h b/kernel/locking/mutex.h index 5cda397607f2..a68bae5e852a 100644 --- a/kernel/locking/mutex.h +++ b/kernel/locking/mutex.h @@ -13,7 +13,7 @@ do { spin_lock(lock); (void)(flags); } while (0) #define spin_unlock_mutex(lock, flags) \ do { spin_unlock(lock); (void)(flags); } while (0) -#define mutex_remove_waiter(lock, waiter, ti) \ +#define mutex_remove_waiter(lock, waiter, task) \ __list_del((waiter)->list.prev, (waiter)->list.next) #ifdef CONFIG_MUTEX_SPIN_ON_OWNER diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c index 1e6a51cc25c4..99b8d991126f 100644 --- a/kernel/locking/osq_lock.c +++ b/kernel/locking/osq_lock.c @@ -106,32 +106,6 @@ bool osq_lock(struct optimistic_spin_queue *lock) prev = decode_cpu(old); node->prev = prev; - - /* - * We need to avoid reordering of link updation sequence of osq. - * A case in which the status of optimistic spin queue is - * CPU6->CPU2 in which CPU6 has acquired the lock. At this point - * if CPU0 comes in to acquire osq_lock, it will update the tail - * count. After tail count update if CPU2 starts to unqueue itself - * from optimistic spin queue, it will find updated tail count with - * CPU0 and update CPU2 node->next to NULL in osq_wait_next(). If - * reordering of following stores happen then prev->next where prev - * being CPU2 would be updated to point to CPU0 node: - * node->prev = prev; - * WRITE_ONCE(prev->next, node); - * - * At this point if next instruction - * WRITE_ONCE(next->prev, prev); - * in CPU2 path is committed before the update of CPU0 node->prev = - * prev then CPU0 node->prev will point to CPU6 node. At this point - * if CPU0 path's node->prev = prev is committed resulting in change - * of CPU0 prev back to CPU2 node. CPU2 node->next is NULL, so if - * CPU0 gets into unqueue path of osq_lock it will keep spinning - * in infinite loop as condition prev->next == node will never be - * true. 
- */ - smp_mb(); - WRITE_ONCE(prev->next, node); /* diff --git a/kernel/panic.c b/kernel/panic.c index 679254405510..75f564a94a82 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -24,6 +24,7 @@ #include <linux/init.h> #include <linux/nmi.h> #include <linux/console.h> +#include <soc/qcom/minidump.h> #define CREATE_TRACE_POINTS #include <trace/events/exception.h> @@ -108,6 +109,7 @@ void panic(const char *fmt, ...) va_start(args, fmt); vsnprintf(buf, sizeof(buf), fmt, args); va_end(args); + dump_stack_minidump(0); pr_emerg("Kernel panic - not syncing: %s\n", buf); #ifdef CONFIG_DEBUG_BUGVERBOSE /* diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 9fcb521fab0e..dca87791e9c1 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -3180,9 +3180,8 @@ void show_regs_print_info(const char *log_lvl) { dump_stack_print_info(log_lvl); - printk("%stask: %p ti: %p task.ti: %p\n", - log_lvl, current, current_thread_info(), - task_thread_info(current)); + printk("%stask: %p task.stack: %p\n", + log_lvl, current, task_stack_page(current)); } #endif diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 2cb46d51d715..1ba183e7987c 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -248,24 +248,17 @@ static int rcu_gp_in_progress(struct rcu_state *rsp) */ void rcu_sched_qs(void) { - unsigned long flags; - - if (__this_cpu_read(rcu_sched_data.cpu_no_qs.s)) { - trace_rcu_grace_period(TPS("rcu_sched"), - __this_cpu_read(rcu_sched_data.gpnum), - TPS("cpuqs")); - __this_cpu_write(rcu_sched_data.cpu_no_qs.b.norm, false); - if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp)) - return; - local_irq_save(flags); - if (__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp)) { - __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, false); - rcu_report_exp_rdp(&rcu_sched_state, - this_cpu_ptr(&rcu_sched_data), - true); - } - local_irq_restore(flags); - } + if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.s)) + return; + trace_rcu_grace_period(TPS("rcu_sched"), + __this_cpu_read(rcu_sched_data.gpnum), + TPS("cpuqs")); + __this_cpu_write(rcu_sched_data.cpu_no_qs.b.norm, false); + if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp)) + return; + __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, false); + rcu_report_exp_rdp(&rcu_sched_state, + this_cpu_ptr(&rcu_sched_data), true); } void rcu_bh_qs(void) @@ -302,17 +295,16 @@ EXPORT_PER_CPU_SYMBOL_GPL(rcu_qs_ctr); * We inform the RCU core by emulating a zero-duration dyntick-idle * period, which we in turn do by incrementing the ->dynticks counter * by two. + * + * The caller must have disabled interrupts. */ static void rcu_momentary_dyntick_idle(void) { - unsigned long flags; struct rcu_data *rdp; struct rcu_dynticks *rdtp; int resched_mask; struct rcu_state *rsp; - local_irq_save(flags); - /* * Yes, we can lose flag-setting operations. This is OK, because * the flag will be set again after some delay. @@ -342,13 +334,12 @@ static void rcu_momentary_dyntick_idle(void) smp_mb__after_atomic(); /* Later stuff after QS. */ break; } - local_irq_restore(flags); } /* * Note a context switch. This is a quiescent state for RCU-sched, * and requires special handling for preemptible RCU. - * The caller must have disabled preemption. + * The caller must have disabled interrupts. */ void rcu_note_context_switch(void) { @@ -378,9 +369,14 @@ EXPORT_SYMBOL_GPL(rcu_note_context_switch); */ void rcu_all_qs(void) { + unsigned long flags; + barrier(); /* Avoid RCU read-side critical sections leaking down. 
*/ - if (unlikely(raw_cpu_read(rcu_sched_qs_mask))) + if (unlikely(raw_cpu_read(rcu_sched_qs_mask))) { + local_irq_save(flags); rcu_momentary_dyntick_idle(); + local_irq_restore(flags); + } this_cpu_inc(rcu_qs_ctr); barrier(); /* Avoid RCU read-side critical sections leaking up. */ } diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 32cbe72bf545..c6fc11d626f8 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -147,8 +147,8 @@ static void __init rcu_bootup_announce(void) * the corresponding expedited grace period will also be the end of the * normal grace period. */ -static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp, - unsigned long flags) __releases(rnp->lock) +static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp) + __releases(rnp->lock) /* But leaves rrupts disabled. */ { int blkd_state = (rnp->gp_tasks ? RCU_GP_TASKS : 0) + (rnp->exp_tasks ? RCU_EXP_TASKS : 0) + @@ -236,7 +236,7 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp, rnp->gp_tasks = &t->rcu_node_entry; if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD)) rnp->exp_tasks = &t->rcu_node_entry; - raw_spin_unlock(&rnp->lock); + raw_spin_unlock(&rnp->lock); /* rrupts remain disabled. */ /* * Report the quiescent state for the expedited GP. This expedited @@ -251,7 +251,6 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp, } else { WARN_ON_ONCE(t->rcu_read_unlock_special.b.exp_need_qs); } - local_irq_restore(flags); } /* @@ -286,12 +285,11 @@ static void rcu_preempt_qs(void) * predating the current grace period drain, in other words, until * rnp->gp_tasks becomes NULL. * - * Caller must disable preemption. + * Caller must disable interrupts. */ static void rcu_preempt_note_context_switch(void) { struct task_struct *t = current; - unsigned long flags; struct rcu_data *rdp; struct rcu_node *rnp; @@ -301,7 +299,7 @@ static void rcu_preempt_note_context_switch(void) /* Possibly blocking in an RCU read-side critical section. */ rdp = this_cpu_ptr(rcu_state_p->rda); rnp = rdp->mynode; - raw_spin_lock_irqsave(&rnp->lock, flags); + raw_spin_lock(&rnp->lock); smp_mb__after_unlock_lock(); t->rcu_read_unlock_special.b.blocked = true; t->rcu_blocked_node = rnp; @@ -318,7 +316,7 @@ static void rcu_preempt_note_context_switch(void) (rnp->qsmask & rdp->grpmask) ? rnp->gpnum : rnp->gpnum + 1); - rcu_preempt_ctxt_queue(rnp, rdp, flags); + rcu_preempt_ctxt_queue(rnp, rdp); } else if (t->rcu_read_lock_nesting < 0 && t->rcu_read_unlock_special.s) { diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 4ecca604e64b..2dbe599d34d5 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -621,8 +621,7 @@ void resched_cpu(int cpu) struct rq *rq = cpu_rq(cpu); unsigned long flags; - if (!raw_spin_trylock_irqsave(&rq->lock, flags)) - return; + raw_spin_lock_irqsave(&rq->lock, flags); resched_curr(rq); raw_spin_unlock_irqrestore(&rq->lock, flags); } @@ -3512,7 +3511,6 @@ static void __sched notrace __schedule(bool preempt) cpu = smp_processor_id(); rq = cpu_rq(cpu); - rcu_note_context_switch(); prev = rq->curr; /* @@ -3531,13 +3529,16 @@ static void __sched notrace __schedule(bool preempt) if (sched_feat(HRTICK)) hrtick_clear(rq); + local_irq_disable(); + rcu_note_context_switch(); + /* * Make sure that signal_pending_state()->signal_pending() below * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE) * done by the caller to avoid the race with signal_wake_up(). 
*/ smp_mb__before_spinlock(); - raw_spin_lock_irq(&rq->lock); + raw_spin_lock(&rq->lock); lockdep_pin_lock(&rq->lock); rq->clock_skip_update <<= 1; /* promote REQ to ACT */ diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 67b7da81f8a2..33bf0c07e757 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1767,7 +1767,11 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) * per-task data have been completed by this moment. */ smp_wmb(); +#ifdef CONFIG_THREAD_INFO_IN_TASK + p->cpu = cpu; +#else task_thread_info(p)->cpu = cpu; +#endif p->wake_cpu = cpu; #endif } diff --git a/kernel/sched/tune.c b/kernel/sched/tune.c index a71e94cecdb6..9c56841227cc 100644 --- a/kernel/sched/tune.c +++ b/kernel/sched/tune.c @@ -829,7 +829,6 @@ schedtune_boostgroup_init(struct schedtune *st) bg = &per_cpu(cpu_boost_groups, cpu); bg->group[st->idx].boost = 0; bg->group[st->idx].tasks = 0; - raw_spin_lock_init(&bg->lock); } return 0; diff --git a/kernel/signal.c b/kernel/signal.c index b92a047ddc82..5d50ea899b6d 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -346,7 +346,7 @@ static bool task_participate_group_stop(struct task_struct *task) * fresh group stop. Read comment in do_signal_stop() for details. */ if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) { - sig->flags = SIGNAL_STOP_STOPPED; + signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED); return true; } return false; @@ -845,7 +845,7 @@ static bool prepare_signal(int sig, struct task_struct *p, bool force) * will take ->siglock, notice SIGNAL_CLD_MASK, and * notify its parent. See get_signal_to_deliver(). */ - signal->flags = why | SIGNAL_STOP_CONTINUED; + signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED); signal->group_stop_count = 0; signal->group_exit_code = 0; } diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 6816302542b2..f0e5408499b6 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -1979,6 +1979,10 @@ static int create_filter(struct trace_event_call *call, if (err && set_str) append_filter_err(ps, filter); } + if (err && !set_str) { + free_event_filter(filter); + filter = NULL; + } create_filter_finish(ps); *filterp = filter; diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 73c018d7df00..80b5dbfd187d 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -3669,8 +3669,12 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq, return -EINVAL; /* creating multiple pwqs breaks ordering guarantee */ - if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs))) - return -EINVAL; + if (!list_empty(&wq->pwqs)) { + if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) + return -EINVAL; + + wq->flags &= ~__WQ_ORDERED; + } ctx = apply_wqattrs_prepare(wq, attrs); @@ -3856,6 +3860,16 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt, struct workqueue_struct *wq; struct pool_workqueue *pwq; + /* + * Unbound && max_active == 1 used to imply ordered, which is no + * longer the case on NUMA machines due to per-node pools. While + * alloc_ordered_workqueue() is the right way to create an ordered + * workqueue, keep the previous behavior to avoid subtle breakages + * on NUMA. 
+ */ + if ((flags & WQ_UNBOUND) && max_active == 1) + flags |= __WQ_ORDERED; + /* see the comment above the definition of WQ_POWER_EFFICIENT */ if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient) flags |= WQ_UNBOUND; @@ -4044,13 +4058,14 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) struct pool_workqueue *pwq; /* disallow meddling with max_active for ordered workqueues */ - if (WARN_ON(wq->flags & __WQ_ORDERED)) + if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) return; max_active = wq_clamp_max_active(max_active, wq->flags, wq->name); mutex_lock(&wq->mutex); + wq->flags &= ~__WQ_ORDERED; wq->saved_max_active = max_active; for_each_pwq(pwq, wq) @@ -5178,7 +5193,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq) * attributes breaks ordering guarantee. Disallow exposing ordered * workqueues. */ - if (WARN_ON(wq->flags & __WQ_ORDERED)) + if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) return -EINVAL; wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL); diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 6da96c4f98a5..23b74fd4e28f 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -145,7 +145,7 @@ config DEBUG_INFO_REDUCED config DEBUG_INFO_SPLIT bool "Produce split debuginfo in .dwo files" - depends on DEBUG_INFO + depends on DEBUG_INFO && !FRV help Generate debug info into separate .dwo files. This significantly reduces the build directory size for builds with DEBUG_INFO, diff --git a/lib/stackdepot.c b/lib/stackdepot.c index 192134b225ca..076eb03e316b 100644 --- a/lib/stackdepot.c +++ b/lib/stackdepot.c @@ -242,6 +242,7 @@ depot_stack_handle_t depot_save_stack(struct stack_trace *trace, */ alloc_flags &= ~GFP_ZONEMASK; alloc_flags &= (GFP_ATOMIC | GFP_KERNEL); + alloc_flags |= __GFP_NOWARN; page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER); if (page) prealloc = page_address(page); diff --git a/mm/internal.h b/mm/internal.h index 46d27f378885..e17af58d2bf7 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -454,6 +454,7 @@ struct tlbflush_unmap_batch; #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH void try_to_unmap_flush(void); void try_to_unmap_flush_dirty(void); +void flush_tlb_batched_pending(struct mm_struct *mm); #else static inline void try_to_unmap_flush(void) { @@ -461,6 +462,8 @@ static inline void try_to_unmap_flush(void) static inline void try_to_unmap_flush_dirty(void) { } - +static inline void flush_tlb_batched_pending(struct mm_struct *mm) +{ +} #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */ #endif /* __MM_INTERNAL_H */ diff --git a/mm/memory.c b/mm/memory.c index 8689df9b09d5..d6e10c888541 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1127,6 +1127,7 @@ again: init_rss_vec(rss); start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl); pte = start_pte; + flush_tlb_batched_pending(mm); arch_enter_lazy_mmu_mode(); do { pte_t ptent = *pte; diff --git a/mm/mempolicy.c b/mm/mempolicy.c index d56142b66171..177668a9c267 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -895,11 +895,6 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask, *policy |= (pol->flags & MPOL_MODE_FLAGS); } - if (vma) { - up_read(¤t->mm->mmap_sem); - vma = NULL; - } - err = 0; if (nmask) { if (mpol_store_user_nodemask(pol)) { diff --git a/mm/mempool.c b/mm/mempool.c index 004d42b1dfaf..7924f4f58a6d 100644 --- a/mm/mempool.c +++ b/mm/mempool.c @@ -135,8 +135,8 @@ static void *remove_element(mempool_t *pool) void *element = pool->elements[--pool->curr_nr]; BUG_ON(pool->curr_nr < 0); - check_element(pool, element); 
kasan_unpoison_element(pool, element); + check_element(pool, element); return element; } diff --git a/mm/migrate.c b/mm/migrate.c index 85af2816b6d2..a021071eceaf 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -40,6 +40,7 @@ #include <linux/mmu_notifier.h> #include <linux/page_idle.h> #include <linux/page_owner.h> +#include <linux/ptrace.h> #include <asm/tlbflush.h> @@ -1649,7 +1650,6 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages, const int __user *, nodes, int __user *, status, int, flags) { - const struct cred *cred = current_cred(), *tcred; struct task_struct *task; struct mm_struct *mm; int err; @@ -1673,14 +1673,9 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages, /* * Check if this process has the right to modify the specified - * process. The right exists if the process has administrative - * capabilities, superuser privileges or the same - * userid as the target process. + * process. Use the regular "ptrace_may_access()" checks. */ - tcred = __task_cred(task); - if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) && - !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) && - !capable(CAP_SYS_NICE)) { + if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) { rcu_read_unlock(); err = -EPERM; goto out; diff --git a/mm/mprotect.c b/mm/mprotect.c index bddb2c75492d..b8849a3930a0 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -72,6 +72,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, if (!pte) return 0; + flush_tlb_batched_pending(vma->vm_mm); arch_enter_lazy_mmu_mode(); do { oldpte = *pte; diff --git a/mm/mremap.c b/mm/mremap.c index c25bc6268e46..fe7b7f65f4f4 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -135,6 +135,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, new_ptl = pte_lockptr(mm, new_pmd); if (new_ptl != old_ptl) spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); + flush_tlb_batched_pending(vma->vm_mm); arch_enter_lazy_mmu_mode(); for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE, diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 6759192e69de..915c60258935 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1579,14 +1579,14 @@ int move_freepages(struct zone *zone, #endif for (page = start_page; page <= end_page;) { - /* Make sure we are not inadvertently changing nodes */ - VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); - if (!pfn_valid_within(page_to_pfn(page))) { page++; continue; } + /* Make sure we are not inadvertently changing nodes */ + VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); + if (!PageBuddy(page)) { page++; continue; @@ -5953,8 +5953,8 @@ unsigned long free_reserved_area(void *start, void *end, int poison, char *s) } if (pages && s) - pr_info("Freeing %s memory: %ldK (%p - %p)\n", - s, pages << (PAGE_SHIFT - 10), start, end); + pr_info("Freeing %s memory: %ldK\n", + s, pages << (PAGE_SHIFT - 10)); return pages; } @@ -6910,7 +6910,7 @@ int alloc_contig_range(unsigned long start, unsigned long end, /* Make sure the range is really isolated. 
*/ if (test_pages_isolated(outer_start, end, false)) { - pr_info("%s: [%lx, %lx) PFNs busy\n", + pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n", __func__, outer_start, end); ret = -EBUSY; goto done; diff --git a/mm/page_owner.c b/mm/page_owner.c index 3a9a358e7c63..10b7f196b005 100644 --- a/mm/page_owner.c +++ b/mm/page_owner.c @@ -129,7 +129,7 @@ static noinline depot_stack_handle_t save_stack(gfp_t flags) .nr_entries = 0, .entries = entries, .max_entries = PAGE_OWNER_STACK_DEPTH, - .skip = 0 + .skip = 2 }; depot_stack_handle_t handle; diff --git a/mm/rmap.c b/mm/rmap.c index 59489b3b1ac6..cbaf273b0f97 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -649,6 +649,13 @@ static void set_tlb_ubc_flush_pending(struct mm_struct *mm, tlb_ubc->flush_required = true; /* + * Ensure compiler does not re-order the setting of tlb_flush_batched + * before the PTE is cleared. + */ + barrier(); + mm->tlb_flush_batched = true; + + /* * If the PTE was dirty then it's best to assume it's writable. The * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush() * before the page is queued for IO. @@ -675,6 +682,35 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags) return should_defer; } + +/* + * Reclaim unmaps pages under the PTL but do not flush the TLB prior to + * releasing the PTL if TLB flushes are batched. It's possible for a parallel + * operation such as mprotect or munmap to race between reclaim unmapping + * the page and flushing the page. If this race occurs, it potentially allows + * access to data via a stale TLB entry. Tracking all mm's that have TLB + * batching in flight would be expensive during reclaim so instead track + * whether TLB batching occurred in the past and if so then do a flush here + * if required. This will cost one additional flush per reclaim cycle paid + * by the first operation at risk such as mprotect and mumap. + * + * This must be called under the PTL so that an access to tlb_flush_batched + * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise + * via the PTL. + */ +void flush_tlb_batched_pending(struct mm_struct *mm) +{ + if (mm->tlb_flush_batched) { + flush_tlb_mm(mm); + + /* + * Do not allow the compiler to re-order the clearing of + * tlb_flush_batched before the tlb is flushed. 
+ */ + barrier(); + mm->tlb_flush_batched = false; + } +} #else static void set_tlb_ubc_flush_pending(struct mm_struct *mm, struct page *page, bool writable) diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c index abeccb56fbbd..b08d36c952dd 100644 --- a/net/bluetooth/bnep/core.c +++ b/net/bluetooth/bnep/core.c @@ -484,16 +484,16 @@ static int bnep_session(void *arg) struct net_device *dev = s->dev; struct sock *sk = s->sock->sk; struct sk_buff *skb; - wait_queue_t wait; + DEFINE_WAIT_FUNC(wait, woken_wake_function); BT_DBG(""); set_user_nice(current, -15); - init_waitqueue_entry(&wait, current); add_wait_queue(sk_sleep(sk), &wait); while (1) { - set_current_state(TASK_INTERRUPTIBLE); + /* Ensure session->terminate is updated */ + smp_mb__before_atomic(); if (atomic_read(&s->terminate)) break; @@ -515,9 +515,8 @@ static int bnep_session(void *arg) break; netif_wake_queue(dev); - schedule(); + wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); } - __set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); /* Cleanup session */ @@ -663,7 +662,7 @@ int bnep_del_connection(struct bnep_conndel_req *req) s = __bnep_get_session(req->dst); if (s) { atomic_inc(&s->terminate); - wake_up_process(s->task); + wake_up_interruptible(sk_sleep(s->sock->sk)); } else err = -ENOENT; diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c index 011747337858..77f73bfa840b 100644 --- a/net/bluetooth/cmtp/core.c +++ b/net/bluetooth/cmtp/core.c @@ -281,16 +281,16 @@ static int cmtp_session(void *arg) struct cmtp_session *session = arg; struct sock *sk = session->sock->sk; struct sk_buff *skb; - wait_queue_t wait; + DEFINE_WAIT_FUNC(wait, woken_wake_function); BT_DBG("session %pK", session); set_user_nice(current, -15); - init_waitqueue_entry(&wait, current); add_wait_queue(sk_sleep(sk), &wait); while (1) { - set_current_state(TASK_INTERRUPTIBLE); + /* Ensure session->terminate is updated */ + smp_mb__before_atomic(); if (atomic_read(&session->terminate)) break; @@ -307,9 +307,8 @@ static int cmtp_session(void *arg) cmtp_process_transmit(session); - schedule(); + wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); } - __set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); down_write(&cmtp_session_sem); @@ -394,7 +393,7 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock) err = cmtp_attach_device(session); if (err < 0) { atomic_inc(&session->terminate); - wake_up_process(session->task); + wake_up_interruptible(sk_sleep(session->sock->sk)); up_write(&cmtp_session_sem); return err; } @@ -432,7 +431,11 @@ int cmtp_del_connection(struct cmtp_conndel_req *req) /* Stop session thread */ atomic_inc(&session->terminate); - wake_up_process(session->task); + + /* Ensure session->terminate is updated */ + smp_mb__after_atomic(); + + wake_up_interruptible(sk_sleep(session->sock->sk)); } else err = -ENOENT; diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index f02ffe558a08..f64de569175a 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c @@ -36,6 +36,7 @@ #define VERSION "1.2" static DECLARE_RWSEM(hidp_session_sem); +static DECLARE_WAIT_QUEUE_HEAD(hidp_session_wq); static LIST_HEAD(hidp_session_list); static unsigned char hidp_keycode[256] = { @@ -1069,12 +1070,12 @@ static int hidp_session_start_sync(struct hidp_session *session) * Wake up session thread and notify it to stop. This is asynchronous and * returns immediately. 
Call this whenever a runtime error occurs and you want * the session to stop. - * Note: wake_up_process() performs any necessary memory-barriers for us. + * Note: wake_up_interruptible() performs any necessary memory-barriers for us. */ static void hidp_session_terminate(struct hidp_session *session) { atomic_inc(&session->terminate); - wake_up_process(session->task); + wake_up_interruptible(&hidp_session_wq); } /* @@ -1181,7 +1182,9 @@ static void hidp_session_run(struct hidp_session *session) struct sock *ctrl_sk = session->ctrl_sock->sk; struct sock *intr_sk = session->intr_sock->sk; struct sk_buff *skb; + DEFINE_WAIT_FUNC(wait, woken_wake_function); + add_wait_queue(&hidp_session_wq, &wait); for (;;) { /* * This thread can be woken up two ways: @@ -1189,12 +1192,10 @@ static void hidp_session_run(struct hidp_session *session) * session->terminate flag and wakes this thread up. * - Via modifying the socket state of ctrl/intr_sock. This * thread is woken up by ->sk_state_changed(). - * - * Note: set_current_state() performs any necessary - * memory-barriers for us. */ - set_current_state(TASK_INTERRUPTIBLE); + /* Ensure session->terminate is updated */ + smp_mb__before_atomic(); if (atomic_read(&session->terminate)) break; @@ -1228,11 +1229,22 @@ static void hidp_session_run(struct hidp_session *session) hidp_process_transmit(session, &session->ctrl_transmit, session->ctrl_sock); - schedule(); + wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); } + remove_wait_queue(&hidp_session_wq, &wait); atomic_inc(&session->terminate); - set_current_state(TASK_RUNNING); + + /* Ensure session->terminate is updated */ + smp_mb__after_atomic(); +} + +static int hidp_session_wake_function(wait_queue_t *wait, + unsigned int mode, + int sync, void *key) +{ + wake_up_interruptible(&hidp_session_wq); + return false; } /* @@ -1245,7 +1257,8 @@ static void hidp_session_run(struct hidp_session *session) static int hidp_session_thread(void *arg) { struct hidp_session *session = arg; - wait_queue_t ctrl_wait, intr_wait; + DEFINE_WAIT_FUNC(ctrl_wait, hidp_session_wake_function); + DEFINE_WAIT_FUNC(intr_wait, hidp_session_wake_function); BT_DBG("session %pK", session); @@ -1255,8 +1268,6 @@ static int hidp_session_thread(void *arg) set_user_nice(current, -15); hidp_set_timer(session); - init_waitqueue_entry(&ctrl_wait, current); - init_waitqueue_entry(&intr_wait, current); add_wait_queue(sk_sleep(session->ctrl_sock->sk), &ctrl_wait); add_wait_queue(sk_sleep(session->intr_sock->sk), &intr_wait); /* This memory barrier is paired with wq_has_sleeper(). 
See diff --git a/net/core/dev.c b/net/core/dev.c index 1ce844e41eaf..ccc6fe0e5ca1 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2553,7 +2553,7 @@ static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path) { if (tx_path) return skb->ip_summed != CHECKSUM_PARTIAL && - skb->ip_summed != CHECKSUM_NONE; + skb->ip_summed != CHECKSUM_UNNECESSARY; return skb->ip_summed == CHECKSUM_NONE; } diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c index b94b1d293506..151e047ce072 100644 --- a/net/core/dev_ioctl.c +++ b/net/core/dev_ioctl.c @@ -28,6 +28,7 @@ static int dev_ifname(struct net *net, struct ifreq __user *arg) if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) return -EFAULT; + ifr.ifr_name[IFNAMSIZ-1] = 0; error = netdev_get_name(net, ifr.ifr_name, ifr.ifr_ifindex); if (error) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 2ec5324a7ff7..5b3d611d8b5f 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1742,7 +1742,8 @@ static int do_setlink(const struct sk_buff *skb, struct sockaddr *sa; int len; - len = sizeof(sa_family_t) + dev->addr_len; + len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len, + sizeof(*sa)); sa = kmalloc(len, GFP_KERNEL); if (!sa) { err = -ENOMEM; diff --git a/net/dccp/feat.c b/net/dccp/feat.c index 1704948e6a12..f227f002c73d 100644 --- a/net/dccp/feat.c +++ b/net/dccp/feat.c @@ -1471,9 +1471,12 @@ int dccp_feat_init(struct sock *sk) * singleton values (which always leads to failure). * These settings can still (later) be overridden via sockopts. */ - if (ccid_get_builtin_ccids(&tx.val, &tx.len) || - ccid_get_builtin_ccids(&rx.val, &rx.len)) + if (ccid_get_builtin_ccids(&tx.val, &tx.len)) return -ENOBUFS; + if (ccid_get_builtin_ccids(&rx.val, &rx.len)) { + kfree(tx.val); + return -ENOBUFS; + } if (!dccp_feat_prefer(sysctl_dccp_tx_ccid, tx.val, tx.len) || !dccp_feat_prefer(sysctl_dccp_rx_ccid, rx.val, rx.len)) diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 6467bf392e1b..e217f17997a4 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -635,6 +635,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb) goto drop_and_free; inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT); + reqsk_put(req); return 0; drop_and_free: diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 3470ad1843bb..09a9ab65f4e1 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -376,6 +376,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb) goto drop_and_free; inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT); + reqsk_put(req); return 0; drop_and_free: diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 9fe25bf63296..b68168fcc06a 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -24,6 +24,7 @@ #include <net/checksum.h> #include <net/inet_sock.h> +#include <net/inet_common.h> #include <net/sock.h> #include <net/xfrm.h> @@ -170,6 +171,15 @@ const char *dccp_packet_name(const int type) EXPORT_SYMBOL_GPL(dccp_packet_name); +static void dccp_sk_destruct(struct sock *sk) +{ + struct dccp_sock *dp = dccp_sk(sk); + + ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk); + dp->dccps_hc_tx_ccid = NULL; + inet_sock_destruct(sk); +} + int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized) { struct dccp_sock *dp = dccp_sk(sk); @@ -179,6 +189,7 @@ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized) icsk->icsk_syn_retries = sysctl_dccp_request_retries; sk->sk_state = DCCP_CLOSED; sk->sk_write_space = dccp_write_space; + sk->sk_destruct = dccp_sk_destruct; icsk->icsk_sync_mss = 
dccp_sync_mss; dp->dccps_mss_cache = 536; dp->dccps_rate_last = jiffies; @@ -201,10 +212,7 @@ void dccp_destroy_sock(struct sock *sk) { struct dccp_sock *dp = dccp_sk(sk); - /* - * DCCP doesn't use sk_write_queue, just sk_send_head - * for retransmissions - */ + __skb_queue_purge(&sk->sk_write_queue); if (sk->sk_send_head != NULL) { kfree_skb(sk->sk_send_head); sk->sk_send_head = NULL; @@ -222,8 +230,7 @@ void dccp_destroy_sock(struct sock *sk) dp->dccps_hc_rx_ackvec = NULL; } ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk); - ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk); - dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL; + dp->dccps_hc_rx_ccid = NULL; /* clean up feature negotiation state */ dccp_feat_list_purge(&dp->dccps_featneg); diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 7c4c881a7187..ee94bd32d6dc 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -1320,13 +1320,14 @@ static struct pernet_operations fib_net_ops = { void __init ip_fib_init(void) { - rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL); - rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL); - rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL); + fib_trie_init(); register_pernet_subsys(&fib_net_ops); + register_netdevice_notifier(&fib_netdev_notifier); register_inetaddr_notifier(&fib_inetaddr_notifier); - fib_trie_init(); + rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL); + rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL); + rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL); } diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index b2504712259f..313e3c11a15a 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -1044,15 +1044,17 @@ struct fib_info *fib_create_info(struct fib_config *cfg) fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL); if (!fi) goto failure; - fib_info_cnt++; if (cfg->fc_mx) { fi->fib_metrics = kzalloc(sizeof(*fi->fib_metrics), GFP_KERNEL); - if (!fi->fib_metrics) - goto failure; + if (unlikely(!fi->fib_metrics)) { + kfree(fi); + return ERR_PTR(err); + } atomic_set(&fi->fib_metrics->refcnt, 1); - } else + } else { fi->fib_metrics = (struct dst_metrics *)&dst_default_metrics; - + } + fib_info_cnt++; fi->fib_net = net; fi->fib_protocol = cfg->fc_protocol; fi->fib_scope = cfg->fc_scope; diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 661bda968594..62e41d38da78 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -922,10 +922,12 @@ static int __ip_append_data(struct sock *sk, csummode = CHECKSUM_PARTIAL; cork->length += length; - if (((length > mtu) || (skb && skb_is_gso(skb))) && + if ((skb && skb_is_gso(skb)) || + (((length + (skb ? 
skb->len : fragheaderlen)) > mtu) && + (skb_queue_len(queue) <= 1) && (sk->sk_protocol == IPPROTO_UDP) && (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len && - (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) { + (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx)) { err = ip_ufo_append_data(sk, queue, getfrag, from, length, hh_len, fragheaderlen, transhdrlen, maxfraglen, flags); @@ -1241,6 +1243,7 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page, return -EINVAL; if ((size + skb->len > mtu) && + (skb_queue_len(&sk->sk_write_queue) == 1) && (sk->sk_protocol == IPPROTO_UDP) && (rt->dst.dev->features & NETIF_F_UFO)) { if (skb->ip_summed != CHECKSUM_PARTIAL) diff --git a/net/ipv4/route.c b/net/ipv4/route.c index fd15e55b28d1..5bdc0caa7f4c 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1247,7 +1247,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst) if (mtu) return mtu; - mtu = dst->dev->mtu; + mtu = READ_ONCE(dst->dev->mtu); if (unlikely(dst_metric_locked(dst, RTAX_MTU))) { if (rt->rt_uses_gateway && mtu > 576) diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 2dc982b15df8..a2e1142145df 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -337,6 +337,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) treq = tcp_rsk(req); treq->rcv_isn = ntohl(th->seq) - 1; treq->snt_isn = cookie; + treq->txhash = net_tx_rndhash(); req->mss = mss; ireq->ir_num = ntohs(th->dest); ireq->ir_rmt_port = th->source; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 69d52fee247e..0047b151e8e8 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -2504,8 +2504,8 @@ static inline void tcp_end_cwnd_reduction(struct sock *sk) struct tcp_sock *tp = tcp_sk(sk); /* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */ - if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || - (tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) { + if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH && + (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) { tp->snd_cwnd = tp->snd_ssthresh; tp->snd_cwnd_stamp = tcp_time_stamp; } @@ -3029,8 +3029,7 @@ void tcp_rearm_rto(struct sock *sk) /* delta may not be positive if the socket is locked * when the retrans timer fires and is rescheduled. */ - if (delta > 0) - rto = delta; + rto = max(delta, 1); } inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto, TCP_RTO_MAX); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 2ca323b68efd..4e88f93f71c8 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -3256,6 +3256,9 @@ int tcp_connect(struct sock *sk) struct sk_buff *buff; int err; + if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) + return -EHOSTUNREACH; /* Routing failure or similar. 
*/ + tcp_connect_init(sk); if (unlikely(tp->repair)) { diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 4aef80d30fab..c1a84472cc0c 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -640,7 +640,8 @@ static void tcp_keepalive_timer (unsigned long data) goto death; } - if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE) + if (!sock_flag(sk, SOCK_KEEPOPEN) || + ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT))) goto out; elapsed = keepalive_time_when(tp); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 254fcc7f1825..4d6f09c05a12 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -824,7 +824,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4) if (is_udplite) /* UDP-Lite */ csum = udplite_csum(skb); - else if (sk->sk_no_check_tx) { /* UDP csum disabled */ + else if (sk->sk_no_check_tx && !skb_is_gso(skb)) { /* UDP csum off */ skb->ip_summed = CHECKSUM_NONE; goto send; diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 6396f1c80ae9..6dfc3daf7c21 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -231,7 +231,7 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, if (uh->check == 0) uh->check = CSUM_MANGLED_0; - skb->ip_summed = CHECKSUM_NONE; + skb->ip_summed = CHECKSUM_UNNECESSARY; /* Fragment the skb. IP headers of the fragments are updated in * inet_gso_segment() diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index f60e8caea767..aad8cdf15472 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -892,6 +892,8 @@ add: } nsiblings = iter->rt6i_nsiblings; fib6_purge_rt(iter, fn, info->nl_net); + if (fn->rr_ptr == iter) + fn->rr_ptr = NULL; rt6_release(iter); if (nsiblings) { @@ -904,6 +906,8 @@ add: if (rt6_qualify_for_ecmp(iter)) { *ins = iter->dst.rt6_next; fib6_purge_rt(iter, fn, info->nl_net); + if (fn->rr_ptr == iter) + fn->rr_ptr = NULL; rt6_release(iter); nsiblings--; } else { @@ -992,7 +996,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, /* Create subtree root node */ sfn = node_alloc(); if (!sfn) - goto st_failure; + goto failure; sfn->leaf = info->nl_net->ipv6.ip6_null_entry; atomic_inc(&info->nl_net->ipv6.ip6_null_entry->rt6i_ref); @@ -1008,12 +1012,12 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, if (IS_ERR(sn)) { /* If it is failed, discard just allocated - root, and then (in st_failure) stale node + root, and then (in failure) stale node in main tree. */ node_free(sfn); err = PTR_ERR(sn); - goto st_failure; + goto failure; } /* Now link new subtree to main tree */ @@ -1027,7 +1031,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, if (IS_ERR(sn)) { err = PTR_ERR(sn); - goto st_failure; + goto failure; } } @@ -1069,22 +1073,22 @@ out: atomic_inc(&pn->leaf->rt6i_ref); } #endif - if (!(rt->dst.flags & DST_NOCACHE)) - dst_free(&rt->dst); + goto failure; } return err; -#ifdef CONFIG_IPV6_SUBTREES - /* Subtree creation failed, probably main tree node - is orphan. If it is, shoot it. +failure: + /* fn->leaf could be NULL if fn is an intermediate node and we + * failed to add the new route to it in both subtree creation + * failure and fib6_add_rt2node() failure case. + * In both cases, fib6_repair_tree() should be called to fix + * fn->leaf. 
*/ -st_failure: if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT))) fib6_repair_tree(info->nl_net, fn); if (!(rt->dst.flags & DST_NOCACHE)) dst_free(&rt->dst); return err; -#endif } /* diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 6177acdf60ac..635742a6d73f 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -647,8 +647,6 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, *prevhdr = NEXTHDR_FRAGMENT; tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC); if (!tmp_hdr) { - IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), - IPSTATS_MIB_FRAGFAILS); err = -ENOMEM; goto fail; } @@ -767,8 +765,6 @@ slow_path: frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) + hroom + troom, GFP_ATOMIC); if (!frag) { - IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), - IPSTATS_MIB_FRAGFAILS); err = -ENOMEM; goto fail; } @@ -1360,11 +1356,12 @@ emsgsize: */ cork->length += length; - if ((((length + fragheaderlen) > mtu) || - (skb && skb_is_gso(skb))) && + if ((skb && skb_is_gso(skb)) || + (((length + (skb ? skb->len : headersize)) > mtu) && + (skb_queue_len(queue) <= 1) && (sk->sk_protocol == IPPROTO_UDP) && (rt->dst.dev->features & NETIF_F_UFO) && - (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) { + (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk))) { err = ip6_ufo_append_data(sk, queue, getfrag, from, length, hh_len, fragheaderlen, exthdrlen, transhdrlen, mtu, flags, fl6); diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c index 8b56c5240429..f9f02581c4ca 100644 --- a/net/ipv6/output_core.c +++ b/net/ipv6/output_core.c @@ -78,7 +78,7 @@ EXPORT_SYMBOL(ipv6_select_ident); int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) { - u16 offset = sizeof(struct ipv6hdr); + unsigned int offset = sizeof(struct ipv6hdr); unsigned int packet_len = skb_tail_pointer(skb) - skb_network_header(skb); int found_rhdr = 0; @@ -86,6 +86,7 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) while (offset <= packet_len) { struct ipv6_opt_hdr *exthdr; + unsigned int len; switch (**nexthdr) { @@ -111,7 +112,10 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) + offset); - offset += ipv6_optlen(exthdr); + len = ipv6_optlen(exthdr); + if (len + offset >= IPV6_MAXPLEN) + return -EINVAL; + offset += len; *nexthdr = &exthdr->nexthdr; } diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index 336843ca4e6b..7f3667635431 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c @@ -210,6 +210,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) treq->snt_synack.v64 = 0; treq->rcv_isn = ntohl(th->seq) - 1; treq->snt_isn = cookie; + treq->txhash = net_tx_rndhash(); /* * We need to lookup the dst_entry to get the correct window size. 
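The ip6_find_1stfragopt() change above widens the header-walk offset from u16 to unsigned int and rejects option chains whose cumulative length reaches IPV6_MAXPLEN. A minimal user-space sketch of the wraparound being guarded against (illustrative only, not part of the patch; the 40-byte base and 2048-byte per-header maximum mirror sizeof(struct ipv6hdr) and the largest possible ipv6_optlen() result):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t off16 = 40;           /* old: u16 offset = sizeof(struct ipv6hdr)  */
	unsigned int off = 40;         /* new: unsigned int offset                  */
	unsigned int optlen = 2048;    /* largest ipv6_optlen(): (255 + 1) << 3     */
	int i;

	for (i = 0; i < 33; i++) {
		off16 += optlen;       /* wraps past 65535 back to a small value    */
		off += optlen;         /* keeps growing and can be range-checked    */
	}
	printf("u16 offset=%u, unsigned offset=%u (IPV6_MAXPLEN=65535)\n",
	       off16, off);
	return 0;
}

With the patch, len + offset is compared against IPV6_MAXPLEN before the addition is committed, so a crafted extension-header chain can no longer wrap the walk back into already-parsed data.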
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index a8cabc876348..329ae3ccfa35 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -1073,6 +1073,7 @@ static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb, */ offset = skb_transport_offset(skb); skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); + csum = skb->csum; skb->ip_summed = CHECKSUM_NONE; diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c index 01582966ffa0..2e3c12eeca07 100644 --- a/net/ipv6/udp_offload.c +++ b/net/ipv6/udp_offload.c @@ -86,7 +86,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, if (uh->check == 0) uh->check = CSUM_MANGLED_0; - skb->ip_summed = CHECKSUM_NONE; + skb->ip_summed = CHECKSUM_UNNECESSARY; /* Check if there is enough headroom to insert fragment header. */ tnl_hlen = skb_tnl_header_len(skb); diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index 8d2f7c9b491d..4a116d766c15 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c @@ -2227,7 +2227,7 @@ static int irda_getsockopt(struct socket *sock, int level, int optname, { struct sock *sk = sock->sk; struct irda_sock *self = irda_sk(sk); - struct irda_device_list list; + struct irda_device_list list = { 0 }; struct irda_device_info *discoveries; struct irda_ias_set * ias_opt; /* IAS get/query params */ struct ias_object * ias_obj; /* Object in IAS */ diff --git a/net/key/af_key.c b/net/key/af_key.c index 2e1050ec2cf0..94bf810ad242 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -228,7 +228,7 @@ static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2, #define BROADCAST_ONE 1 #define BROADCAST_REGISTERED 2 #define BROADCAST_PROMISC_ONLY 4 -static int pfkey_broadcast(struct sk_buff *skb, +static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation, int broadcast_flags, struct sock *one_sk, struct net *net) { @@ -278,7 +278,7 @@ static int pfkey_broadcast(struct sk_buff *skb, rcu_read_unlock(); if (one_sk != NULL) - err = pfkey_broadcast_one(skb, &skb2, GFP_KERNEL, one_sk); + err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk); kfree_skb(skb2); kfree_skb(skb); @@ -311,7 +311,7 @@ static int pfkey_do_dump(struct pfkey_sock *pfk) hdr = (struct sadb_msg *) pfk->dump.skb->data; hdr->sadb_msg_seq = 0; hdr->sadb_msg_errno = rc; - pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE, + pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE, &pfk->sk, sock_net(&pfk->sk)); pfk->dump.skb = NULL; } @@ -355,7 +355,7 @@ static int pfkey_error(const struct sadb_msg *orig, int err, struct sock *sk) hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); - pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk)); + pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ONE, sk, sock_net(sk)); return 0; } @@ -1396,7 +1396,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, const struct sadb_ xfrm_state_put(x); - pfkey_broadcast(resp_skb, BROADCAST_ONE, sk, net); + pfkey_broadcast(resp_skb, GFP_KERNEL, BROADCAST_ONE, sk, net); return 0; } @@ -1483,7 +1483,7 @@ static int key_notify_sa(struct xfrm_state *x, const struct km_event *c) hdr->sadb_msg_seq = c->seq; hdr->sadb_msg_pid = c->portid; - pfkey_broadcast(skb, BROADCAST_ALL, NULL, xs_net(x)); + pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xs_net(x)); return 0; } @@ -1596,7 +1596,7 @@ static int pfkey_get(struct sock *sk, struct sk_buff *skb, const struct sadb_msg out_hdr->sadb_msg_reserved = 0; out_hdr->sadb_msg_seq = hdr->sadb_msg_seq; out_hdr->sadb_msg_pid = hdr->sadb_msg_pid; - pfkey_broadcast(out_skb, BROADCAST_ONE, sk, 
sock_net(sk)); + pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, sock_net(sk)); return 0; } @@ -1701,8 +1701,8 @@ static int pfkey_register(struct sock *sk, struct sk_buff *skb, const struct sad return -ENOBUFS; } - pfkey_broadcast(supp_skb, BROADCAST_REGISTERED, sk, sock_net(sk)); - + pfkey_broadcast(supp_skb, GFP_KERNEL, BROADCAST_REGISTERED, sk, + sock_net(sk)); return 0; } @@ -1720,7 +1720,8 @@ static int unicast_flush_resp(struct sock *sk, const struct sadb_msg *ihdr) hdr->sadb_msg_errno = (uint8_t) 0; hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); - return pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk)); + return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ONE, sk, + sock_net(sk)); } static int key_notify_sa_flush(const struct km_event *c) @@ -1741,7 +1742,7 @@ static int key_notify_sa_flush(const struct km_event *c) hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); hdr->sadb_msg_reserved = 0; - pfkey_broadcast(skb, BROADCAST_ALL, NULL, c->net); + pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net); return 0; } @@ -1798,7 +1799,7 @@ static int dump_sa(struct xfrm_state *x, int count, void *ptr) out_hdr->sadb_msg_pid = pfk->dump.msg_portid; if (pfk->dump.skb) - pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE, + pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE, &pfk->sk, sock_net(&pfk->sk)); pfk->dump.skb = out_skb; @@ -1886,7 +1887,7 @@ static int pfkey_promisc(struct sock *sk, struct sk_buff *skb, const struct sadb new_hdr->sadb_msg_errno = 0; } - pfkey_broadcast(skb, BROADCAST_ALL, NULL, sock_net(sk)); + pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ALL, NULL, sock_net(sk)); return 0; } @@ -2219,7 +2220,7 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, const struct km_ev out_hdr->sadb_msg_errno = 0; out_hdr->sadb_msg_seq = c->seq; out_hdr->sadb_msg_pid = c->portid; - pfkey_broadcast(out_skb, BROADCAST_ALL, NULL, xp_net(xp)); + pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp)); return 0; } @@ -2439,7 +2440,7 @@ static int key_pol_get_resp(struct sock *sk, struct xfrm_policy *xp, const struc out_hdr->sadb_msg_errno = 0; out_hdr->sadb_msg_seq = hdr->sadb_msg_seq; out_hdr->sadb_msg_pid = hdr->sadb_msg_pid; - pfkey_broadcast(out_skb, BROADCAST_ONE, sk, xp_net(xp)); + pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, xp_net(xp)); err = 0; out: @@ -2695,7 +2696,7 @@ static int dump_sp(struct xfrm_policy *xp, int dir, int count, void *ptr) out_hdr->sadb_msg_pid = pfk->dump.msg_portid; if (pfk->dump.skb) - pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE, + pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE, &pfk->sk, sock_net(&pfk->sk)); pfk->dump.skb = out_skb; @@ -2752,7 +2753,7 @@ static int key_notify_policy_flush(const struct km_event *c) hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC; hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); hdr->sadb_msg_reserved = 0; - pfkey_broadcast(skb_out, BROADCAST_ALL, NULL, c->net); + pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net); return 0; } @@ -2814,7 +2815,7 @@ static int pfkey_process(struct sock *sk, struct sk_buff *skb, const struct sadb void *ext_hdrs[SADB_EXT_MAX]; int err; - pfkey_broadcast(skb_clone(skb, GFP_KERNEL), + pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL, BROADCAST_PROMISC_ONLY, NULL, sock_net(sk)); memset(ext_hdrs, 0, sizeof(ext_hdrs)); @@ -3036,7 +3037,8 @@ static int key_notify_sa_expire(struct xfrm_state *x, const struct km_event *c) out_hdr->sadb_msg_seq = 0; out_hdr->sadb_msg_pid = 
0; - pfkey_broadcast(out_skb, BROADCAST_REGISTERED, NULL, xs_net(x)); + pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL, + xs_net(x)); return 0; } @@ -3226,7 +3228,8 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_ctx->ctx_len); } - return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x)); + return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL, + xs_net(x)); } static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt, @@ -3424,7 +3427,8 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, n_port->sadb_x_nat_t_port_port = sport; n_port->sadb_x_nat_t_port_reserved = 0; - return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x)); + return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL, + xs_net(x)); } #ifdef CONFIG_NET_KEY_MIGRATE @@ -3616,7 +3620,7 @@ static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, } /* broadcast migrate message to sockets */ - pfkey_broadcast(skb, BROADCAST_ALL, NULL, &init_net); + pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, &init_net); return 0; diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c index 1a9545965c0d..531ca55f1af6 100644 --- a/net/netfilter/nf_conntrack_extend.c +++ b/net/netfilter/nf_conntrack_extend.c @@ -53,7 +53,11 @@ nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, rcu_read_lock(); t = rcu_dereference(nf_ct_ext_types[id]); - BUG_ON(t == NULL); + if (!t) { + rcu_read_unlock(); + return NULL; + } + off = ALIGN(sizeof(struct nf_ct_ext), t->align); len = off + t->len + var_alloc_len; alloc_size = t->alloc_size + var_alloc_len; @@ -88,7 +92,10 @@ void *__nf_ct_ext_add_length(struct nf_conn *ct, enum nf_ct_ext_id id, rcu_read_lock(); t = rcu_dereference(nf_ct_ext_types[id]); - BUG_ON(t == NULL); + if (!t) { + rcu_read_unlock(); + return NULL; + } newoff = ALIGN(old->len, t->align); newlen = newoff + t->len + var_alloc_len; @@ -186,6 +193,6 @@ void nf_ct_extend_unregister(struct nf_ct_ext_type *type) RCU_INIT_POINTER(nf_ct_ext_types[type->id], NULL); update_alloc_size(type); mutex_unlock(&nf_ct_ext_type_mutex); - rcu_barrier(); /* Wait for completion of call_rcu()'s */ + synchronize_rcu(); } EXPORT_SYMBOL_GPL(nf_ct_extend_unregister); diff --git a/net/netfilter/xt_qtaguid.c b/net/netfilter/xt_qtaguid.c index fef69cb21f6f..b0e0555e79ad 100644 --- a/net/netfilter/xt_qtaguid.c +++ b/net/netfilter/xt_qtaguid.c @@ -1713,18 +1713,9 @@ static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par) } MT_DEBUG("qtaguid[%d]: sk=%p got_sock=%d fam=%d proto=%d\n", par->hooknum, sk, got_sock, par->family, ipx_proto(skb, par)); - if (sk != NULL) { - set_sk_callback_lock = true; - read_lock_bh(&sk->sk_callback_lock); - MT_DEBUG("qtaguid[%d]: sk=%p->sk_socket=%p->file=%p\n", - par->hooknum, sk, sk->sk_socket, - sk->sk_socket ? sk->sk_socket->file : (void *)-1LL); - filp = sk->sk_socket ? sk->sk_socket->file : NULL; - MT_DEBUG("qtaguid[%d]: filp...uid=%u\n", - par->hooknum, filp ? 
from_kuid(&init_user_ns, filp->f_cred->fsuid) : -1); - } - if (sk == NULL || sk->sk_socket == NULL) { + + if (sk == NULL) { /* * Here, the qtaguid_find_sk() using connection tracking * couldn't find the owner, so for now we just count them @@ -1732,9 +1723,7 @@ static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par) */ if (do_tag_stat) account_for_uid(skb, sk, 0, par); - MT_DEBUG("qtaguid[%d]: leaving (sk?sk->sk_socket)=%p\n", - par->hooknum, - sk ? sk->sk_socket : NULL); + MT_DEBUG("qtaguid[%d]: leaving (sk=NULL)\n", par->hooknum); res = (info->match ^ info->invert) == 0; atomic64_inc(&qtu_events.match_no_sk); goto put_sock_ret_res; @@ -1742,17 +1731,7 @@ static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par) res = false; goto put_sock_ret_res; } - filp = sk->sk_socket->file; - if (filp == NULL) { - MT_DEBUG("qtaguid[%d]: leaving filp=NULL\n", par->hooknum); - if (do_tag_stat) - account_for_uid(skb, sk, 0, par); - res = ((info->match ^ info->invert) & - (XT_QTAGUID_UID | XT_QTAGUID_GID)) == 0; - atomic64_inc(&qtu_events.match_no_sk_file); - goto put_sock_ret_res; - } - sock_uid = filp->f_cred->fsuid; + sock_uid = sk->sk_uid; if (do_tag_stat) account_for_uid(skb, sk, from_kuid(&init_user_ns, sock_uid), par); @@ -1766,8 +1745,8 @@ static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par) kuid_t uid_min = make_kuid(&init_user_ns, info->uid_min); kuid_t uid_max = make_kuid(&init_user_ns, info->uid_max); - if ((uid_gte(filp->f_cred->fsuid, uid_min) && - uid_lte(filp->f_cred->fsuid, uid_max)) ^ + if ((uid_gte(sk->sk_uid, uid_min) && + uid_lte(sk->sk_uid, uid_max)) ^ !(info->invert & XT_QTAGUID_UID)) { MT_DEBUG("qtaguid[%d]: leaving uid not matching\n", par->hooknum); @@ -1778,7 +1757,19 @@ static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par) if (info->match & XT_QTAGUID_GID) { kgid_t gid_min = make_kgid(&init_user_ns, info->gid_min); kgid_t gid_max = make_kgid(&init_user_ns, info->gid_max); - + set_sk_callback_lock = true; + read_lock_bh(&sk->sk_callback_lock); + MT_DEBUG("qtaguid[%d]: sk=%pK->sk_socket=%pK->file=%pK\n", + par->hooknum, sk, sk->sk_socket, + sk->sk_socket ? sk->sk_socket->file : (void *)-1LL); + filp = sk->sk_socket ? sk->sk_socket->file : NULL; + if (!filp) { + res = ((info->match ^ info->invert) & XT_QTAGUID_GID) == 0; + atomic64_inc(&qtu_events.match_no_sk_gid); + goto put_sock_ret_res; + } + MT_DEBUG("qtaguid[%d]: filp...uid=%u\n", + par->hooknum, filp ? from_kuid(&init_user_ns, filp->f_cred->fsuid) : -1); if ((gid_gte(filp->f_cred->fsgid, gid_min) && gid_lte(filp->f_cred->fsgid, gid_max)) ^ !(info->invert & XT_QTAGUID_GID)) { @@ -1950,7 +1941,7 @@ static int qtaguid_ctrl_proc_show(struct seq_file *m, void *v) "match_found_sk_in_ct=%llu " "match_found_no_sk_in_ct=%llu " "match_no_sk=%llu " - "match_no_sk_file=%llu\n", + "match_no_sk_gid=%llu\n", (u64)atomic64_read(&qtu_events.sockets_tagged), (u64)atomic64_read(&qtu_events.sockets_untagged), (u64)atomic64_read(&qtu_events.counter_set_changes), @@ -1962,7 +1953,7 @@ static int qtaguid_ctrl_proc_show(struct seq_file *m, void *v) (u64)atomic64_read(&qtu_events.match_found_sk_in_ct), (u64)atomic64_read(&qtu_events.match_found_no_sk_in_ct), (u64)atomic64_read(&qtu_events.match_no_sk), - (u64)atomic64_read(&qtu_events.match_no_sk_file)); + (u64)atomic64_read(&qtu_events.match_no_sk_gid)); /* Count the following as part of the last item_index. 
No need * to lock the sock_tag_list here since it is already locked when diff --git a/net/netfilter/xt_qtaguid_internal.h b/net/netfilter/xt_qtaguid_internal.h index 8178fbdfb036..c7052707a6a4 100644 --- a/net/netfilter/xt_qtaguid_internal.h +++ b/net/netfilter/xt_qtaguid_internal.h @@ -289,10 +289,10 @@ struct qtaguid_event_counts { */ atomic64_t match_no_sk; /* - * The file ptr in the sk_socket wasn't there. + * The file ptr in the sk_socket wasn't there and we couldn't get GID. * This might happen for traffic while the socket is being closed. */ - atomic64_t match_no_sk_file; + atomic64_t match_no_sk_gid; }; /* Track the set active_set for the given tag. */ diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c index 8a2a489b2cd3..ede54061c554 100644 --- a/net/netfilter/xt_socket.c +++ b/net/netfilter/xt_socket.c @@ -237,7 +237,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par, transparent = xt_socket_sk_is_transparent(sk); if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard && - transparent) + transparent && sk_fullsock(sk)) pskb->mark = sk->sk_mark; sock_gen_put(sk); @@ -419,7 +419,7 @@ socket_mt6_v1_v2_v3(const struct sk_buff *skb, struct xt_action_param *par) transparent = xt_socket_sk_is_transparent(sk); if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard && - transparent) + transparent && sk_fullsock(sk)) pskb->mark = sk->sk_mark; if (sk != skb->sk) diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c index 2b0f0ac498d2..5a58f9f38095 100644 --- a/net/nfc/hci/core.c +++ b/net/nfc/hci/core.c @@ -209,6 +209,11 @@ void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd, } create_info = (struct hci_create_pipe_resp *)skb->data; + if (create_info->pipe >= NFC_HCI_MAX_PIPES) { + status = NFC_HCI_ANY_E_NOK; + goto exit; + } + /* Save the new created pipe and bind with local gate, * the description for skb->data[3] is destination gate id * but since we received this cmd from host controller, we @@ -232,6 +237,11 @@ void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd, } delete_info = (struct hci_delete_pipe_noti *)skb->data; + if (delete_info->pipe >= NFC_HCI_MAX_PIPES) { + status = NFC_HCI_ANY_E_NOK; + goto exit; + } + hdev->pipes[delete_info->pipe].gate = NFC_HCI_INVALID_GATE; hdev->pipes[delete_info->pipe].dest_host = NFC_HCI_INVALID_HOST; break; diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index ad58d2a6284e..6a2507f24b0f 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -577,8 +577,8 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info, nla_for_each_nested(a, attr, rem) { int type = nla_type(a); - int maxlen = ovs_ct_attr_lens[type].maxlen; - int minlen = ovs_ct_attr_lens[type].minlen; + int maxlen; + int minlen; if (type > OVS_CT_ATTR_MAX) { OVS_NLERR(log, @@ -586,6 +586,9 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info, type, OVS_CT_ATTR_MAX); return -EINVAL; } + + maxlen = ovs_ct_attr_lens[type].maxlen; + minlen = ovs_ct_attr_lens[type].minlen; if (nla_len(a) < minlen || nla_len(a) > maxlen) { OVS_NLERR(log, "Conntrack attr type has unexpected length (type=%d, length=%d, expected=%d)", diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index f8d6a0ca9c03..148ec130d99d 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -3622,14 +3622,19 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv if (optlen != sizeof(val)) return -EINVAL; - if 
(po->rx_ring.pg_vec || po->tx_ring.pg_vec) - return -EBUSY; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; if (val > INT_MAX) return -EINVAL; - po->tp_reserve = val; - return 0; + lock_sock(sk); + if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { + ret = -EBUSY; + } else { + po->tp_reserve = val; + ret = 0; + } + release_sock(sk); + return ret; } case PACKET_LOSS: { @@ -4225,7 +4230,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, register_prot_hook(sk); } spin_unlock(&po->bind_lock); - if (closing && (po->tp_version > TPACKET_V2)) { + if (pg_vec && (po->tp_version > TPACKET_V2)) { /* Because we don't support block-based V3 on tx-ring */ if (!tx_ring) prb_shutdown_retire_blk_timer(po, rb_queue); diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index d05869646515..075b0d22f213 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c @@ -34,6 +34,7 @@ static int ipt_init_target(struct xt_entry_target *t, char *table, unsigned int { struct xt_tgchk_param par; struct xt_target *target; + struct ipt_entry e = {}; int ret = 0; target = xt_request_find_target(AF_INET, t->u.user.name, @@ -42,8 +43,9 @@ static int ipt_init_target(struct xt_entry_target *t, char *table, unsigned int return PTR_ERR(target); t->u.kernel.target = target; + memset(&par, 0, sizeof(par)); par.table = table; - par.entryinfo = NULL; + par.entryinfo = &e; par.target = target; par.targinfo = t->data; par.hook_mask = hook; diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index 4431e2833e45..3f2c3eed04da 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -434,6 +434,7 @@ congestion_drop: qdisc_drop(head, sch); slot_queue_add(slot, skb); + qdisc_tree_reduce_backlog(sch, 0, delta); return NET_XMIT_CN; } @@ -465,8 +466,10 @@ enqueue: /* Return Congestion Notification only if we dropped a packet * from this flow. */ - if (qlen != slot->qlen) + if (qlen != slot->qlen) { + qdisc_tree_reduce_backlog(sch, 0, dropped - qdisc_pkt_len(skb)); return NET_XMIT_CN; + } /* As we dropped a packet, better let upper stack know this */ qdisc_tree_reduce_backlog(sch, 1, dropped); diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 7527c168e471..e33e9bd4ed5a 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -510,7 +510,9 @@ static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr, { addr->sa.sa_family = AF_INET6; addr->v6.sin6_port = port; + addr->v6.sin6_flowinfo = 0; addr->v6.sin6_addr = *saddr; + addr->v6.sin6_scope_id = 0; } /* Compare addresses exactly. 
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c index a0c90572d0e5..f86c6555a539 100644 --- a/net/tipc/netlink_compat.c +++ b/net/tipc/netlink_compat.c @@ -258,13 +258,15 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, arg = nlmsg_new(0, GFP_KERNEL); if (!arg) { kfree_skb(msg->rep); + msg->rep = NULL; return -ENOMEM; } err = __tipc_nl_compat_dumpit(cmd, msg, arg); - if (err) + if (err) { kfree_skb(msg->rep); - + msg->rep = NULL; + } kfree_skb(arg); return err; diff --git a/net/wireless/db.txt b/net/wireless/db.txt index 0727a6e9f780..86005410a22f 100644 --- a/net/wireless/db.txt +++ b/net/wireless/db.txt @@ -224,17 +224,16 @@ country BY: DFS-ETSI (5490 - 5710 @ 160), (30), DFS country BZ: - (2402 - 2482 @ 40), (36) - (5170 - 5330 @ 160), (27) - (5490 - 5730 @ 160), (36) - (5735 - 5835 @ 80), (36) + (2402 - 2482 @ 40), (20) + (5170 - 5330 @ 160), (23) + (5490 - 5730 @ 160), (30) + (5735 - 5835 @ 80), (30) country CA: DFS-FCC (2402 - 2472 @ 40), (30) (5170 - 5250 @ 80), (24), AUTO-BW (5250 - 5330 @ 80), (24), DFS, AUTO-BW - (5490 - 5590 @ 80), (24), DFS - (5650 - 5730 @ 80), (24), DFS + (5490 - 5730 @ 160), (24), DFS (5735 - 5835 @ 80), (30) # 60 gHz band channels 1-3 (57240 - 63720 @ 2160), (40) @@ -683,7 +682,13 @@ country IL: DFS-ETSI country IN: (2402 - 2482 @ 40), (20) (5170 - 5330 @ 160), (23) - (5735 - 5835 @ 80), (30) + (5735 - 5835 @ 80), (33) + +country IQ: DFS-ETSI + (2402 - 2482 @ 40), (20) + (5170 - 5250 @ 80), (23), AUTO-BW + (5250 - 5330 @ 80), (23), DFS, AUTO-BW + (5490 - 5710 @ 160), (30), DFS country IS: DFS-ETSI (2402 - 2482 @ 40), (20) @@ -737,7 +742,6 @@ country JO: country JP: DFS-JP (2402 - 2482 @ 40), (20) - (2474 - 2494 @ 20), (20), NO-OFDM (5170 - 5250 @ 80), (20), AUTO-BW, NO-OUTDOOR (5250 - 5330 @ 80), (20), DFS, AUTO-BW, NO-OUTDOOR (5490 - 5710 @ 160), (20), DFS @@ -759,7 +763,7 @@ country KH: DFS-ETSI country KN: DFS-FCC (2402 - 2482 @ 40), (20) (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (23), DFS, AUTO-BW + (5250 - 5330 @ 80), (30), DFS, AUTO-BW (5490 - 5710 @ 160), (30), DFS (5735 - 5815 @ 80), (30) @@ -1010,7 +1014,7 @@ country MY: DFS-FCC (5170 - 5250 @ 80), (24), AUTO-BW (5250 - 5330 @ 80), (24), DFS, AUTO-BW (5490 - 5650 @ 160), (24), DFS - (5735 - 5815 @ 80), (24) + (5735 - 5835 @ 80), (24) # 60 gHz band channels 1-3 (57240 - 63720 @ 2160), (40) @@ -1090,7 +1094,7 @@ country OM: DFS-ETSI (5490 - 5710 @ 160), (30), DFS country PA: - (2402 - 2472 @ 40), (30) + (2402 - 2472 @ 40), (36) (5170 - 5250 @ 80), (23), AUT0-BW (5250 - 5330 @ 80), (30), AUTO-BW (5735 - 5835 @ 80), (36) @@ -1375,9 +1379,9 @@ country TR: DFS-ETSI country TT: (2402 - 2482 @ 40), (20) - (5170 - 5330 @ 160), (27) - (5490 - 5730 @ 160), (36) - (5735 - 5835 @ 80), (36) + (5170 - 5330 @ 160), (24) + (5490 - 5730 @ 160), (24) + (5735 - 5835 @ 80), (30) # 60 gHz band channels 1-3, FCC (57240 - 63720 @ 2160), (40) @@ -1451,7 +1455,7 @@ country UY: DFS-FCC country UZ: DFS-ETSI (2402 - 2482 @ 40), (20) (5170 - 5250 @ 80), (23), AUTO-BW - (5250 - 5330 @ 80), (20), DFS, AUTO-BW + (5250 - 5330 @ 80), (23), DFS, AUTO-BW country VC: DFS-ETSI (2402 - 2482 @ 40), (20) diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 2a9ec3e05c73..66b21d22f512 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -487,7 +487,8 @@ nl80211_coalesce_policy[NUM_NL80211_ATTR_COALESCE_RULE] = { /* policy for GTK rekey offload attributes */ static const struct nla_policy nl80211_rekey_policy[NUM_NL80211_REKEY_DATA] = { - 
[NL80211_REKEY_DATA_KEK] = { .len = NL80211_KEK_LEN }, + [NL80211_REKEY_DATA_KEK] = { .type = NLA_BINARY, + .len = FILS_MAX_KEK_LEN }, [NL80211_REKEY_DATA_KCK] = { .len = NL80211_KCK_LEN }, [NL80211_REKEY_DATA_REPLAY_CTR] = { .len = NL80211_REPLAY_CTR_LEN }, }; @@ -8595,6 +8596,45 @@ static int nl80211_update_connect_params(struct sk_buff *skb, changed |= UPDATE_ASSOC_IES; } + if (wiphy_ext_feature_isset(&rdev->wiphy, + NL80211_EXT_FEATURE_FILS_SK_OFFLOAD) && + info->attrs[NL80211_ATTR_FILS_ERP_USERNAME] && + info->attrs[NL80211_ATTR_FILS_ERP_REALM] && + info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] && + info->attrs[NL80211_ATTR_FILS_ERP_RRK]) { + connect.fils_erp_username = + nla_data(info->attrs[NL80211_ATTR_FILS_ERP_USERNAME]); + connect.fils_erp_username_len = + nla_len(info->attrs[NL80211_ATTR_FILS_ERP_USERNAME]); + connect.fils_erp_realm = + nla_data(info->attrs[NL80211_ATTR_FILS_ERP_REALM]); + connect.fils_erp_realm_len = + nla_len(info->attrs[NL80211_ATTR_FILS_ERP_REALM]); + connect.fils_erp_next_seq_num = + nla_get_u16( + info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM]); + connect.fils_erp_rrk = + nla_data(info->attrs[NL80211_ATTR_FILS_ERP_RRK]); + connect.fils_erp_rrk_len = + nla_len(info->attrs[NL80211_ATTR_FILS_ERP_RRK]); + changed |= UPDATE_FILS_ERP_INFO; + } else if (info->attrs[NL80211_ATTR_FILS_ERP_USERNAME] || + info->attrs[NL80211_ATTR_FILS_ERP_REALM] || + info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] || + info->attrs[NL80211_ATTR_FILS_ERP_RRK]) { + return -EINVAL; + } + + if (info->attrs[NL80211_ATTR_AUTH_TYPE]) { + u32 auth_type = + nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]); + if (!nl80211_valid_auth_type(rdev, auth_type, + NL80211_CMD_CONNECT)) + return -EINVAL; + connect.auth_type = auth_type; + changed |= UPDATE_AUTH_TYPE; + } + wdev_lock(dev->ieee80211_ptr); if (!wdev->current_bss) ret = -ENOLINK; @@ -10266,15 +10306,27 @@ static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info) if (err) return err; + if (!tb[NL80211_REKEY_DATA_KEK] || !tb[NL80211_REKEY_DATA_REPLAY_CTR] || + (!wiphy_ext_feature_isset(&rdev->wiphy, + NL80211_EXT_FEATURE_FILS_SK_OFFLOAD) && + !wiphy_ext_feature_isset(&rdev->wiphy, + NL80211_EXT_FEATURE_FILS_STA) && + !tb[NL80211_REKEY_DATA_KCK])) + return -EINVAL; + if (nla_len(tb[NL80211_REKEY_DATA_REPLAY_CTR]) != NL80211_REPLAY_CTR_LEN) return -ERANGE; - if (nla_len(tb[NL80211_REKEY_DATA_KEK]) != NL80211_KEK_LEN) + if (nla_len(tb[NL80211_REKEY_DATA_KEK]) < NL80211_KEK_LEN) return -ERANGE; - if (nla_len(tb[NL80211_REKEY_DATA_KCK]) != NL80211_KCK_LEN) + if (tb[NL80211_REKEY_DATA_KCK] && + nla_len(tb[NL80211_REKEY_DATA_KCK]) != NL80211_KCK_LEN) return -ERANGE; + memset(&rekey_data, 0, sizeof(rekey_data)); rekey_data.kek = nla_data(tb[NL80211_REKEY_DATA_KEK]); - rekey_data.kck = nla_data(tb[NL80211_REKEY_DATA_KCK]); + rekey_data.kek_len = nla_len(tb[NL80211_REKEY_DATA_KEK]); + if (tb[NL80211_REKEY_DATA_KCK]) + rekey_data.kck = nla_data(tb[NL80211_REKEY_DATA_KCK]); rekey_data.replay_ctr = nla_data(tb[NL80211_REKEY_DATA_REPLAY_CTR]); wdev_lock(wdev); diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 7a5a64e70b4d..4c696d4d5ce3 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -1691,6 +1691,10 @@ static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb, struct sk_buff *skb; int err; + err = verify_policy_dir(dir); + if (err) + return ERR_PTR(err); + skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb) return ERR_PTR(-ENOMEM); @@ -2216,6 +2220,10 @@ static int xfrm_do_migrate(struct 
sk_buff *skb, struct nlmsghdr *nlh, int n = 0; struct net *net = sock_net(skb->sk); + err = verify_policy_dir(pi->dir); + if (err) + return err; + if (attrs[XFRMA_MIGRATE] == NULL) return -EINVAL; @@ -2331,6 +2339,11 @@ static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, { struct net *net = &init_net; struct sk_buff *skb; + int err; + + err = verify_policy_dir(dir); + if (err) + return err; skb = nlmsg_new(xfrm_migrate_msgsize(num_migrate, !!k), GFP_ATOMIC); if (skb == NULL) @@ -2985,6 +2998,11 @@ out_free_skb: static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c) { + int err; + + err = verify_policy_dir(dir); + if (err) + return err; switch (c->event) { case XFRM_MSG_NEWPOLICY: diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index 46881f329561..17c55e512de6 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib @@ -125,7 +125,7 @@ endif # ifeq ($(CONFIG_KASAN),y) _c_flags += $(if $(patsubst n%,, \ - $(KASAN_SANITIZE_$(basetarget).o)$(KASAN_SANITIZE)$(CONFIG_KASAN_SANITIZE_ALL)), \ + $(KASAN_SANITIZE_$(basetarget).o)$(KASAN_SANITIZE)y), \ $(CFLAGS_KASAN)) endif diff --git a/sound/core/control.c b/sound/core/control.c index b4fe9b002512..bd01d492f46a 100644 --- a/sound/core/control.c +++ b/sound/core/control.c @@ -1126,7 +1126,7 @@ static int snd_ctl_elem_user_tlv(struct snd_kcontrol *kcontrol, mutex_lock(&ue->card->user_ctl_lock); change = ue->tlv_data_size != size; if (!change) - change = memcmp(ue->tlv_data, new_data, size); + change = memcmp(ue->tlv_data, new_data, size) != 0; kfree(ue->tlv_data); ue->tlv_data = new_data; ue->tlv_data_size = size; diff --git a/sound/core/info.c b/sound/core/info.c index 79dee33b5035..a04016c19f6d 100644 --- a/sound/core/info.c +++ b/sound/core/info.c @@ -754,8 +754,11 @@ snd_info_create_entry(const char *name, struct snd_info_entry *parent) INIT_LIST_HEAD(&entry->children); INIT_LIST_HEAD(&entry->list); entry->parent = parent; - if (parent) + if (parent) { + mutex_lock(&parent->access); list_add_tail(&entry->list, &parent->children); + mutex_unlock(&parent->access); + } return entry; } diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index 1f062aaa5414..9af294c72a4d 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -654,7 +654,9 @@ static int snd_pcm_hw_free(struct snd_pcm_substream *substream) if (substream->ops->hw_free) result = substream->ops->hw_free(substream); snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN); - pm_qos_remove_request(&substream->latency_pm_qos_req); + if (pm_qos_request_active(&substream->latency_pm_qos_req)) + pm_qos_remove_request(&substream->latency_pm_qos_req); + return result; } diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c index c67f9c212dd1..e326c1d80416 100644 --- a/sound/core/seq/seq_clientmgr.c +++ b/sound/core/seq/seq_clientmgr.c @@ -1530,19 +1530,14 @@ static int snd_seq_ioctl_create_queue(struct snd_seq_client *client, void __user *arg) { struct snd_seq_queue_info info; - int result; struct snd_seq_queue *q; if (copy_from_user(&info, arg, sizeof(info))) return -EFAULT; - result = snd_seq_queue_alloc(client->number, info.locked, info.flags); - if (result < 0) - return result; - - q = queueptr(result); - if (q == NULL) - return -EINVAL; + q = snd_seq_queue_alloc(client->number, info.locked, info.flags); + if (IS_ERR(q)) + return PTR_ERR(q); info.queue = q->queue; info.locked = q->locked; @@ -1552,7 +1547,7 @@ static int snd_seq_ioctl_create_queue(struct snd_seq_client 
*client, if (! info.name[0]) snprintf(info.name, sizeof(info.name), "Queue-%d", q->queue); strlcpy(q->name, info.name, sizeof(q->name)); - queuefree(q); + snd_use_lock_free(&q->use_lock); if (copy_to_user(arg, &info, sizeof(info))) return -EFAULT; diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c index 450c5187eecb..79e0c5604ef8 100644 --- a/sound/core/seq/seq_queue.c +++ b/sound/core/seq/seq_queue.c @@ -184,22 +184,26 @@ void __exit snd_seq_queues_delete(void) static void queue_use(struct snd_seq_queue *queue, int client, int use); /* allocate a new queue - - * return queue index value or negative value for error + * return pointer to new queue or ERR_PTR(-errno) for error + * The new queue's use_lock is set to 1. It is the caller's responsibility to + * call snd_use_lock_free(&q->use_lock). */ -int snd_seq_queue_alloc(int client, int locked, unsigned int info_flags) +struct snd_seq_queue *snd_seq_queue_alloc(int client, int locked, unsigned int info_flags) { struct snd_seq_queue *q; q = queue_new(client, locked); if (q == NULL) - return -ENOMEM; + return ERR_PTR(-ENOMEM); q->info_flags = info_flags; queue_use(q, client, 1); + snd_use_lock_use(&q->use_lock); if (queue_list_add(q) < 0) { + snd_use_lock_free(&q->use_lock); queue_delete(q); - return -ENOMEM; + return ERR_PTR(-ENOMEM); } - return q->queue; + return q; } /* delete a queue - queue must be owned by the client */ diff --git a/sound/core/seq/seq_queue.h b/sound/core/seq/seq_queue.h index 30c8111477f6..719093489a2c 100644 --- a/sound/core/seq/seq_queue.h +++ b/sound/core/seq/seq_queue.h @@ -71,7 +71,7 @@ void snd_seq_queues_delete(void); /* create new queue (constructor) */ -int snd_seq_queue_alloc(int client, int locked, unsigned int flags); +struct snd_seq_queue *snd_seq_queue_alloc(int client, int locked, unsigned int flags); /* delete queue (destructor) */ int snd_seq_queue_delete(int client, int queueid); diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 46f7b023f69c..ac5de4365e15 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -854,6 +854,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = { SND_PCI_QUIRK(0x17aa, 0x390b, "Lenovo G50-80", CXT_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC), + SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo G50-70", CXT_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC), SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad", CXT_FIXUP_THINKPAD_ACPI), SND_PCI_QUIRK(0x1c06, 0x2011, "Lemote A1004", CXT_PINCFG_LEMOTE_A1004), diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 46a34039ecdc..5cab24f52825 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -2233,6 +2233,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3), SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT), SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP), + SND_PCI_QUIRK(0x104d, 0x9060, "Sony Vaio VPCL14M1R", ALC882_FIXUP_NO_PRIMARY_HP), SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP), SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP), diff --git a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c index 025a592b4015..55eef61a01de 
100644 --- a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c +++ b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c @@ -49,10 +49,10 @@ #define BUS_DOWN 1 /* - * 50 Milliseconds sufficient for DSP bring up in the lpass + * 200 Milliseconds sufficient for DSP bring up in the lpass * after Sub System Restart */ -#define ADSP_STATE_READY_TIMEOUT_MS 50 +#define ADSP_STATE_READY_TIMEOUT_MS 200 #define EAR_PMD 0 #define EAR_PMU 1 @@ -2619,6 +2619,17 @@ static int msm_anlg_cdc_codec_enable_micbias(struct snd_soc_dapm_widget *w, return 0; } +static void set_compander_mode(void *handle, int val) +{ + struct sdm660_cdc_priv *handle_cdc = handle; + struct snd_soc_codec *codec = handle_cdc->codec; + + if (get_codec_version(handle_cdc) >= DIANGU) { + snd_soc_update_bits(codec, + MSM89XX_PMIC_ANALOG_RX_COM_BIAS_DAC, + 0x08, val); + }; +} static void update_clkdiv(void *handle, int val) { struct sdm660_cdc_priv *handle_cdc = handle; @@ -3662,18 +3673,6 @@ static const struct sdm660_cdc_reg_mask_val {MSM89XX_PMIC_ANALOG_RX_COM_OCP_COUNT, 0xFF, 0xFF}, }; -static void msm_anlg_cdc_codec_init_cache(struct snd_soc_codec *codec) -{ - u32 i; - - regcache_cache_only(codec->component.regmap, true); - /* update cache with POR values */ - for (i = 0; i < ARRAY_SIZE(msm89xx_pmic_cdc_defaults); i++) - snd_soc_write(codec, msm89xx_pmic_cdc_defaults[i].reg, - msm89xx_pmic_cdc_defaults[i].def); - regcache_cache_only(codec->component.regmap, false); -} - static void msm_anlg_cdc_codec_init_reg(struct snd_soc_codec *codec) { u32 i; @@ -3719,7 +3718,7 @@ static struct regulator *msm_anlg_cdc_find_regulator( return sdm660_cdc->supplies[i].consumer; } - dev_err(sdm660_cdc->dev, "Error: regulator not found:%s\n" + dev_dbg(sdm660_cdc->dev, "Error: regulator not found:%s\n" , name); return NULL; } @@ -4171,7 +4170,6 @@ static int msm_anlg_cdc_soc_probe(struct snd_soc_codec *codec) ARRAY_SIZE(hph_type_detect_controls)); msm_anlg_cdc_bringup(codec); - msm_anlg_cdc_codec_init_cache(codec); msm_anlg_cdc_codec_init_reg(codec); msm_anlg_cdc_update_reg_defaults(codec); @@ -4649,6 +4647,7 @@ static int msm_anlg_cdc_probe(struct platform_device *pdev) BLOCKING_INIT_NOTIFIER_HEAD(&sdm660_cdc->notifier_mbhc); sdm660_cdc->dig_plat_data.handle = (void *) sdm660_cdc; + sdm660_cdc->dig_plat_data.set_compander_mode = set_compander_mode; sdm660_cdc->dig_plat_data.update_clkdiv = update_clkdiv; sdm660_cdc->dig_plat_data.get_cdc_version = get_cdc_version; sdm660_cdc->dig_plat_data.register_notifier = diff --git a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.h b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.h index 9563565f36d2..d07d1bee4d6b 100644 --- a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.h +++ b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.h @@ -168,6 +168,7 @@ struct msm_dig_ctrl_data { struct msm_dig_ctrl_platform_data { void *handle; + void (*set_compander_mode)(void *handle, int val); void (*update_clkdiv)(void *handle, int val); int (*get_cdc_version)(void *handle); int (*register_notifier)(void *handle, diff --git a/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c b/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c index 4249ada17c87..5f9dc9c0c392 100644 --- a/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c +++ b/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c @@ -214,60 +214,92 @@ static int msm_dig_cdc_codec_config_compander(struct snd_soc_codec *codec, int interp_n, int event) { struct msm_dig_priv *dig_cdc = snd_soc_codec_get_drvdata(codec); + int comp_ch_bits_set = 0x03; + int comp_ch_value; dev_dbg(codec->dev, "%s: event %d shift %d, enabled %d\n", 
__func__, event, interp_n, dig_cdc->comp_enabled[interp_n]); - /* compander is not enabled */ - if (!dig_cdc->comp_enabled[interp_n]) + /* compander is invalid */ + if (dig_cdc->comp_enabled[interp_n] != COMPANDER_1 && + dig_cdc->comp_enabled[interp_n]) { + dev_dbg(codec->dev, "%s: Invalid compander %d\n", __func__, + dig_cdc->comp_enabled[interp_n]); return 0; + } - switch (dig_cdc->comp_enabled[interp_n]) { - case COMPANDER_1: - if (SND_SOC_DAPM_EVENT_ON(event)) { - /* Enable Compander Clock */ - snd_soc_update_bits(codec, - MSM89XX_CDC_CORE_COMP0_B2_CTL, 0x0F, 0x09); - snd_soc_update_bits(codec, - MSM89XX_CDC_CORE_CLK_RX_B2_CTL, 0x01, 0x01); + if (SND_SOC_DAPM_EVENT_ON(event)) { + /* compander is not enabled */ + if (!dig_cdc->comp_enabled[interp_n]) { + dig_cdc->set_compander_mode(dig_cdc->handle, 0x00); + return 0; + }; + comp_ch_value = snd_soc_read(codec, + MSM89XX_CDC_CORE_COMP0_B1_CTL); + if (interp_n == 0) { + if ((comp_ch_value & 0x02) == 0x02) { + dev_dbg(codec->dev, + "%s comp ch already enabled\n", + __func__); + return 0; + } + } + if (interp_n == 1) { + if ((comp_ch_value & 0x01) == 0x01) { + dev_dbg(codec->dev, + "%s comp ch already enabled\n", + __func__); + return 0; + } + } + dig_cdc->set_compander_mode(dig_cdc->handle, 0x08); + /* Enable Compander Clock */ + snd_soc_update_bits(codec, + MSM89XX_CDC_CORE_COMP0_B2_CTL, 0x0F, 0x09); + snd_soc_update_bits(codec, + MSM89XX_CDC_CORE_CLK_RX_B2_CTL, 0x01, 0x01); + if (dig_cdc->comp_enabled[MSM89XX_RX1]) { snd_soc_update_bits(codec, MSM89XX_CDC_CORE_COMP0_B1_CTL, - 1 << interp_n, 1 << interp_n); - snd_soc_update_bits(codec, - MSM89XX_CDC_CORE_COMP0_B3_CTL, 0xFF, 0x01); - snd_soc_update_bits(codec, - MSM89XX_CDC_CORE_COMP0_B2_CTL, 0xF0, 0x50); - /* add sleep for compander to settle */ - usleep_range(1000, 1100); - snd_soc_update_bits(codec, - MSM89XX_CDC_CORE_COMP0_B3_CTL, 0xFF, 0x28); + 0x02, 0x02); + } + if (dig_cdc->comp_enabled[MSM89XX_RX2]) { snd_soc_update_bits(codec, - MSM89XX_CDC_CORE_COMP0_B2_CTL, 0xF0, 0xB0); + MSM89XX_CDC_CORE_COMP0_B1_CTL, + 0x01, 0x01); + } + snd_soc_update_bits(codec, + MSM89XX_CDC_CORE_COMP0_B3_CTL, 0xFF, 0x01); + snd_soc_update_bits(codec, + MSM89XX_CDC_CORE_COMP0_B2_CTL, 0xF0, 0x50); + /* add sleep for compander to settle */ + usleep_range(1000, 1100); + snd_soc_update_bits(codec, + MSM89XX_CDC_CORE_COMP0_B3_CTL, 0xFF, 0x28); + snd_soc_update_bits(codec, + MSM89XX_CDC_CORE_COMP0_B2_CTL, 0xF0, 0xB0); - /* Enable Compander GPIO */ - if (dig_cdc->codec_hph_comp_gpio) - dig_cdc->codec_hph_comp_gpio(1, codec); - } else if (SND_SOC_DAPM_EVENT_OFF(event)) { - /* Disable Compander GPIO */ - if (dig_cdc->codec_hph_comp_gpio) - dig_cdc->codec_hph_comp_gpio(0, codec); + /* Enable Compander GPIO */ + if (dig_cdc->codec_hph_comp_gpio) + dig_cdc->codec_hph_comp_gpio(1, codec); + } else if (SND_SOC_DAPM_EVENT_OFF(event)) { + /* Disable Compander GPIO */ + if (dig_cdc->codec_hph_comp_gpio) + dig_cdc->codec_hph_comp_gpio(0, codec); + snd_soc_update_bits(codec, + MSM89XX_CDC_CORE_COMP0_B1_CTL, + 1 << interp_n, 0); + comp_ch_bits_set = snd_soc_read(codec, + MSM89XX_CDC_CORE_COMP0_B1_CTL); + if ((comp_ch_bits_set & 0x03) == 0x00) { snd_soc_update_bits(codec, MSM89XX_CDC_CORE_COMP0_B2_CTL, 0x0F, 0x05); - snd_soc_update_bits(codec, - MSM89XX_CDC_CORE_COMP0_B1_CTL, - 1 << interp_n, 0); - snd_soc_update_bits(codec, + snd_soc_update_bits(codec, MSM89XX_CDC_CORE_CLK_RX_B2_CTL, 0x01, 0x00); } - break; - default: - dev_dbg(codec->dev, "%s: Invalid compander %d\n", __func__, - dig_cdc->comp_enabled[interp_n]); - break; - }; 
- + } return 0; } @@ -1327,6 +1359,9 @@ static const struct snd_soc_dapm_route audio_dig_map[] = { {"RX2 MIX1 INP2", "RX3", "I2S RX3"}, {"RX2 MIX1 INP2", "IIR1", "IIR1"}, {"RX2 MIX1 INP2", "IIR2", "IIR2"}, + {"RX2 MIX1 INP3", "RX1", "I2S RX1"}, + {"RX2 MIX1 INP3", "RX2", "I2S RX2"}, + {"RX2 MIX1 INP3", "RX3", "I2S RX3"}, {"RX3 MIX1 INP1", "RX1", "I2S RX1"}, {"RX3 MIX1 INP1", "RX2", "I2S RX2"}, @@ -1338,6 +1373,9 @@ static const struct snd_soc_dapm_route audio_dig_map[] = { {"RX3 MIX1 INP2", "RX3", "I2S RX3"}, {"RX3 MIX1 INP2", "IIR1", "IIR1"}, {"RX3 MIX1 INP2", "IIR2", "IIR2"}, + {"RX3 MIX1 INP3", "RX1", "I2S RX1"}, + {"RX3 MIX1 INP3", "RX2", "I2S RX2"}, + {"RX3 MIX1 INP3", "RX3", "I2S RX3"}, {"RX1 MIX2 INP1", "IIR1", "IIR1"}, {"RX2 MIX2 INP1", "IIR1", "IIR1"}, @@ -2101,6 +2139,7 @@ static int msm_dig_cdc_probe(struct platform_device *pdev) msm_dig_cdc->dig_base, &msm_digital_regmap_config); msm_dig_cdc->update_clkdiv = pdata->update_clkdiv; + msm_dig_cdc->set_compander_mode = pdata->set_compander_mode; msm_dig_cdc->get_cdc_version = pdata->get_cdc_version; msm_dig_cdc->handle = pdata->handle; msm_dig_cdc->register_notifier = pdata->register_notifier; diff --git a/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.h b/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.h index f0e7a9cf9228..11f36f99f1bd 100644 --- a/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.h +++ b/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.h @@ -49,6 +49,7 @@ struct msm_dig_priv { u32 mute_mask; int dapm_bias_off; void *handle; + void (*set_compander_mode)(void *handle, int val); void (*update_clkdiv)(void *handle, int val); int (*get_cdc_version)(void *handle); int (*register_notifier)(void *handle, @@ -58,6 +59,7 @@ struct msm_dig_priv { struct dig_ctrl_platform_data { void *handle; + void (*set_compander_mode)(void *handle, int val); void (*update_clkdiv)(void *handle, int val); int (*get_cdc_version)(void *handle); int (*register_notifier)(void *handle, diff --git a/sound/soc/codecs/wcd-dsp-mgr.c b/sound/soc/codecs/wcd-dsp-mgr.c index 1613c5baa9c7..f995bf22c1c3 100644 --- a/sound/soc/codecs/wcd-dsp-mgr.c +++ b/sound/soc/codecs/wcd-dsp-mgr.c @@ -25,7 +25,8 @@ static char *wdsp_get_cmpnt_type_string(enum wdsp_cmpnt_type); /* Component related macros */ -#define WDSP_GET_COMPONENT(wdsp, x) (&(wdsp->cmpnts[x])) +#define WDSP_GET_COMPONENT(wdsp, x) ((x >= WDSP_CMPNT_TYPE_MAX || x < 0) ? 
\ + NULL : (&(wdsp->cmpnts[x]))) #define WDSP_GET_CMPNT_TYPE_STR(x) wdsp_get_cmpnt_type_string(x) /* diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c index 10883b0939d6..2bc911e63e12 100644 --- a/sound/soc/codecs/wcd9335.c +++ b/sound/soc/codecs/wcd9335.c @@ -4073,6 +4073,8 @@ static int tasha_codec_enable_hphr_pa(struct snd_soc_dapm_widget *w, snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0xC0, 0xC0); } set_bit(HPH_PA_DELAY, &tasha->status_mask); + if (!(strcmp(w->name, "HPHR PA"))) + snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x40, 0x40); break; case SND_SOC_DAPM_POST_PMU: if (!(strcmp(w->name, "ANC HPHR PA"))) { @@ -4127,6 +4129,8 @@ static int tasha_codec_enable_hphr_pa(struct snd_soc_dapm_widget *w, tasha_codec_hph_post_pa_config(tasha, hph_mode, event); if (!(strcmp(w->name, "ANC HPHR PA"))) snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x40, 0x00); + if (!(strcmp(w->name, "HPHR PA"))) + snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x40, 0x00); break; case SND_SOC_DAPM_POST_PMD: /* 5ms sleep is required after PA is disabled as per @@ -4166,6 +4170,8 @@ static int tasha_codec_enable_hphl_pa(struct snd_soc_dapm_widget *w, (test_bit(HPH_PA_DELAY, &tasha->status_mask))) { snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0xC0, 0xC0); } + if (!(strcmp(w->name, "HPHL PA"))) + snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x80, 0x80); set_bit(HPH_PA_DELAY, &tasha->status_mask); break; case SND_SOC_DAPM_POST_PMU: @@ -4222,6 +4228,8 @@ static int tasha_codec_enable_hphl_pa(struct snd_soc_dapm_widget *w, tasha_codec_hph_post_pa_config(tasha, hph_mode, event); if (!(strcmp(w->name, "ANC HPHL PA"))) snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x80, 0x00); + if (!(strcmp(w->name, "HPHL PA"))) + snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x80, 0x00); break; case SND_SOC_DAPM_POST_PMD: /* 5ms sleep is required after PA is disabled as per @@ -4544,6 +4552,10 @@ static int tasha_codec_hphr_dac_event(struct snd_soc_dapm_widget *w, switch (event) { case SND_SOC_DAPM_PRE_PMU: + if (!(strcmp(w->name, "RX INT2 DAC"))) { + snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x20, 0x20); + snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x10, 0x10); + } if (tasha->anc_func) { ret = tasha_codec_enable_anc(w, kcontrol, event); /* 40 msec delay is needed to avoid click and pop */ @@ -4582,6 +4594,8 @@ static int tasha_codec_hphr_dac_event(struct snd_soc_dapm_widget *w, } break; case SND_SOC_DAPM_PRE_PMD: + if (!(strcmp(w->name, "RX INT2 DAC"))) + snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x30, 0x00); if ((hph_mode == CLS_H_LP) && (TASHA_IS_1_1(wcd9xxx))) { snd_soc_update_bits(codec, WCD9335_HPH_L_DAC_CTL, @@ -11094,12 +11108,12 @@ static const struct snd_soc_dapm_widget tasha_dapm_widgets[] = { 0, 0, tasha_codec_ear_dac_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD), - SND_SOC_DAPM_DAC_E("RX INT1 DAC", NULL, WCD9335_ANA_HPH, - 5, 0, tasha_codec_hphl_dac_event, + SND_SOC_DAPM_DAC_E("RX INT1 DAC", NULL, SND_SOC_NOPM, + 0, 0, tasha_codec_hphl_dac_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD), - SND_SOC_DAPM_DAC_E("RX INT2 DAC", NULL, WCD9335_ANA_HPH, - 4, 0, tasha_codec_hphr_dac_event, + SND_SOC_DAPM_DAC_E("RX INT2 DAC", NULL, SND_SOC_NOPM, + 0, 0, tasha_codec_hphr_dac_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD), SND_SOC_DAPM_DAC_E("RX INT3 DAC", NULL, SND_SOC_NOPM, @@ -11114,11 +11128,11 @@ static const struct snd_soc_dapm_widget tasha_dapm_widgets[] = 
{ SND_SOC_DAPM_DAC_E("RX INT6 DAC", NULL, SND_SOC_NOPM, 0, 0, tasha_codec_lineout_dac_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), - SND_SOC_DAPM_PGA_E("HPHL PA", WCD9335_ANA_HPH, 7, 0, NULL, 0, + SND_SOC_DAPM_PGA_E("HPHL PA", SND_SOC_NOPM, 0, 0, NULL, 0, tasha_codec_enable_hphl_pa, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD), - SND_SOC_DAPM_PGA_E("HPHR PA", WCD9335_ANA_HPH, 6, 0, NULL, 0, + SND_SOC_DAPM_PGA_E("HPHR PA", SND_SOC_NOPM, 0, 0, NULL, 0, tasha_codec_enable_hphr_pa, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD), diff --git a/sound/soc/codecs/wcd934x/wcd934x.c b/sound/soc/codecs/wcd934x/wcd934x.c index 26320fd01a5a..bfe471e73503 100644 --- a/sound/soc/codecs/wcd934x/wcd934x.c +++ b/sound/soc/codecs/wcd934x/wcd934x.c @@ -2014,6 +2014,8 @@ static int tavil_codec_enable_hphr_pa(struct snd_soc_dapm_widget *w, snd_soc_update_bits(codec, WCD934X_ANA_RX_SUPPLIES, 0x02, 0x02); } + if (!(strcmp(w->name, "HPHR PA"))) + snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0x40, 0x40); break; case SND_SOC_DAPM_POST_PMU: if ((!(strcmp(w->name, "ANC HPHR PA")))) { @@ -2112,6 +2114,8 @@ static int tavil_codec_enable_hphr_pa(struct snd_soc_dapm_widget *w, 0x10, 0x10); if (!(strcmp(w->name, "ANC HPHR PA"))) snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0x40, 0x00); + if (!(strcmp(w->name, "HPHR PA"))) + snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0x40, 0x00); break; case SND_SOC_DAPM_POST_PMD: /* @@ -2161,6 +2165,8 @@ static int tavil_codec_enable_hphl_pa(struct snd_soc_dapm_widget *w, (test_bit(HPH_PA_DELAY, &tavil->status_mask))) snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0xC0, 0xC0); + if (!(strcmp(w->name, "HPHL PA"))) + snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0x80, 0x80); set_bit(HPH_PA_DELAY, &tavil->status_mask); if (dsd_conf && (snd_soc_read(codec, WCD934X_CDC_DSD0_PATH_CTL) & 0x01)) { @@ -2266,6 +2272,8 @@ static int tavil_codec_enable_hphl_pa(struct snd_soc_dapm_widget *w, if (!(strcmp(w->name, "ANC HPHL PA"))) snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0x80, 0x00); + if (!(strcmp(w->name, "HPHL PA"))) + snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0x80, 0x00); break; case SND_SOC_DAPM_POST_PMD: /* @@ -2418,6 +2426,10 @@ static int tavil_codec_hphr_dac_event(struct snd_soc_dapm_widget *w, switch (event) { case SND_SOC_DAPM_PRE_PMU: + if (!(strcmp(w->name, "RX INT2 DAC"))) { + snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0x20, 0x20); + snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0x10, 0x10); + } if (tavil->anc_func) { ret = tavil_codec_enable_anc(w, kcontrol, event); /* 40 msec delay is needed to avoid click and pop */ @@ -2458,6 +2470,10 @@ static int tavil_codec_hphr_dac_event(struct snd_soc_dapm_widget *w, WCD934X_CDC_RX2_RX_PATH_CFG0, 0x10, 0x10); break; + case SND_SOC_DAPM_PRE_PMD: + if (!(strcmp(w->name, "RX INT2 DAC"))) + snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0x30, 0x00); + break; case SND_SOC_DAPM_POST_PMD: /* 1000us required as per HW requirement */ usleep_range(1000, 1100); @@ -7365,12 +7381,12 @@ static const struct snd_soc_dapm_widget tavil_dapm_widgets[] = { 0, 0, tavil_codec_ear_dac_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD), - SND_SOC_DAPM_DAC_E("RX INT1 DAC", NULL, WCD934X_ANA_HPH, - 5, 0, tavil_codec_hphl_dac_event, + SND_SOC_DAPM_DAC_E("RX INT1 DAC", NULL, SND_SOC_NOPM, + 0, 0, tavil_codec_hphl_dac_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD), - 
SND_SOC_DAPM_DAC_E("RX INT2 DAC", NULL, WCD934X_ANA_HPH, - 4, 0, tavil_codec_hphr_dac_event, + SND_SOC_DAPM_DAC_E("RX INT2 DAC", NULL, SND_SOC_NOPM, + 0, 0, tavil_codec_hphr_dac_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD), SND_SOC_DAPM_DAC_E("RX INT3 DAC", NULL, SND_SOC_NOPM, @@ -7383,11 +7399,11 @@ static const struct snd_soc_dapm_widget tavil_dapm_widgets[] = { SND_SOC_DAPM_PGA_E("EAR PA", WCD934X_ANA_EAR, 7, 0, NULL, 0, tavil_codec_enable_ear_pa, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), - SND_SOC_DAPM_PGA_E("HPHL PA", WCD934X_ANA_HPH, 7, 0, NULL, 0, + SND_SOC_DAPM_PGA_E("HPHL PA", SND_SOC_NOPM, 0, 0, NULL, 0, tavil_codec_enable_hphl_pa, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD), - SND_SOC_DAPM_PGA_E("HPHR PA", WCD934X_ANA_HPH, 6, 0, NULL, 0, + SND_SOC_DAPM_PGA_E("HPHR PA", SND_SOC_NOPM, 0, 0, NULL, 0, tavil_codec_enable_hphr_pa, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD), diff --git a/sound/soc/codecs/wsa881x.c b/sound/soc/codecs/wsa881x.c index 058f6c5fa676..e4f6df077d98 100644 --- a/sound/soc/codecs/wsa881x.c +++ b/sound/soc/codecs/wsa881x.c @@ -968,6 +968,8 @@ static void wsa881x_init(struct snd_soc_codec *codec) wsa881x->version = snd_soc_read(codec, WSA881X_CHIP_ID1); wsa881x_regmap_defaults(wsa881x->regmap, wsa881x->version); + /* Enable software reset output from soundwire slave */ + snd_soc_update_bits(codec, WSA881X_SWR_RESET_EN, 0x07, 0x07); /* Bring out of analog reset */ snd_soc_update_bits(codec, WSA881X_CDC_RST_CTL, 0x02, 0x02); /* Bring out of digital reset */ diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c index 54c33204541f..ff6fcd9f92f7 100644 --- a/sound/soc/generic/simple-card.c +++ b/sound/soc/generic/simple-card.c @@ -100,7 +100,7 @@ static int asoc_simple_card_hw_params(struct snd_pcm_substream *substream, if (ret && ret != -ENOTSUPP) goto err; } - + return 0; err: return ret; } diff --git a/sound/soc/msm/Kconfig b/sound/soc/msm/Kconfig index eb650d19a97c..629a9c3d91db 100644 --- a/sound/soc/msm/Kconfig +++ b/sound/soc/msm/Kconfig @@ -166,7 +166,7 @@ config SND_SOC_EXT_CODEC config SND_SOC_MSM8996 tristate "SoC Machine driver for MSM8996 boards" - depends on ARCH_MSM8996 + depends on ARCH_MSM8996 || MSM_GVM_QUIN select SND_SOC_COMPRESS select SND_SOC_QDSP6V2 select SND_SOC_MSM_STUB @@ -176,7 +176,7 @@ config SND_SOC_MSM8996 select MSM_QDSP6V2_CODECS select SND_SOC_WCD9335 select SND_SOC_WSA881X - select SND_SOC_MSM_HDMI_CODEC_RX + select SND_SOC_MSM_HDMI_CODEC_RX if ARCH_MSM8996 select DTS_SRS_TM select QTI_PP select QTI_PP_AUDIOSPHERE diff --git a/sound/soc/msm/apq8096-auto.c b/sound/soc/msm/apq8096-auto.c index b1dff8764618..749f386852c6 100644 --- a/sound/soc/msm/apq8096-auto.c +++ b/sound/soc/msm/apq8096-auto.c @@ -61,6 +61,16 @@ static int msm_quat_mi2s_rx_bit_format = SNDRV_PCM_FORMAT_S16_LE; static int msm_sec_mi2s_rate = SAMPLING_RATE_48KHZ; /* TDM default channels */ +static int msm_pri_tdm_tx_0_ch = 2; +static int msm_pri_tdm_tx_1_ch = 2; +static int msm_pri_tdm_tx_2_ch = 2; +static int msm_pri_tdm_tx_3_ch = 2; + +static int msm_pri_tdm_rx_0_ch = 2; +static int msm_pri_tdm_rx_1_ch = 2; +static int msm_pri_tdm_rx_2_ch = 2; +static int msm_pri_tdm_rx_3_ch = 2; + static int msm_sec_tdm_tx_0_ch = 2; /* STEREO MIC */ static int msm_sec_tdm_tx_1_ch = 2; static int msm_sec_tdm_tx_2_ch = 2; @@ -88,6 +98,16 @@ static int msm_quat_tdm_tx_2_ch = 2; /*ENT RECORD*/ static int 
msm_quat_tdm_tx_3_ch; /* TDM default bit format */ +static int msm_pri_tdm_tx_0_bit_format = SNDRV_PCM_FORMAT_S16_LE; +static int msm_pri_tdm_tx_1_bit_format = SNDRV_PCM_FORMAT_S16_LE; +static int msm_pri_tdm_tx_2_bit_format = SNDRV_PCM_FORMAT_S16_LE; +static int msm_pri_tdm_tx_3_bit_format = SNDRV_PCM_FORMAT_S16_LE; + +static int msm_pri_tdm_rx_0_bit_format = SNDRV_PCM_FORMAT_S16_LE; +static int msm_pri_tdm_rx_1_bit_format = SNDRV_PCM_FORMAT_S16_LE; +static int msm_pri_tdm_rx_2_bit_format = SNDRV_PCM_FORMAT_S16_LE; +static int msm_pri_tdm_rx_3_bit_format = SNDRV_PCM_FORMAT_S16_LE; + static int msm_sec_tdm_tx_0_bit_format = SNDRV_PCM_FORMAT_S16_LE; static int msm_sec_tdm_tx_1_bit_format = SNDRV_PCM_FORMAT_S16_LE; static int msm_sec_tdm_tx_2_bit_format = SNDRV_PCM_FORMAT_S16_LE; @@ -114,6 +134,10 @@ static int msm_quat_tdm_tx_1_bit_format = SNDRV_PCM_FORMAT_S16_LE; static int msm_quat_tdm_tx_2_bit_format = SNDRV_PCM_FORMAT_S16_LE; static int msm_quat_tdm_tx_3_bit_format = SNDRV_PCM_FORMAT_S16_LE; +static int msm_pri_tdm_rate = SAMPLING_RATE_48KHZ; +static int msm_pri_tdm_slot_width = 32; +static int msm_pri_tdm_slot_num = 8; + /* EC Reference default values are set in mixer_paths.xml */ static int msm_ec_ref_ch = 4; static int msm_ec_ref_bit_format = SNDRV_PCM_FORMAT_S16_LE; @@ -174,11 +198,26 @@ enum { SECONDARY_TDM_TX_5, SECONDARY_TDM_TX_6, SECONDARY_TDM_TX_7, + PRIMARY_TDM_RX_0, + PRIMARY_TDM_RX_1, + PRIMARY_TDM_RX_2, + PRIMARY_TDM_RX_3, + PRIMARY_TDM_RX_4, + PRIMARY_TDM_RX_5, + PRIMARY_TDM_RX_6, + PRIMARY_TDM_RX_7, + PRIMARY_TDM_TX_0, + PRIMARY_TDM_TX_1, + PRIMARY_TDM_TX_2, + PRIMARY_TDM_TX_3, + PRIMARY_TDM_TX_4, + PRIMARY_TDM_TX_5, + PRIMARY_TDM_TX_6, + PRIMARY_TDM_TX_7, TDM_MAX, }; #define TDM_SLOT_OFFSET_MAX 8 - /* TDM default offset */ static unsigned int tdm_slot_offset[TDM_MAX][TDM_SLOT_OFFSET_MAX] = { /* QUAT_TDM_RX */ @@ -235,6 +274,24 @@ static unsigned int tdm_slot_offset[TDM_MAX][TDM_SLOT_OFFSET_MAX] = { {0xFFFF}, /* not used */ {0xFFFF}, /* not used */ {0xFFFF}, /* not used */ + /* PRI_TDM_RX */ + {0, 4, 0xFFFF}, + {8, 12, 0xFFFF}, + {16, 20, 0xFFFF}, + {24, 28, 0xFFFF}, + {0xFFFF}, /* not used */ + {0xFFFF}, /* not used */ + {0xFFFF}, /* not used */ + {0xFFFF}, /* not used */ + /* PRI_TDM_TX */ + {0, 4, 0xFFFF}, + {8, 12, 0xFFFF}, + {16, 20, 0xFFFF}, + {24, 28, 0xFFFF}, + {0xFFFF}, /* not used */ + {0xFFFF}, /* not used */ + {0xFFFF}, /* not used */ + {0xFFFF}, /* not used */ }; @@ -300,6 +357,24 @@ static unsigned int tdm_slot_offset_adp_mmxf[TDM_MAX][TDM_SLOT_OFFSET_MAX] = { {0xFFFF}, /* not used */ {0xFFFF}, /* not used */ {0xFFFF}, /* not used */ + /* PRI_TDM_RX */ + {0xFFFF}, /* not used */ + {0xFFFF}, /* not used */ + {0xFFFF}, /* not used */ + {0xFFFF}, /* not used */ + {0xFFFF}, /* not used */ + {0xFFFF}, /* not used */ + {0xFFFF}, /* not used */ + {0xFFFF}, /* not used */ + /* PRI_TDM_TX */ + {0xFFFF}, /* not used */ + {0xFFFF}, /* not used */ + {0xFFFF}, /* not used */ + {0xFFFF}, /* not used */ + {0xFFFF}, /* not used */ + {0xFFFF}, /* not used */ + {0xFFFF}, /* not used */ + {0xFFFF}, /* not used */ }; static unsigned int tdm_slot_offset_custom[TDM_MAX][TDM_SLOT_OFFSET_MAX] = { @@ -357,6 +432,24 @@ static unsigned int tdm_slot_offset_custom[TDM_MAX][TDM_SLOT_OFFSET_MAX] = { {0xFFFF}, /* not used */ {0xFFFF}, /* not used */ {0xFFFF}, /* not used */ + /* PRI_TDM_RX */ + {0xFFFF}, /* not used */ + {0xFFFF}, /* not used */ + {0xFFFF}, /* not used */ + {0xFFFF}, /* not used */ + {0xFFFF}, /* not used */ + {0xFFFF}, /* not used */ + {0xFFFF}, /* not used */ + 
{0xFFFF}, /* not used */ + /* PRI_TDM_TX */ + {0xFFFF}, /* not used */ + {0xFFFF}, /* not used */ + {0xFFFF}, /* not used */ + {0xFFFF}, /* not used */ + {0xFFFF}, /* not used */ + {0xFFFF}, /* not used */ + {0xFFFF}, /* not used */ + {0xFFFF}, /* not used */ }; static char const *hdmi_rx_ch_text[] = {"Two", "Three", "Four", "Five", @@ -389,6 +482,14 @@ static const char *const ec_ref_rate_text[] = {"0", "8000", "16000", static const char *const mi2s_rate_text[] = {"32000", "44100", "48000"}; +static const char *const pri_tdm_rate_text[] = {"8000", "16000", "48000"}; + +static const char *const pri_tdm_slot_num_text[] = {"One", "Two", "Four", + "Eight", "Sixteen", "Thirtytwo"}; + + +static const char *const pri_tdm_slot_width_text[] = {"16", "24", "32"}; + static struct afe_clk_set sec_mi2s_tx_clk = { AFE_API_VERSION_I2S_CONFIG, Q6AFE_LPASS_CLK_ID_SEC_MI2S_EBIT, @@ -698,6 +799,150 @@ static int msm_sec_mi2s_tx_bit_format_put(struct snd_kcontrol *kcontrol, return 0; } +static int msm_pri_tdm_tx_0_ch_get(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + pr_debug("%s: msm_pri_tdm_tx_0_ch = %d\n", __func__, + msm_pri_tdm_tx_0_ch); + ucontrol->value.integer.value[0] = msm_pri_tdm_tx_0_ch - 1; + return 0; +} + +static int msm_pri_tdm_tx_0_ch_put(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + msm_pri_tdm_tx_0_ch = ucontrol->value.integer.value[0] + 1; + pr_debug("%s: msm_pri_tdm_tx_0_ch = %d\n", __func__, + msm_pri_tdm_tx_0_ch); + return 0; +} + +static int msm_pri_tdm_tx_1_ch_get(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + pr_debug("%s: pri_tdm_tx_1_ch = %d\n", __func__, + msm_pri_tdm_tx_1_ch); + ucontrol->value.integer.value[0] = msm_pri_tdm_tx_1_ch - 1; + return 0; +} + +static int msm_pri_tdm_tx_1_ch_put(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + msm_pri_tdm_tx_1_ch = ucontrol->value.integer.value[0] + 1; + pr_debug("%s: msm_pri_tdm_tx_1_ch = %d\n", __func__, + msm_pri_tdm_tx_1_ch); + return 0; +} + +static int msm_pri_tdm_tx_2_ch_get(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + pr_debug("%s: msm_pri_tdm_tx_2_ch = %d\n", __func__, + msm_pri_tdm_tx_2_ch); + ucontrol->value.integer.value[0] = msm_pri_tdm_tx_2_ch - 1; + return 0; +} + +static int msm_pri_tdm_tx_2_ch_put(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + msm_pri_tdm_tx_2_ch = ucontrol->value.integer.value[0] + 1; + pr_debug("%s: msm_pri_tdm_tx_2_ch = %d\n", __func__, + msm_pri_tdm_tx_2_ch); + return 0; +} + +static int msm_pri_tdm_tx_3_ch_get(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + pr_debug("%s: msm_pri_tdm_tx_3_ch = %d\n", __func__, + msm_pri_tdm_tx_3_ch); + ucontrol->value.integer.value[0] = msm_pri_tdm_tx_3_ch - 1; + return 0; +} + +static int msm_pri_tdm_tx_3_ch_put(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + msm_pri_tdm_tx_3_ch = ucontrol->value.integer.value[0] + 1; + pr_debug("%s: msm_pri_tdm_tx_3_ch = %d\n", __func__, + msm_pri_tdm_tx_3_ch); + return 0; +} + +static int msm_pri_tdm_rx_0_ch_get(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + pr_debug("%s: msm_pri_tdm_rx_0_ch = %d\n", __func__, + msm_pri_tdm_rx_0_ch); + ucontrol->value.integer.value[0] = msm_pri_tdm_rx_0_ch - 1; + return 0; +} + +static int msm_pri_tdm_rx_0_ch_put(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + msm_pri_tdm_rx_0_ch = ucontrol->value.integer.value[0] + 1; + 
pr_debug("%s: msm_pri_tdm_rx_0_ch = %d\n", __func__, + msm_pri_tdm_rx_0_ch); + return 0; +} + +static int msm_pri_tdm_rx_1_ch_get(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + pr_debug("%s: msm_pri_tdm_rx_1_ch = %d\n", __func__, + msm_pri_tdm_rx_1_ch); + ucontrol->value.integer.value[0] = msm_pri_tdm_rx_1_ch - 1; + return 0; +} + +static int msm_pri_tdm_rx_1_ch_put(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + msm_pri_tdm_rx_1_ch = ucontrol->value.integer.value[0] + 1; + pr_debug("%s: msm_pri_tdm_rx_1_ch = %d\n", __func__, + msm_pri_tdm_rx_1_ch); + return 0; +} + +static int msm_pri_tdm_rx_2_ch_get(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + pr_debug("%s: msm_pri_tdm_rx_2_ch = %d\n", __func__, + msm_pri_tdm_rx_2_ch); + ucontrol->value.integer.value[0] = msm_pri_tdm_rx_2_ch - 1; + return 0; +} + +static int msm_pri_tdm_rx_2_ch_put(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + msm_pri_tdm_rx_2_ch = ucontrol->value.integer.value[0] + 1; + pr_debug("%s: msm_pri_tdm_rx_2_ch = %d\n", __func__, + msm_pri_tdm_rx_2_ch); + return 0; +} + +static int msm_pri_tdm_rx_3_ch_get(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + pr_debug("%s: msm_pri_tdm_rx_3_ch = %d\n", __func__, + msm_pri_tdm_rx_3_ch); + ucontrol->value.integer.value[0] = msm_pri_tdm_rx_3_ch - 1; + return 0; +} + +static int msm_pri_tdm_rx_3_ch_put(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + msm_pri_tdm_rx_3_ch = ucontrol->value.integer.value[0] + 1; + pr_debug("%s: msm_pri_tdm_rx_3_ch = %d\n", __func__, + msm_pri_tdm_rx_3_ch); + return 0; +} + static int msm_sec_mi2s_rate_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { @@ -728,6 +973,174 @@ static int msm_sec_mi2s_rate_put(struct snd_kcontrol *kcontrol, return 0; } +static int msm_pri_tdm_rate_get(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + ucontrol->value.integer.value[0] = msm_pri_tdm_rate; + pr_debug("%s: msm_pri_tdm_rate = %d\n", __func__, msm_pri_tdm_rate); + return 0; +} + +static int msm_pri_tdm_rate_put(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + switch (ucontrol->value.integer.value[0]) { + case 0: + msm_pri_tdm_rate = SAMPLING_RATE_8KHZ; + break; + case 1: + msm_pri_tdm_rate = SAMPLING_RATE_16KHZ; + break; + case 2: + msm_pri_tdm_rate = SAMPLING_RATE_48KHZ; + break; + default: + msm_pri_tdm_rate = SAMPLING_RATE_48KHZ; + break; + } + pr_debug("%s: msm_pri_tdm_rate = %d\n", + __func__, msm_pri_tdm_rate); + return 0; +} + +static int msm_pri_tdm_slot_width_get(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + ucontrol->value.integer.value[0] = msm_pri_tdm_slot_width; + pr_debug("%s: msm_pri_tdm_slot_width = %d\n", + __func__, msm_pri_tdm_slot_width); + return 0; +} + +static int msm_pri_tdm_slot_width_put(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + switch (ucontrol->value.integer.value[0]) { + case 0: + msm_pri_tdm_slot_width = 16; + break; + case 1: + msm_pri_tdm_slot_width = 24; + break; + case 2: + msm_pri_tdm_slot_width = 32; + break; + default: + msm_pri_tdm_slot_width = 32; + break; + } + pr_debug("%s: msm_pri_tdm_slot_width= %d\n", + __func__, msm_pri_tdm_slot_width); + return 0; +} + +static int msm_pri_tdm_slot_num_get(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + switch (msm_pri_tdm_slot_num) { + case 1: + 
ucontrol->value.integer.value[0] = 0; + break; + case 2: + ucontrol->value.integer.value[0] = 1; + break; + case 4: + ucontrol->value.integer.value[0] = 2; + break; + case 8: + ucontrol->value.integer.value[0] = 3; + break; + case 16: + ucontrol->value.integer.value[0] = 4; + break; + case 32: + default: + ucontrol->value.integer.value[0] = 5; + break; + } + + pr_debug("%s: msm_pri_tdm_slot_num = %d\n", + __func__, msm_pri_tdm_slot_num); + return 0; +} + +static int msm_pri_tdm_slot_num_put(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + switch (ucontrol->value.integer.value[0]) { + case 0: + msm_pri_tdm_slot_num = 1; + break; + case 1: + msm_pri_tdm_slot_num = 2; + break; + case 2: + msm_pri_tdm_slot_num = 4; + break; + case 3: + msm_pri_tdm_slot_num = 8; + break; + case 4: + msm_pri_tdm_slot_num = 16; + break; + case 5: + msm_pri_tdm_slot_num = 32; + break; + default: + msm_pri_tdm_slot_num = 8; + break; + } + pr_debug("%s: msm_pri_tdm_slot_num = %d\n", + __func__, msm_pri_tdm_slot_num); + return 0; +} + +static int msm_tdm_slot_mapping_get(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct soc_multi_mixer_control *mc = + (struct soc_multi_mixer_control *)kcontrol->private_value; + unsigned int *slot_offset; + int i; + + if (mc->shift >= TDM_MAX) { + pr_err("%s invalid port index %d\n", __func__, mc->shift); + return -EINVAL; + } + + slot_offset = tdm_slot_offset[mc->shift]; + for (i = 0; i < TDM_SLOT_OFFSET_MAX; i++) { + ucontrol->value.integer.value[i] = slot_offset[i]; + pr_debug("%s port index %d offset %d value %d\n", + __func__, mc->shift, i, slot_offset[i]); + } + + return 0; +} + +static int msm_tdm_slot_mapping_put(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct soc_multi_mixer_control *mc = + (struct soc_multi_mixer_control *)kcontrol->private_value; + unsigned int *slot_offset; + int i; + + if (mc->shift >= TDM_MAX) { + pr_err("%s invalid port index %d\n", __func__, mc->shift); + return -EINVAL; + } + + slot_offset = tdm_slot_offset[mc->shift]; + + for (i = 0; i < TDM_SLOT_OFFSET_MAX; i++) { + slot_offset[i] = ucontrol->value.integer.value[i]; + pr_debug("%s port index %d offset %d value %d\n", + __func__, mc->shift, i, slot_offset[i]); + } + + return 0; +} static int msm_sec_tdm_tx_0_ch_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) @@ -1107,6 +1520,278 @@ static int msm_quat_tdm_tx_3_ch_put(struct snd_kcontrol *kcontrol, return 0; } +static int msm_pri_tdm_tx_0_bit_format_get(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + switch (msm_pri_tdm_tx_0_bit_format) { + case SNDRV_PCM_FORMAT_S24_LE: + ucontrol->value.integer.value[0] = 1; + break; + case SNDRV_PCM_FORMAT_S16_LE: + default: + ucontrol->value.integer.value[0] = 0; + break; + } + pr_debug("%s: msm_pri_tdm_tx_0_bit_format = %ld\n", + __func__, ucontrol->value.integer.value[0]); + return 0; +} + +static int msm_pri_tdm_tx_0_bit_format_put(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + switch (ucontrol->value.integer.value[0]) { + case 1: + msm_pri_tdm_tx_0_bit_format = SNDRV_PCM_FORMAT_S24_LE; + break; + case 0: + default: + msm_pri_tdm_tx_0_bit_format = SNDRV_PCM_FORMAT_S16_LE; + break; + } + pr_debug("%s: msm_pri_tdm_tx_0_bit_format = %d\n", + __func__, msm_pri_tdm_tx_0_bit_format); + return 0; +} + +static int msm_pri_tdm_tx_1_bit_format_get(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + switch 
(msm_pri_tdm_tx_1_bit_format) { + case SNDRV_PCM_FORMAT_S24_LE: + ucontrol->value.integer.value[0] = 1; + break; + case SNDRV_PCM_FORMAT_S16_LE: + default: + ucontrol->value.integer.value[0] = 0; + break; + } + pr_debug("%s: msm_pri_tdm_tx_1_bit_format = %ld\n", + __func__, ucontrol->value.integer.value[0]); + return 0; +} + +static int msm_pri_tdm_tx_1_bit_format_put(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + switch (ucontrol->value.integer.value[0]) { + case 1: + msm_pri_tdm_tx_1_bit_format = SNDRV_PCM_FORMAT_S24_LE; + break; + case 0: + default: + msm_pri_tdm_tx_1_bit_format = SNDRV_PCM_FORMAT_S16_LE; + break; + } + pr_debug("%s: msm_pri_tdm_tx_1_bit_format = %d\n", + __func__, msm_pri_tdm_tx_1_bit_format); + return 0; +} + +static int msm_pri_tdm_tx_2_bit_format_get(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + switch (msm_pri_tdm_tx_2_bit_format) { + case SNDRV_PCM_FORMAT_S24_LE: + ucontrol->value.integer.value[0] = 1; + break; + case SNDRV_PCM_FORMAT_S16_LE: + default: + ucontrol->value.integer.value[0] = 0; + break; + } + pr_debug("%s: msm_pri_tdm_tx_2_bit_format = %ld\n", + __func__, ucontrol->value.integer.value[0]); + return 0; +} + +static int msm_pri_tdm_tx_2_bit_format_put(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + switch (ucontrol->value.integer.value[0]) { + case 1: + msm_pri_tdm_tx_2_bit_format = SNDRV_PCM_FORMAT_S24_LE; + break; + case 0: + default: + msm_pri_tdm_tx_2_bit_format = SNDRV_PCM_FORMAT_S16_LE; + break; + } + pr_debug("%s: msm_pri_tdm_tx_2_bit_format = %d\n", + __func__, msm_pri_tdm_tx_2_bit_format); + return 0; +} + +static int msm_pri_tdm_tx_3_bit_format_get(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + switch (msm_pri_tdm_tx_3_bit_format) { + case SNDRV_PCM_FORMAT_S24_LE: + ucontrol->value.integer.value[0] = 1; + break; + case SNDRV_PCM_FORMAT_S16_LE: + default: + ucontrol->value.integer.value[0] = 0; + break; + } + pr_debug("%s: msm_pri_tdm_tx_3_bit_format = %ld\n", + __func__, ucontrol->value.integer.value[0]); + return 0; +} + +static int msm_pri_tdm_tx_3_bit_format_put(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + switch (ucontrol->value.integer.value[0]) { + case 1: + msm_pri_tdm_tx_3_bit_format = SNDRV_PCM_FORMAT_S24_LE; + break; + case 0: + default: + msm_pri_tdm_tx_3_bit_format = SNDRV_PCM_FORMAT_S16_LE; + break; + } + pr_debug("%s: msm_pri_tdm_tx_3_bit_format = %d\n", + __func__, msm_pri_tdm_tx_3_bit_format); + return 0; +} + +static int msm_pri_tdm_rx_0_bit_format_get(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + switch (msm_pri_tdm_rx_0_bit_format) { + case SNDRV_PCM_FORMAT_S24_LE: + ucontrol->value.integer.value[0] = 1; + break; + case SNDRV_PCM_FORMAT_S16_LE: + default: + ucontrol->value.integer.value[0] = 0; + break; + } + pr_debug("%s: msm_pri_tdm_rx_0_bit_format = %ld\n", + __func__, ucontrol->value.integer.value[0]); + return 0; +} + +static int msm_pri_tdm_rx_0_bit_format_put(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + switch (ucontrol->value.integer.value[0]) { + case 1: + msm_pri_tdm_rx_0_bit_format = SNDRV_PCM_FORMAT_S24_LE; + break; + case 0: + default: + msm_pri_tdm_rx_0_bit_format = SNDRV_PCM_FORMAT_S16_LE; + break; + } + pr_debug("%s: msm_pri_tdm_rx_0_bit_format = %d\n", + __func__, msm_pri_tdm_rx_0_bit_format); + return 0; +} + +static int msm_pri_tdm_rx_1_bit_format_get(struct snd_kcontrol *kcontrol, + struct 
snd_ctl_elem_value *ucontrol) +{ + switch (msm_pri_tdm_rx_1_bit_format) { + case SNDRV_PCM_FORMAT_S24_LE: + ucontrol->value.integer.value[0] = 1; + break; + case SNDRV_PCM_FORMAT_S16_LE: + default: + ucontrol->value.integer.value[0] = 0; + break; + } + pr_debug("%s: msm_pri_tdm_rx_1_bit_format = %ld\n", + __func__, ucontrol->value.integer.value[0]); + return 0; +} + +static int msm_pri_tdm_rx_1_bit_format_put(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + switch (ucontrol->value.integer.value[0]) { + case 1: + msm_pri_tdm_rx_1_bit_format = SNDRV_PCM_FORMAT_S24_LE; + break; + case 0: + default: + msm_pri_tdm_rx_1_bit_format = SNDRV_PCM_FORMAT_S16_LE; + break; + } + pr_debug("%s: msm_pri_tdm_rx_1_bit_format = %d\n", + __func__, msm_pri_tdm_rx_1_bit_format); + return 0; +} + +static int msm_pri_tdm_rx_2_bit_format_get(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + switch (msm_pri_tdm_rx_2_bit_format) { + case SNDRV_PCM_FORMAT_S24_LE: + ucontrol->value.integer.value[0] = 1; + break; + case SNDRV_PCM_FORMAT_S16_LE: + default: + ucontrol->value.integer.value[0] = 0; + break; + } + pr_debug("%s: msm_pri_tdm_rx_2_bit_format = %ld\n", + __func__, ucontrol->value.integer.value[0]); + return 0; +} + +static int msm_pri_tdm_rx_2_bit_format_put(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + switch (ucontrol->value.integer.value[0]) { + case 1: + msm_pri_tdm_rx_2_bit_format = SNDRV_PCM_FORMAT_S24_LE; + break; + case 0: + default: + msm_pri_tdm_rx_2_bit_format = SNDRV_PCM_FORMAT_S16_LE; + break; + } + pr_debug("%s: msm_pri_tdm_rx_2_bit_format = %d\n", + __func__, msm_pri_tdm_rx_2_bit_format); + return 0; +} + +static int msm_pri_tdm_rx_3_bit_format_get(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + switch (msm_pri_tdm_rx_3_bit_format) { + case SNDRV_PCM_FORMAT_S24_LE: + ucontrol->value.integer.value[0] = 1; + break; + case SNDRV_PCM_FORMAT_S16_LE: + default: + ucontrol->value.integer.value[0] = 0; + break; + } + pr_debug("%s: msm_pri_tdm_rx_3_bit_format = %ld\n", + __func__, ucontrol->value.integer.value[0]); + return 0; +} + +static int msm_pri_tdm_rx_3_bit_format_put(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + switch (ucontrol->value.integer.value[0]) { + case 1: + msm_pri_tdm_rx_3_bit_format = SNDRV_PCM_FORMAT_S24_LE; + break; + case 0: + default: + msm_pri_tdm_rx_3_bit_format = SNDRV_PCM_FORMAT_S16_LE; + break; + } + pr_debug("%s: msm_pri_tdm_rx_3_bit_format = %d\n", + __func__, msm_pri_tdm_rx_3_bit_format); + return 0; +} + static int msm_sec_tdm_tx_0_bit_format_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { @@ -2070,7 +2755,57 @@ static int msm_tdm_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd, struct snd_interval *channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); + rate->min = rate->max = SAMPLING_RATE_48KHZ; + switch (cpu_dai->id) { + case AFE_PORT_ID_PRIMARY_TDM_TX: + channels->min = channels->max = msm_pri_tdm_tx_0_ch; + param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT, + msm_pri_tdm_tx_0_bit_format); + rate->min = rate->max = msm_pri_tdm_rate; + break; + case AFE_PORT_ID_PRIMARY_TDM_TX_1: + channels->min = channels->max = msm_pri_tdm_tx_1_ch; + param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT, + msm_pri_tdm_tx_1_bit_format); + rate->min = rate->max = msm_pri_tdm_rate; + break; + case AFE_PORT_ID_PRIMARY_TDM_TX_2: + channels->min = channels->max = msm_pri_tdm_tx_2_ch; + param_set_mask(params, 
SNDRV_PCM_HW_PARAM_FORMAT, + msm_pri_tdm_tx_2_bit_format); + rate->min = rate->max = msm_pri_tdm_rate; + break; + case AFE_PORT_ID_PRIMARY_TDM_TX_3: + channels->min = channels->max = msm_pri_tdm_tx_3_ch; + param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT, + msm_pri_tdm_tx_3_bit_format); + rate->min = rate->max = msm_pri_tdm_rate; + break; + case AFE_PORT_ID_PRIMARY_TDM_RX: + channels->min = channels->max = msm_pri_tdm_rx_0_ch; + param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT, + msm_pri_tdm_rx_0_bit_format); + rate->min = rate->max = msm_pri_tdm_rate; + break; + case AFE_PORT_ID_PRIMARY_TDM_RX_1: + channels->min = channels->max = msm_pri_tdm_rx_1_ch; + param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT, + msm_pri_tdm_rx_1_bit_format); + rate->min = rate->max = msm_pri_tdm_rate; + break; + case AFE_PORT_ID_PRIMARY_TDM_RX_2: + channels->min = channels->max = msm_pri_tdm_rx_2_ch; + param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT, + msm_pri_tdm_rx_2_bit_format); + rate->min = rate->max = msm_pri_tdm_rate; + break; + case AFE_PORT_ID_PRIMARY_TDM_RX_3: + channels->min = channels->max = msm_pri_tdm_rx_3_ch; + param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT, + msm_pri_tdm_rx_3_bit_format); + rate->min = rate->max = msm_pri_tdm_rate; + break; case AFE_PORT_ID_SECONDARY_TDM_TX: channels->min = channels->max = msm_sec_tdm_tx_0_ch; param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT, @@ -2181,7 +2916,6 @@ static int msm_tdm_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd, __func__, cpu_dai->id); return -EINVAL; } - rate->min = rate->max = SAMPLING_RATE_48KHZ; pr_debug("%s: dai id = 0x%x channels = %d rate = %d format = 0x%x\n", __func__, cpu_dai->id, channels->max, rate->max, @@ -2300,99 +3034,18 @@ static struct snd_soc_ops apq8096_mi2s_be_ops = { .shutdown = apq8096_mi2s_snd_shutdown, }; -static unsigned int tdm_param_set_slot_mask(u16 port_id, - int slot_width, int slots) +static unsigned int tdm_param_set_slot_mask(int slots) { unsigned int slot_mask = 0; - int upper, lower, i, j; - unsigned int *slot_offset; + unsigned int i = 0; - switch (port_id) { - case AFE_PORT_ID_SECONDARY_TDM_RX: - case AFE_PORT_ID_SECONDARY_TDM_RX_1: - case AFE_PORT_ID_SECONDARY_TDM_RX_2: - case AFE_PORT_ID_SECONDARY_TDM_RX_3: - case AFE_PORT_ID_SECONDARY_TDM_RX_4: - case AFE_PORT_ID_SECONDARY_TDM_RX_5: - case AFE_PORT_ID_SECONDARY_TDM_RX_6: - case AFE_PORT_ID_SECONDARY_TDM_RX_7: - lower = SECONDARY_TDM_RX_0; - upper = SECONDARY_TDM_RX_7; - break; - case AFE_PORT_ID_SECONDARY_TDM_TX: - case AFE_PORT_ID_SECONDARY_TDM_TX_1: - case AFE_PORT_ID_SECONDARY_TDM_TX_2: - case AFE_PORT_ID_SECONDARY_TDM_TX_3: - case AFE_PORT_ID_SECONDARY_TDM_TX_4: - case AFE_PORT_ID_SECONDARY_TDM_TX_5: - case AFE_PORT_ID_SECONDARY_TDM_TX_6: - case AFE_PORT_ID_SECONDARY_TDM_TX_7: - lower = SECONDARY_TDM_TX_0; - upper = SECONDARY_TDM_TX_7; - break; - case AFE_PORT_ID_TERTIARY_TDM_RX: - case AFE_PORT_ID_TERTIARY_TDM_RX_1: - case AFE_PORT_ID_TERTIARY_TDM_RX_2: - case AFE_PORT_ID_TERTIARY_TDM_RX_3: - case AFE_PORT_ID_TERTIARY_TDM_RX_4: - case AFE_PORT_ID_TERTIARY_TDM_RX_5: - case AFE_PORT_ID_TERTIARY_TDM_RX_6: - case AFE_PORT_ID_TERTIARY_TDM_RX_7: - lower = TERTIARY_TDM_RX_0; - upper = TERTIARY_TDM_RX_7; - break; - case AFE_PORT_ID_TERTIARY_TDM_TX: - case AFE_PORT_ID_TERTIARY_TDM_TX_1: - case AFE_PORT_ID_TERTIARY_TDM_TX_2: - case AFE_PORT_ID_TERTIARY_TDM_TX_3: - case AFE_PORT_ID_TERTIARY_TDM_TX_4: - case AFE_PORT_ID_TERTIARY_TDM_TX_5: - case AFE_PORT_ID_TERTIARY_TDM_TX_6: - case AFE_PORT_ID_TERTIARY_TDM_TX_7: - lower = TERTIARY_TDM_TX_0; - upper = 
TERTIARY_TDM_TX_7; - break; - case AFE_PORT_ID_QUATERNARY_TDM_RX: - case AFE_PORT_ID_QUATERNARY_TDM_RX_1: - case AFE_PORT_ID_QUATERNARY_TDM_RX_2: - case AFE_PORT_ID_QUATERNARY_TDM_RX_3: - case AFE_PORT_ID_QUATERNARY_TDM_RX_4: - case AFE_PORT_ID_QUATERNARY_TDM_RX_5: - case AFE_PORT_ID_QUATERNARY_TDM_RX_6: - case AFE_PORT_ID_QUATERNARY_TDM_RX_7: - lower = QUATERNARY_TDM_RX_0; - upper = QUATERNARY_TDM_RX_7; - break; - case AFE_PORT_ID_QUATERNARY_TDM_TX: - case AFE_PORT_ID_QUATERNARY_TDM_TX_1: - case AFE_PORT_ID_QUATERNARY_TDM_TX_2: - case AFE_PORT_ID_QUATERNARY_TDM_TX_3: - case AFE_PORT_ID_QUATERNARY_TDM_TX_4: - case AFE_PORT_ID_QUATERNARY_TDM_TX_5: - case AFE_PORT_ID_QUATERNARY_TDM_TX_6: - case AFE_PORT_ID_QUATERNARY_TDM_TX_7: - lower = QUATERNARY_TDM_TX_0; - upper = QUATERNARY_TDM_TX_7; - break; - default: - return slot_mask; - } - - for (i = lower; i <= upper; i++) { - slot_offset = tdm_slot_offset[i]; - for (j = 0; j < TDM_SLOT_OFFSET_MAX; j++) { - if (slot_offset[j] != AFE_SLOT_MAPPING_OFFSET_INVALID) - /* - * set the mask of active slot according to - * the offset table for the group of devices - */ - slot_mask |= - (1 << ((slot_offset[j] * 8) / slot_width)); - else - break; - } + if ((slots != 16) && (slots != 8)) { + pr_err("%s: invalid slot number %d\n", __func__, slots); + return -EINVAL; } + for (i = 0; i < slots ; i++) + slot_mask |= 1 << i; return slot_mask; } @@ -2402,14 +3055,16 @@ static int apq8096_tdm_snd_hw_params(struct snd_pcm_substream *substream, struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *cpu_dai = rtd->cpu_dai; int ret = 0; - int channels, slot_width, slots; + int channels, slot_width, slots, rate; unsigned int slot_mask; unsigned int *slot_offset; int offset_channels = 0; int i; + int clk_freq; pr_debug("%s: dai id = 0x%x\n", __func__, cpu_dai->id); + rate = params_rate(params); channels = params_channels(params); if (channels < 1 || channels > 8) { pr_err("%s: invalid param channels %d\n", @@ -2435,15 +3090,88 @@ static int apq8096_tdm_snd_hw_params(struct snd_pcm_substream *substream, } slots = msm_tdm_num_slots; - slot_mask = tdm_param_set_slot_mask(cpu_dai->id, - slot_width, slots); - if (!slot_mask) { - pr_err("%s: invalid slot_mask 0x%x\n", - __func__, slot_mask); - return -EINVAL; - } switch (cpu_dai->id) { + case AFE_PORT_ID_PRIMARY_TDM_RX: + slots = msm_pri_tdm_slot_num; + slot_width = msm_pri_tdm_slot_width; + slot_offset = tdm_slot_offset[PRIMARY_TDM_RX_0]; + break; + case AFE_PORT_ID_PRIMARY_TDM_RX_1: + slots = msm_pri_tdm_slot_num; + slot_width = msm_pri_tdm_slot_width; + slot_offset = tdm_slot_offset[PRIMARY_TDM_RX_1]; + break; + case AFE_PORT_ID_PRIMARY_TDM_RX_2: + slots = msm_pri_tdm_slot_num; + slot_width = msm_pri_tdm_slot_width; + slot_offset = tdm_slot_offset[PRIMARY_TDM_RX_2]; + break; + case AFE_PORT_ID_PRIMARY_TDM_RX_3: + slots = msm_pri_tdm_slot_num; + slot_width = msm_pri_tdm_slot_width; + slot_offset = tdm_slot_offset[PRIMARY_TDM_RX_3]; + break; + case AFE_PORT_ID_PRIMARY_TDM_RX_4: + slots = msm_pri_tdm_slot_num; + slot_width = msm_pri_tdm_slot_width; + slot_offset = tdm_slot_offset[PRIMARY_TDM_RX_4]; + break; + case AFE_PORT_ID_PRIMARY_TDM_RX_5: + slots = msm_pri_tdm_slot_num; + slot_width = msm_pri_tdm_slot_width; + slot_offset = tdm_slot_offset[PRIMARY_TDM_RX_5]; + break; + case AFE_PORT_ID_PRIMARY_TDM_RX_6: + slots = msm_pri_tdm_slot_num; + slot_width = msm_pri_tdm_slot_width; + slot_offset = tdm_slot_offset[PRIMARY_TDM_RX_6]; + break; + case AFE_PORT_ID_PRIMARY_TDM_RX_7: + slots = msm_pri_tdm_slot_num; 
+ slot_width = msm_pri_tdm_slot_width; + slot_offset = tdm_slot_offset[PRIMARY_TDM_RX_7]; + break; + case AFE_PORT_ID_PRIMARY_TDM_TX: + slots = msm_pri_tdm_slot_num; + slot_width = msm_pri_tdm_slot_width; + slot_offset = tdm_slot_offset[PRIMARY_TDM_TX_0]; + break; + case AFE_PORT_ID_PRIMARY_TDM_TX_1: + slots = msm_pri_tdm_slot_num; + slot_width = msm_pri_tdm_slot_width; + slot_offset = tdm_slot_offset[PRIMARY_TDM_TX_1]; + break; + case AFE_PORT_ID_PRIMARY_TDM_TX_2: + slots = msm_pri_tdm_slot_num; + slot_width = msm_pri_tdm_slot_width; + slot_offset = tdm_slot_offset[PRIMARY_TDM_TX_2]; + break; + case AFE_PORT_ID_PRIMARY_TDM_TX_3: + slots = msm_pri_tdm_slot_num; + slot_width = msm_pri_tdm_slot_width; + slot_offset = tdm_slot_offset[PRIMARY_TDM_TX_3]; + break; + case AFE_PORT_ID_PRIMARY_TDM_TX_4: + slots = msm_pri_tdm_slot_num; + slot_width = msm_pri_tdm_slot_width; + slot_offset = tdm_slot_offset[PRIMARY_TDM_TX_4]; + break; + case AFE_PORT_ID_PRIMARY_TDM_TX_5: + slots = msm_pri_tdm_slot_num; + slot_width = msm_pri_tdm_slot_width; + slot_offset = tdm_slot_offset[PRIMARY_TDM_TX_5]; + break; + case AFE_PORT_ID_PRIMARY_TDM_TX_6: + slots = msm_pri_tdm_slot_num; + slot_width = msm_pri_tdm_slot_width; + slot_offset = tdm_slot_offset[PRIMARY_TDM_TX_6]; + break; + case AFE_PORT_ID_PRIMARY_TDM_TX_7: + slots = msm_pri_tdm_slot_num; + slot_width = msm_pri_tdm_slot_width; + slot_offset = tdm_slot_offset[PRIMARY_TDM_TX_7]; + break; case AFE_PORT_ID_SECONDARY_TDM_RX: slot_offset = tdm_slot_offset[SECONDARY_TDM_RX_0]; break; @@ -2613,6 +3341,13 @@ static int apq8096_tdm_snd_hw_params(struct snd_pcm_substream *substream, return -EINVAL; } + slot_mask = tdm_param_set_slot_mask(slots); + if (!slot_mask) { + pr_err("%s: invalid slot_mask 0x%x\n", + __func__, slot_mask); + return -EINVAL; + } + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { ret = snd_soc_dai_set_tdm_slot(cpu_dai, 0, slot_mask, slots, slot_width); @@ -2647,6 +3382,13 @@ static int apq8096_tdm_snd_hw_params(struct snd_pcm_substream *substream, } } + clk_freq = rate * slot_width * slots; + ret = snd_soc_dai_set_sysclk(cpu_dai, 0, clk_freq, SND_SOC_CLOCK_OUT); + if (ret < 0) { + pr_err("%s: failed to set tdm clk, err:%d\n", + __func__, ret); + } + end: return ret; } @@ -2668,6 +3410,9 @@ static const struct soc_enum msm_snd_enum[] = { SOC_ENUM_SINGLE_EXT(3, ec_ref_bit_format_text), SOC_ENUM_SINGLE_EXT(9, ec_ref_rate_text), SOC_ENUM_SINGLE_EXT(3, mi2s_rate_text), + SOC_ENUM_SINGLE_EXT(3, pri_tdm_rate_text), + SOC_ENUM_SINGLE_EXT(6, pri_tdm_slot_num_text), + SOC_ENUM_SINGLE_EXT(3, pri_tdm_slot_width_text), }; static const struct snd_kcontrol_new msm_snd_controls[] = { @@ -2681,6 +3426,22 @@ static const struct snd_kcontrol_new msm_snd_controls[] = { msm_proxy_rx_ch_get, msm_proxy_rx_ch_put), SOC_ENUM_EXT("HDMI_RX SampleRate", msm_snd_enum[4], hdmi_rx_sample_rate_get, hdmi_rx_sample_rate_put), + SOC_ENUM_EXT("PRI_TDM_TX_0 Channels", msm_snd_enum[5], + msm_pri_tdm_tx_0_ch_get, msm_pri_tdm_tx_0_ch_put), + SOC_ENUM_EXT("PRI_TDM_TX_1 Channels", msm_snd_enum[5], + msm_pri_tdm_tx_1_ch_get, msm_pri_tdm_tx_1_ch_put), + SOC_ENUM_EXT("PRI_TDM_TX_2 Channels", msm_snd_enum[5], + msm_pri_tdm_tx_2_ch_get, msm_pri_tdm_tx_2_ch_put), + SOC_ENUM_EXT("PRI_TDM_TX_3 Channels", msm_snd_enum[5], + msm_pri_tdm_tx_3_ch_get, msm_pri_tdm_tx_3_ch_put), + SOC_ENUM_EXT("PRI_TDM_RX_0 Channels", msm_snd_enum[5], + msm_pri_tdm_rx_0_ch_get, msm_pri_tdm_rx_0_ch_put), + SOC_ENUM_EXT("PRI_TDM_RX_1 Channels", msm_snd_enum[5], + msm_pri_tdm_rx_1_ch_get, msm_pri_tdm_rx_1_ch_put), + 
SOC_ENUM_EXT("PRI_TDM_RX_2 Channels", msm_snd_enum[5], + msm_pri_tdm_rx_2_ch_get, msm_pri_tdm_rx_2_ch_put), + SOC_ENUM_EXT("PRI_TDM_RX_3 Channels", msm_snd_enum[5], + msm_pri_tdm_rx_3_ch_get, msm_pri_tdm_rx_3_ch_put), SOC_ENUM_EXT("SEC_TDM_TX_0 Channels", msm_snd_enum[5], msm_sec_tdm_tx_0_ch_get, msm_sec_tdm_tx_0_ch_put), SOC_ENUM_EXT("SEC_TDM_TX_1 Channels", msm_snd_enum[5], @@ -2723,6 +3484,30 @@ static const struct snd_kcontrol_new msm_snd_controls[] = { msm_quat_tdm_tx_2_ch_get, msm_quat_tdm_tx_2_ch_put), SOC_ENUM_EXT("QUAT_TDM_TX_3 Channels", msm_snd_enum[5], msm_quat_tdm_tx_3_ch_get, msm_quat_tdm_tx_3_ch_put), + SOC_ENUM_EXT("PRI_TDM_TX_0 Bit Format", msm_snd_enum[6], + msm_pri_tdm_tx_0_bit_format_get, + msm_pri_tdm_tx_0_bit_format_put), + SOC_ENUM_EXT("PRI_TDM_TX_1 Bit Format", msm_snd_enum[6], + msm_pri_tdm_tx_1_bit_format_get, + msm_pri_tdm_tx_1_bit_format_put), + SOC_ENUM_EXT("PRI_TDM_TX_2 Bit Format", msm_snd_enum[6], + msm_pri_tdm_tx_2_bit_format_get, + msm_pri_tdm_tx_2_bit_format_put), + SOC_ENUM_EXT("PRI_TDM_TX_3 Bit Format", msm_snd_enum[6], + msm_pri_tdm_tx_3_bit_format_get, + msm_pri_tdm_tx_3_bit_format_put), + SOC_ENUM_EXT("PRI_TDM_RX_0 Bit Format", msm_snd_enum[6], + msm_pri_tdm_rx_0_bit_format_get, + msm_pri_tdm_rx_0_bit_format_put), + SOC_ENUM_EXT("PRI_TDM_RX_1 Bit Format", msm_snd_enum[6], + msm_pri_tdm_rx_1_bit_format_get, + msm_pri_tdm_rx_1_bit_format_put), + SOC_ENUM_EXT("PRI_TDM_RX_2 Bit Format", msm_snd_enum[6], + msm_pri_tdm_rx_2_bit_format_get, + msm_pri_tdm_rx_2_bit_format_put), + SOC_ENUM_EXT("PRI_TDM_RX_3 Bit Format", msm_snd_enum[6], + msm_pri_tdm_rx_3_bit_format_get, + msm_pri_tdm_rx_3_bit_format_put), SOC_ENUM_EXT("SEC_TDM_TX_0 Bit Format", msm_snd_enum[6], msm_sec_tdm_tx_0_bit_format_get, msm_sec_tdm_tx_0_bit_format_put), @@ -2797,6 +3582,268 @@ static const struct snd_kcontrol_new msm_snd_controls[] = { msm_sec_mi2s_tx_bit_format_put), SOC_ENUM_EXT("SEC_MI2S_TX SampleRate", msm_snd_enum[11], msm_sec_mi2s_rate_get, msm_sec_mi2s_rate_put), + SOC_ENUM_EXT("PRI_TDM SampleRate", msm_snd_enum[12], + msm_pri_tdm_rate_get, msm_pri_tdm_rate_put), + SOC_ENUM_EXT("PRI_TDM Slot Number", msm_snd_enum[13], + msm_pri_tdm_slot_num_get, msm_pri_tdm_slot_num_put), + SOC_ENUM_EXT("PRI_TDM Slot Width", msm_snd_enum[14], + msm_pri_tdm_slot_width_get, msm_pri_tdm_slot_width_put), + SOC_SINGLE_MULTI_EXT("PRI_TDM_RX_0 Slot Mapping", SND_SOC_NOPM, + PRIMARY_TDM_RX_0, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("PRI_TDM_RX_1 Slot Mapping", SND_SOC_NOPM, + PRIMARY_TDM_RX_1, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("PRI_TDM_RX_2 Slot Mapping", SND_SOC_NOPM, + PRIMARY_TDM_RX_2, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("PRI_TDM_RX_3 Slot Mapping", SND_SOC_NOPM, + PRIMARY_TDM_RX_3, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("PRI_TDM_RX_4 Slot Mapping", SND_SOC_NOPM, + PRIMARY_TDM_RX_4, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("PRI_TDM_RX_5 Slot Mapping", SND_SOC_NOPM, + PRIMARY_TDM_RX_5, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("PRI_TDM_RX_6 Slot Mapping", SND_SOC_NOPM, + PRIMARY_TDM_RX_6, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("PRI_TDM_RX_7 Slot Mapping", SND_SOC_NOPM, + PRIMARY_TDM_RX_7, 0xFFFF, + 0, 8, 
msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("PRI_TDM_TX_0 Slot Mapping", SND_SOC_NOPM, + PRIMARY_TDM_TX_0, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("PRI_TDM_TX_1 Slot Mapping", SND_SOC_NOPM, + PRIMARY_TDM_TX_1, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("PRI_TDM_TX_2 Slot Mapping", SND_SOC_NOPM, + PRIMARY_TDM_TX_2, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("PRI_TDM_TX_3 Slot Mapping", SND_SOC_NOPM, + PRIMARY_TDM_TX_3, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("PRI_TDM_TX_4 Slot Mapping", SND_SOC_NOPM, + PRIMARY_TDM_TX_4, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("PRI_TDM_TX_5 Slot Mapping", SND_SOC_NOPM, + PRIMARY_TDM_TX_5, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("PRI_TDM_TX_6 Slot Mapping", SND_SOC_NOPM, + PRIMARY_TDM_TX_6, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("PRI_TDM_TX_7 Slot Mapping", SND_SOC_NOPM, + PRIMARY_TDM_TX_7, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("SEC_TDM_RX_0 Slot Mapping", SND_SOC_NOPM, + SECONDARY_TDM_RX_0, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("SEC_TDM_RX_1 Slot Mapping", SND_SOC_NOPM, + SECONDARY_TDM_RX_1, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("SEC_TDM_RX_2 Slot Mapping", SND_SOC_NOPM, + SECONDARY_TDM_RX_2, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("SEC_TDM_RX_3 Slot Mapping", SND_SOC_NOPM, + SECONDARY_TDM_RX_3, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("SEC_TDM_RX_4 Slot Mapping", SND_SOC_NOPM, + SECONDARY_TDM_RX_4, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("SEC_TDM_RX_5 Slot Mapping", SND_SOC_NOPM, + SECONDARY_TDM_RX_5, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("SEC_TDM_RX_6 Slot Mapping", SND_SOC_NOPM, + SECONDARY_TDM_RX_6, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("SEC_TDM_RX_7 Slot Mapping", SND_SOC_NOPM, + SECONDARY_TDM_RX_7, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("SEC_TDM_TX_0 Slot Mapping", SND_SOC_NOPM, + SECONDARY_TDM_TX_0, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("SEC_TDM_TX_1 Slot Mapping", SND_SOC_NOPM, + SECONDARY_TDM_TX_1, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("SEC_TDM_TX_2 Slot Mapping", SND_SOC_NOPM, + SECONDARY_TDM_TX_2, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("SEC_TDM_TX_3 Slot Mapping", SND_SOC_NOPM, + SECONDARY_TDM_TX_3, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("SEC_TDM_TX_4 Slot Mapping", SND_SOC_NOPM, + SECONDARY_TDM_TX_4, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("SEC_TDM_TX_5 Slot Mapping", SND_SOC_NOPM, + SECONDARY_TDM_TX_5, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + 
SOC_SINGLE_MULTI_EXT("SEC_TDM_TX_6 Slot Mapping", SND_SOC_NOPM, + SECONDARY_TDM_TX_6, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("SEC_TDM_TX_7 Slot Mapping", SND_SOC_NOPM, + SECONDARY_TDM_TX_7, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("TERT_TDM_RX_0 Slot Mapping", SND_SOC_NOPM, + TERTIARY_TDM_RX_0, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("TERT_TDM_RX_1 Slot Mapping", SND_SOC_NOPM, + TERTIARY_TDM_RX_1, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("TERT_TDM_RX_2 Slot Mapping", SND_SOC_NOPM, + TERTIARY_TDM_RX_2, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("TERT_TDM_RX_3 Slot Mapping", SND_SOC_NOPM, + TERTIARY_TDM_RX_3, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("TERT_TDM_RX_4 Slot Mapping", SND_SOC_NOPM, + TERTIARY_TDM_RX_4, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("TERT_TDM_RX_5 Slot Mapping", SND_SOC_NOPM, + TERTIARY_TDM_RX_5, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("TERT_TDM_RX_6 Slot Mapping", SND_SOC_NOPM, + TERTIARY_TDM_RX_6, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("TERT_TDM_RX_7 Slot Mapping", SND_SOC_NOPM, + TERTIARY_TDM_RX_7, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("TERT_TDM_TX_0 Slot Mapping", SND_SOC_NOPM, + TERTIARY_TDM_TX_0, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("TERT_TDM_TX_1 Slot Mapping", SND_SOC_NOPM, + TERTIARY_TDM_TX_1, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("TERT_TDM_TX_2 Slot Mapping", SND_SOC_NOPM, + TERTIARY_TDM_TX_2, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("TERT_TDM_TX_3 Slot Mapping", SND_SOC_NOPM, + TERTIARY_TDM_TX_3, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("TERT_TDM_TX_4 Slot Mapping", SND_SOC_NOPM, + TERTIARY_TDM_TX_4, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("TERT_TDM_TX_5 Slot Mapping", SND_SOC_NOPM, + TERTIARY_TDM_TX_5, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("TERT_TDM_TX_6 Slot Mapping", SND_SOC_NOPM, + TERTIARY_TDM_TX_6, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("TERT_TDM_TX_7 Slot Mapping", SND_SOC_NOPM, + TERTIARY_TDM_TX_7, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("QUAT_TDM_RX_0 Slot Mapping", SND_SOC_NOPM, + QUATERNARY_TDM_RX_0, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("QUAT_TDM_RX_1 Slot Mapping", SND_SOC_NOPM, + QUATERNARY_TDM_RX_1, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("QUAT_TDM_RX_2 Slot Mapping", SND_SOC_NOPM, + QUATERNARY_TDM_RX_2, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("QUAT_TDM_RX_3 Slot Mapping", SND_SOC_NOPM, + QUATERNARY_TDM_RX_3, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("QUAT_TDM_RX_4 Slot 
Mapping", SND_SOC_NOPM, + QUATERNARY_TDM_RX_4, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("QUAT_TDM_RX_5 Slot Mapping", SND_SOC_NOPM, + QUATERNARY_TDM_RX_5, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("QUAT_TDM_RX_6 Slot Mapping", SND_SOC_NOPM, + QUATERNARY_TDM_RX_6, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("QUAT_TDM_RX_7 Slot Mapping", SND_SOC_NOPM, + QUATERNARY_TDM_RX_7, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("QUAT_TDM_TX_0 Slot Mapping", SND_SOC_NOPM, + QUATERNARY_TDM_TX_0, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("QUAT_TDM_TX_1 Slot Mapping", SND_SOC_NOPM, + QUATERNARY_TDM_TX_1, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("QUAT_TDM_TX_2 Slot Mapping", SND_SOC_NOPM, + QUATERNARY_TDM_TX_2, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("QUAT_TDM_TX_3 Slot Mapping", SND_SOC_NOPM, + QUATERNARY_TDM_TX_3, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("QUAT_TDM_TX_4 Slot Mapping", SND_SOC_NOPM, + QUATERNARY_TDM_TX_4, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("QUAT_TDM_TX_5 Slot Mapping", SND_SOC_NOPM, + QUATERNARY_TDM_TX_5, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("QUAT_TDM_TX_6 Slot Mapping", SND_SOC_NOPM, + QUATERNARY_TDM_TX_6, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), + SOC_SINGLE_MULTI_EXT("QUAT_TDM_TX_7 Slot Mapping", SND_SOC_NOPM, + QUATERNARY_TDM_TX_7, 0xFFFF, + 0, 8, msm_tdm_slot_mapping_get, + msm_tdm_slot_mapping_put), SOC_ENUM_EXT("EC Reference Channels", msm_snd_enum[8], msm_ec_ref_ch_get, msm_ec_ref_ch_put), SOC_ENUM_EXT("EC Reference Bit Format", msm_snd_enum[9], @@ -3806,6 +4853,126 @@ static struct snd_soc_dai_link apq8096_auto_fe_dai_links[] = { .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", }, + { + .name = "Primary TDM RX 0 Hostless", + .stream_name = "Primary TDM RX 0 Hostless", + .cpu_dai_name = "PRI_TDM_RX_0_HOSTLESS", + .platform_name = "msm-pcm-hostless", + .dynamic = 1, + .dpcm_playback = 1, + .trigger = {SND_SOC_DPCM_TRIGGER_POST, + SND_SOC_DPCM_TRIGGER_POST}, + .no_host_mode = SND_SOC_DAI_LINK_NO_HOST, + .ignore_suspend = 1, + .ignore_pmdown_time = 1, + .codec_dai_name = "snd-soc-dummy-dai", + .codec_name = "snd-soc-dummy", + }, + { + .name = "Primary TDM RX 1 Hostless", + .stream_name = "Primary TDM RX 1 Hostless", + .cpu_dai_name = "PRI_TDM_RX_1_HOSTLESS", + .platform_name = "msm-pcm-hostless", + .dynamic = 1, + .dpcm_playback = 1, + .trigger = {SND_SOC_DPCM_TRIGGER_POST, + SND_SOC_DPCM_TRIGGER_POST}, + .no_host_mode = SND_SOC_DAI_LINK_NO_HOST, + .ignore_suspend = 1, + .ignore_pmdown_time = 1, + .codec_dai_name = "snd-soc-dummy-dai", + .codec_name = "snd-soc-dummy", + }, + { + .name = "Primary TDM RX 2 Hostless", + .stream_name = "Primary TDM RX 2 Hostless", + .cpu_dai_name = "PRI_TDM_RX_2_HOSTLESS", + .platform_name = "msm-pcm-hostless", + .dynamic = 1, + .dpcm_playback = 1, + .trigger = {SND_SOC_DPCM_TRIGGER_POST, + SND_SOC_DPCM_TRIGGER_POST}, + .no_host_mode = SND_SOC_DAI_LINK_NO_HOST, + .ignore_suspend = 1, + .ignore_pmdown_time = 1, + .codec_dai_name = "snd-soc-dummy-dai", + .codec_name = "snd-soc-dummy", 
+ }, + { + .name = "Primary TDM RX 3 Hostless", + .stream_name = "Primary TDM RX 3 Hostless", + .cpu_dai_name = "PRI_TDM_RX_3_HOSTLESS", + .platform_name = "msm-pcm-hostless", + .dynamic = 1, + .dpcm_playback = 1, + .trigger = {SND_SOC_DPCM_TRIGGER_POST, + SND_SOC_DPCM_TRIGGER_POST}, + .no_host_mode = SND_SOC_DAI_LINK_NO_HOST, + .ignore_suspend = 1, + .ignore_pmdown_time = 1, + .codec_dai_name = "snd-soc-dummy-dai", + .codec_name = "snd-soc-dummy", + }, + { + .name = "Primary TDM TX 0 Hostless", + .stream_name = "Primary TDM TX 0 Hostless", + .cpu_dai_name = "PRI_TDM_TX_0_HOSTLESS", + .platform_name = "msm-pcm-hostless", + .dynamic = 1, + .dpcm_capture = 1, + .trigger = {SND_SOC_DPCM_TRIGGER_POST, + SND_SOC_DPCM_TRIGGER_POST}, + .no_host_mode = SND_SOC_DAI_LINK_NO_HOST, + .ignore_suspend = 1, + .ignore_pmdown_time = 1, + .codec_dai_name = "snd-soc-dummy-dai", + .codec_name = "snd-soc-dummy", + }, + { + .name = "Primary TDM TX 1 Hostless", + .stream_name = "Primary TDM TX 1 Hostless", + .cpu_dai_name = "PRI_TDM_TX_1_HOSTLESS", + .platform_name = "msm-pcm-hostless", + .dynamic = 1, + .dpcm_capture = 1, + .trigger = {SND_SOC_DPCM_TRIGGER_POST, + SND_SOC_DPCM_TRIGGER_POST}, + .no_host_mode = SND_SOC_DAI_LINK_NO_HOST, + .ignore_suspend = 1, + .ignore_pmdown_time = 1, + .codec_dai_name = "snd-soc-dummy-dai", + .codec_name = "snd-soc-dummy", + }, + { + .name = "Primary TDM TX 2 Hostless", + .stream_name = "Primary TDM TX 2 Hostless", + .cpu_dai_name = "PRI_TDM_TX_2_HOSTLESS", + .platform_name = "msm-pcm-hostless", + .dynamic = 1, + .dpcm_capture = 1, + .trigger = {SND_SOC_DPCM_TRIGGER_POST, + SND_SOC_DPCM_TRIGGER_POST}, + .no_host_mode = SND_SOC_DAI_LINK_NO_HOST, + .ignore_suspend = 1, + .ignore_pmdown_time = 1, + .codec_dai_name = "snd-soc-dummy-dai", + .codec_name = "snd-soc-dummy", + }, + { + .name = "Primary TDM TX 3 Hostless", + .stream_name = "Primary TDM TX 3 Hostless", + .cpu_dai_name = "PRI_TDM_TX_3_HOSTLESS", + .platform_name = "msm-pcm-hostless", + .dynamic = 1, + .dpcm_capture = 1, + .trigger = {SND_SOC_DPCM_TRIGGER_POST, + SND_SOC_DPCM_TRIGGER_POST}, + .no_host_mode = SND_SOC_DAI_LINK_NO_HOST, + .ignore_suspend = 1, + .ignore_pmdown_time = 1, + .codec_dai_name = "snd-soc-dummy-dai", + .codec_name = "snd-soc-dummy", + } }; static struct snd_soc_dai_link apq8096_custom_fe_dai_links[] = { @@ -4452,6 +5619,118 @@ static struct snd_soc_dai_link apq8096_auto_be_dai_links[] = { .ops = &apq8096_tdm_be_ops, .ignore_suspend = 1, }, + { + .name = LPASS_BE_PRI_TDM_RX_0, + .stream_name = "Primary TDM0 Playback", + .cpu_dai_name = "msm-dai-q6-tdm.36864", + .platform_name = "msm-pcm-routing", + .codec_name = "msm-stub-codec.1", + .codec_dai_name = "msm-stub-rx", + .no_pcm = 1, + .dpcm_playback = 1, + .be_id = MSM_BACKEND_DAI_PRI_TDM_RX_0, + .be_hw_params_fixup = msm_tdm_be_hw_params_fixup, + .ops = &apq8096_tdm_be_ops, + .ignore_suspend = 1, + }, + { + .name = LPASS_BE_PRI_TDM_RX_1, + .stream_name = "Primary TDM1 Playback", + .cpu_dai_name = "msm-dai-q6-tdm.36866", + .platform_name = "msm-pcm-routing", + .codec_name = "msm-stub-codec.1", + .codec_dai_name = "msm-stub-rx", + .no_pcm = 1, + .dpcm_playback = 1, + .be_id = MSM_BACKEND_DAI_PRI_TDM_RX_1, + .be_hw_params_fixup = msm_tdm_be_hw_params_fixup, + .ops = &apq8096_tdm_be_ops, + .ignore_suspend = 1, + }, + { + .name = LPASS_BE_PRI_TDM_RX_2, + .stream_name = "Primary TDM2 Playback", + .cpu_dai_name = "msm-dai-q6-tdm.36868", + .platform_name = "msm-pcm-routing", + .codec_name = "msm-stub-codec.1", + .codec_dai_name = "msm-stub-rx", + .no_pcm = 1, + 
.dpcm_playback = 1, + .be_id = MSM_BACKEND_DAI_PRI_TDM_RX_2, + .be_hw_params_fixup = msm_tdm_be_hw_params_fixup, + .ops = &apq8096_tdm_be_ops, + .ignore_suspend = 1, + }, + { + .name = LPASS_BE_PRI_TDM_RX_3, + .stream_name = "Primary TDM3 Playback", + .cpu_dai_name = "msm-dai-q6-tdm.36870", + .platform_name = "msm-pcm-routing", + .codec_name = "msm-stub-codec.1", + .codec_dai_name = "msm-stub-rx", + .no_pcm = 1, + .dpcm_playback = 1, + .be_id = MSM_BACKEND_DAI_PRI_TDM_RX_3, + .be_hw_params_fixup = msm_tdm_be_hw_params_fixup, + .ops = &apq8096_tdm_be_ops, + .ignore_suspend = 1, + }, + { + .name = LPASS_BE_PRI_TDM_TX_0, + .stream_name = "Primary TDM0 Capture", + .cpu_dai_name = "msm-dai-q6-tdm.36865", + .platform_name = "msm-pcm-routing", + .codec_name = "msm-stub-codec.1", + .codec_dai_name = "msm-stub-rx", + .no_pcm = 1, + .dpcm_capture = 1, + .be_id = MSM_BACKEND_DAI_PRI_TDM_TX_0, + .be_hw_params_fixup = msm_tdm_be_hw_params_fixup, + .ops = &apq8096_tdm_be_ops, + .ignore_suspend = 1, + }, + { + .name = LPASS_BE_PRI_TDM_TX_1, + .stream_name = "Primary TDM1 Capture", + .cpu_dai_name = "msm-dai-q6-tdm.36867", + .platform_name = "msm-pcm-routing", + .codec_name = "msm-stub-codec.1", + .codec_dai_name = "msm-stub-rx", + .no_pcm = 1, + .dpcm_capture = 1, + .be_id = MSM_BACKEND_DAI_PRI_TDM_TX_1, + .be_hw_params_fixup = msm_tdm_be_hw_params_fixup, + .ops = &apq8096_tdm_be_ops, + .ignore_suspend = 1, + }, + { + .name = LPASS_BE_PRI_TDM_TX_2, + .stream_name = "Primary TDM2 Capture", + .cpu_dai_name = "msm-dai-q6-tdm.36869", + .platform_name = "msm-pcm-routing", + .codec_name = "msm-stub-codec.1", + .codec_dai_name = "msm-stub-rx", + .no_pcm = 1, + .dpcm_capture = 1, + .be_id = MSM_BACKEND_DAI_PRI_TDM_TX_2, + .be_hw_params_fixup = msm_tdm_be_hw_params_fixup, + .ops = &apq8096_tdm_be_ops, + .ignore_suspend = 1, + }, + { + .name = LPASS_BE_PRI_TDM_TX_3, + .stream_name = "Primary TDM3 Capture", + .cpu_dai_name = "msm-dai-q6-tdm.36871", + .platform_name = "msm-pcm-routing", + .codec_name = "msm-stub-codec.1", + .codec_dai_name = "msm-stub-rx", + .no_pcm = 1, + .dpcm_capture = 1, + .be_id = MSM_BACKEND_DAI_PRI_TDM_TX_3, + .be_hw_params_fixup = msm_tdm_be_hw_params_fixup, + .ops = &apq8096_tdm_be_ops, + .ignore_suspend = 1, + } }; static struct snd_soc_dai_link apq8096_hdmi_dai_link[] = { @@ -4795,7 +6074,6 @@ static int apq8096_asoc_machine_probe(struct platform_device *pdev) goto err; } dev_info(&pdev->dev, "Sound card %s registered\n", card->name); - return 0; err: diff --git a/sound/soc/msm/msm-pcm-hostless.c b/sound/soc/msm/msm-pcm-hostless.c index 57932433afe9..51b0a7208462 100644 --- a/sound/soc/msm/msm-pcm-hostless.c +++ b/sound/soc/msm/msm-pcm-hostless.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved. +/* Copyright (c) 2011-2014, 2017 The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -25,7 +25,9 @@ static int msm_pcm_hostless_prepare(struct snd_pcm_substream *substream) pr_err("%s: invalid params\n", __func__); return -EINVAL; } - pm_qos_remove_request(&substream->latency_pm_qos_req); + if (pm_qos_request_active(&substream->latency_pm_qos_req)) + pm_qos_remove_request(&substream->latency_pm_qos_req); + return 0; } diff --git a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c index c462f682e160..471be3294881 100644 --- a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c +++ b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c @@ -397,12 +397,9 @@ static int msm_compr_set_volume(struct snd_compr_stream *cstream, } else { gain_list[0] = volume_l; gain_list[1] = volume_r; - /* force sending FR/FL/FC volume for mono */ - if (prtd->num_channels == 1) { - gain_list[2] = volume_l; - num_channels = 3; - use_default = true; - } + gain_list[2] = volume_l; + num_channels = 3; + use_default = true; rc = q6asm_set_multich_gain(prtd->audio_client, num_channels, gain_list, chmap, use_default); } diff --git a/sound/soc/msm/qdsp6v2/msm-lsm-client.c b/sound/soc/msm/qdsp6v2/msm-lsm-client.c index 35270e3340ec..ae6767d26921 100644 --- a/sound/soc/msm/qdsp6v2/msm-lsm-client.c +++ b/sound/soc/msm/qdsp6v2/msm-lsm-client.c @@ -1167,7 +1167,7 @@ static int msm_lsm_ioctl_shared(struct snd_pcm_substream *substream, case SNDRV_LSM_SET_FWK_MODE_CONFIG: { u32 mode; - if (copy_from_user(&mode, arg, sizeof(mode))) { + if (copy_from_user(&mode, (void __user *) arg, sizeof(mode))) { dev_err(rtd->dev, "%s: %s: copy_frm_user failed\n", __func__, "LSM_SET_FWK_MODE_CONFIG"); return -EFAULT; diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c index b94eb6fbfeea..0d01803e634d 100644 --- a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c +++ b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c @@ -1448,12 +1448,13 @@ static int msm_pcm_add_compress_control(struct snd_soc_pcm_runtime *rtd) if (pdata) { if (!pdata->pcm) { pdata->pcm = rtd->pcm; - snd_soc_add_platform_controls(rtd->platform, - pcm_compress_control, - ARRAY_SIZE - (pcm_compress_control)); - pr_debug("%s: add control success plt = %pK\n", - __func__, rtd->platform); + ret = snd_soc_add_platform_controls(rtd->platform, + pcm_compress_control, + ARRAY_SIZE + (pcm_compress_control)); + if (ret < 0) + pr_err("%s: failed add ctl %s. 
err = %d\n", + __func__, mixer_str, ret); } } else { pr_err("%s: NULL pdata\n", __func__); @@ -1603,24 +1604,47 @@ done: return ret; } +static int msm_pcm_playback_pan_scale_ctl_info(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_info *uinfo) +{ + uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES; + uinfo->count = sizeof(struct asm_stream_pan_ctrl_params); + return 0; +} + static int msm_pcm_playback_pan_scale_ctl_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int ret = 0; int len = 0; int i = 0; - struct snd_pcm_usr *usr_info = snd_kcontrol_chip(kcontrol); + struct snd_soc_component *usr_info = snd_kcontrol_chip(kcontrol); + struct snd_soc_platform *platform; + struct msm_plat_data *pdata; struct snd_pcm_substream *substream; struct msm_audio *prtd; struct asm_stream_pan_ctrl_params pan_param; - + char *usr_value = NULL; + uint32_t *gain_ptr = NULL; if (!usr_info) { pr_err("%s: usr_info is null\n", __func__); ret = -EINVAL; goto done; } - substream = usr_info->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream; + platform = snd_soc_component_to_platform(usr_info); + if (!platform) { + pr_err("%s: platform is null\n", __func__); + ret = -EINVAL; + goto done; + } + pdata = dev_get_drvdata(platform->dev); + if (!pdata) { + pr_err("%s: pdata is null\n", __func__); + ret = -EINVAL; + goto done; + } + substream = pdata->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream; if (!substream) { pr_err("%s substream not found\n", __func__); ret = -EINVAL; @@ -1637,54 +1661,71 @@ static int msm_pcm_playback_pan_scale_ctl_put(struct snd_kcontrol *kcontrol, ret = -EINVAL; goto done; } - pan_param.num_output_channels = - ucontrol->value.integer.value[len++]; + usr_value = (char *) ucontrol->value.bytes.data; + if (!usr_value) { + pr_err("%s ucontrol data is null\n", __func__); + ret = -EINVAL; + goto done; + } + memcpy(&pan_param.num_output_channels, &usr_value[len], + sizeof(pan_param.num_output_channels)); + len += sizeof(pan_param.num_output_channels); if (pan_param.num_output_channels > PCM_FORMAT_MAX_NUM_CHANNEL) { ret = -EINVAL; goto done; } - pan_param.num_input_channels = - ucontrol->value.integer.value[len++]; + memcpy(&pan_param.num_input_channels, &usr_value[len], + sizeof(pan_param.num_input_channels)); + len += sizeof(pan_param.num_input_channels); if (pan_param.num_input_channels > PCM_FORMAT_MAX_NUM_CHANNEL) { ret = -EINVAL; goto done; } - if (ucontrol->value.integer.value[len++]) { - for (i = 0; i < pan_param.num_output_channels; i++) { - pan_param.output_channel_map[i] = - ucontrol->value.integer.value[len++]; - } + if (usr_value[len++]) { + memcpy(pan_param.output_channel_map, &usr_value[len], + (pan_param.num_output_channels * + sizeof(pan_param.output_channel_map[0]))); + len += (pan_param.num_output_channels * + sizeof(pan_param.output_channel_map[0])); } - if (ucontrol->value.integer.value[len++]) { - for (i = 0; i < pan_param.num_input_channels; i++) { - pan_param.input_channel_map[i] = - ucontrol->value.integer.value[len++]; - } + if (usr_value[len++]) { + memcpy(pan_param.input_channel_map, &usr_value[len], + (pan_param.num_input_channels * + sizeof(pan_param.input_channel_map[0]))); + len += (pan_param.num_input_channels * + sizeof(pan_param.input_channel_map[0])); } - if (ucontrol->value.integer.value[len++]) { + if (usr_value[len++]) { + gain_ptr = (uint32_t *) &usr_value[len]; for (i = 0; i < pan_param.num_output_channels * pan_param.num_input_channels; i++) { pan_param.gain[i] = - !(ucontrol->value.integer.value[len++] > 0) ? + !(gain_ptr[i] > 0) ? 
0 : 2 << 13; + len += sizeof(pan_param.gain[i]); } + len += (pan_param.num_input_channels * + pan_param.num_output_channels * sizeof(pan_param.gain[0])); } ret = q6asm_set_mfc_panning_params(prtd->audio_client, &pan_param); len -= pan_param.num_output_channels * - pan_param.num_input_channels; - for (i = 0; i < pan_param.num_output_channels * - pan_param.num_input_channels; i++) { - /* - * The data userspace passes is already in Q14 format. - * For volume gain is in Q28. - */ - pan_param.gain[i] = - ucontrol->value.integer.value[len++] << 14; + pan_param.num_input_channels * sizeof(pan_param.gain[0]); + if (gain_ptr) { + for (i = 0; i < pan_param.num_output_channels * + pan_param.num_input_channels; i++) { + /* + * The data userspace passes is already in Q14 format. + * For volume gain is in Q28. + */ + pan_param.gain[i] = + (gain_ptr[i]) << 14; + len += sizeof(pan_param.gain[i]); + } } ret = q6asm_set_vol_ctrl_gain_pair(prtd->audio_client, &pan_param); @@ -1701,40 +1742,60 @@ static int msm_pcm_playback_pan_scale_ctl_get(struct snd_kcontrol *kcontrol, static int msm_add_stream_pan_scale_controls(struct snd_soc_pcm_runtime *rtd) { - struct snd_pcm *pcm; - struct snd_pcm_usr *pan_ctl_info; - struct snd_kcontrol *kctl; const char *playback_mixer_ctl_name = "Audio Stream"; const char *deviceNo = "NN"; const char *suffix = "Pan Scale Control"; - int ctl_len, ret = 0; + char *mixer_str = NULL; + int ctl_len; + int ret = 0; + struct msm_plat_data *pdata; + struct snd_kcontrol_new pan_scale_control[1] = { + { + .iface = SNDRV_CTL_ELEM_IFACE_MIXER, + .name = "?", + .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, + .info = msm_pcm_playback_pan_scale_ctl_info, + .get = msm_pcm_playback_pan_scale_ctl_get, + .put = msm_pcm_playback_pan_scale_ctl_put, + .private_value = 0, + } + }; if (!rtd) { - pr_err("%s: rtd is NULL\n", __func__); - ret = -EINVAL; - goto done; + pr_err("%s: NULL rtd\n", __func__); + return -EINVAL; } - pcm = rtd->pcm; - ctl_len = strlen(playback_mixer_ctl_name) + 1 + strlen(deviceNo) + 1 + - strlen(suffix) + 1; - - ret = snd_pcm_add_usr_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK, - NULL, 1, ctl_len, rtd->dai_link->be_id, - &pan_ctl_info); - - if (ret < 0) { - pr_err("%s: failed add ctl %s. err = %d\n", - __func__, suffix, ret); + ctl_len = strlen(playback_mixer_ctl_name) + 1 + + strlen(deviceNo) + 1 + strlen(suffix) + 1; + mixer_str = kzalloc(ctl_len, GFP_KERNEL); + if (!mixer_str) { + ret = -ENOMEM; goto done; } - kctl = pan_ctl_info->kctl; - snprintf(kctl->id.name, ctl_len, "%s %d %s", playback_mixer_ctl_name, - rtd->pcm->device, suffix); - kctl->put = msm_pcm_playback_pan_scale_ctl_put; - kctl->get = msm_pcm_playback_pan_scale_ctl_get; - pr_debug("%s: Registering new mixer ctl = %s\n", __func__, - kctl->id.name); + + snprintf(mixer_str, ctl_len, "%s %d %s", + playback_mixer_ctl_name, rtd->pcm->device, suffix); + pan_scale_control[0].name = mixer_str; + pan_scale_control[0].private_value = rtd->dai_link->be_id; + pr_debug("%s: Registering new mixer ctl %s\n", __func__, mixer_str); + pdata = dev_get_drvdata(rtd->platform->dev); + if (pdata) { + if (!pdata->pcm) + pdata->pcm = rtd->pcm; + ret = snd_soc_add_platform_controls(rtd->platform, + pan_scale_control, + ARRAY_SIZE + (pan_scale_control)); + if (ret < 0) + pr_err("%s: failed add ctl %s. 
err = %d\n", + __func__, mixer_str, ret); + } else { + pr_err("%s: NULL pdata\n", __func__); + ret = -EINVAL; + } + + kfree(mixer_str); done: return ret; @@ -1746,18 +1807,28 @@ static int msm_pcm_playback_dnmix_ctl_get(struct snd_kcontrol *kcontrol, return 0; } +static int msm_pcm_playback_dnmix_ctl_info(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_info *uinfo) +{ + uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES; + uinfo->count = sizeof(struct asm_stream_pan_ctrl_params); + return 0; +} + static int msm_pcm_playback_dnmix_ctl_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int ret = 0; int len = 0; - int i = 0; - struct snd_pcm_usr *usr_info = snd_kcontrol_chip(kcontrol); + + struct snd_soc_component *usr_info = snd_kcontrol_chip(kcontrol); + struct snd_soc_platform *platform; + struct msm_plat_data *pdata; struct snd_pcm_substream *substream; struct msm_audio *prtd; struct asm_stream_pan_ctrl_params dnmix_param; - - int be_id = ucontrol->value.integer.value[len++]; + char *usr_value; + int be_id = 0; int stream_id = 0; if (!usr_info) { @@ -1765,7 +1836,19 @@ static int msm_pcm_playback_dnmix_ctl_put(struct snd_kcontrol *kcontrol, ret = -EINVAL; goto done; } - substream = usr_info->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream; + platform = snd_soc_component_to_platform(usr_info); + if (!platform) { + pr_err("%s platform is null\n", __func__); + ret = -EINVAL; + goto done; + } + pdata = dev_get_drvdata(platform->dev); + if (!pdata) { + pr_err("%s pdata is null\n", __func__); + ret = -EINVAL; + goto done; + } + substream = pdata->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream; if (!substream) { pr_err("%s substream not found\n", __func__); ret = -EINVAL; @@ -1781,40 +1864,51 @@ static int msm_pcm_playback_dnmix_ctl_put(struct snd_kcontrol *kcontrol, ret = -EINVAL; goto done; } + usr_value = (char *) ucontrol->value.bytes.data; + if (!usr_value) { + pr_err("%s usrvalue is null\n", __func__); + goto done; + } + memcpy(&be_id, usr_value, sizeof(be_id)); + len += sizeof(be_id); stream_id = prtd->audio_client->session; - dnmix_param.num_output_channels = - ucontrol->value.integer.value[len++]; + memcpy(&dnmix_param.num_output_channels, &usr_value[len], + sizeof(dnmix_param.num_output_channels)); + len += sizeof(dnmix_param.num_output_channels); if (dnmix_param.num_output_channels > PCM_FORMAT_MAX_NUM_CHANNEL) { ret = -EINVAL; goto done; } - dnmix_param.num_input_channels = - ucontrol->value.integer.value[len++]; + memcpy(&dnmix_param.num_input_channels, &usr_value[len], + sizeof(dnmix_param.num_input_channels)); + len += sizeof(dnmix_param.num_input_channels); if (dnmix_param.num_input_channels > PCM_FORMAT_MAX_NUM_CHANNEL) { ret = -EINVAL; goto done; } - - if (ucontrol->value.integer.value[len++]) { - for (i = 0; i < dnmix_param.num_output_channels; i++) { - dnmix_param.output_channel_map[i] = - ucontrol->value.integer.value[len++]; - } - } - if (ucontrol->value.integer.value[len++]) { - for (i = 0; i < dnmix_param.num_input_channels; i++) { - dnmix_param.input_channel_map[i] = - ucontrol->value.integer.value[len++]; - } - } - if (ucontrol->value.integer.value[len++]) { - for (i = 0; i < dnmix_param.num_output_channels * - dnmix_param.num_input_channels; i++) { - dnmix_param.gain[i] = - ucontrol->value.integer.value[len++]; - } + if (usr_value[len++]) { + memcpy(dnmix_param.output_channel_map, &usr_value[len], + (dnmix_param.num_output_channels * + sizeof(dnmix_param.output_channel_map[0]))); + len += (dnmix_param.num_output_channels * + 
sizeof(dnmix_param.output_channel_map[0])); + } + if (usr_value[len++]) { + memcpy(dnmix_param.input_channel_map, &usr_value[len], + (dnmix_param.num_input_channels * + sizeof(dnmix_param.input_channel_map[0]))); + len += (dnmix_param.num_input_channels * + sizeof(dnmix_param.input_channel_map[0])); + } + if (usr_value[len++]) { + memcpy(dnmix_param.gain, (uint32_t *) &usr_value[len], + (dnmix_param.num_input_channels * + dnmix_param.num_output_channels * + sizeof(dnmix_param.gain[0]))); + len += (dnmix_param.num_input_channels * + dnmix_param.num_output_channels * sizeof(dnmix_param.gain[0])); } msm_routing_set_downmix_control_data(be_id, stream_id, @@ -1826,39 +1920,58 @@ done: static int msm_add_device_down_mix_controls(struct snd_soc_pcm_runtime *rtd) { - struct snd_pcm *pcm; - struct snd_pcm_usr *usr_info; - struct snd_kcontrol *kctl; const char *playback_mixer_ctl_name = "Audio Device"; const char *deviceNo = "NN"; const char *suffix = "Downmix Control"; - int ctl_len, ret = 0; + char *mixer_str = NULL; + int ctl_len = 0, ret = 0; + struct msm_plat_data *pdata; + struct snd_kcontrol_new device_downmix_control[1] = { + { + .iface = SNDRV_CTL_ELEM_IFACE_MIXER, + .name = "?", + .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, + .info = msm_pcm_playback_dnmix_ctl_info, + .get = msm_pcm_playback_dnmix_ctl_get, + .put = msm_pcm_playback_dnmix_ctl_put, + .private_value = 0, + } + }; if (!rtd) { - pr_err("%s: rtd is NULL\n", __func__); + pr_err("%s NULL rtd\n", __func__); ret = -EINVAL; goto done; } - - pcm = rtd->pcm; ctl_len = strlen(playback_mixer_ctl_name) + 1 + - strlen(deviceNo) + 1 + strlen(suffix) + 1; - ret = snd_pcm_add_usr_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK, - NULL, 1, ctl_len, rtd->dai_link->be_id, - &usr_info); - if (ret < 0) { - pr_err("%s: downmix control add failed: %d\n", - __func__, ret); + strlen(deviceNo) + 1 + strlen(suffix) + 1; + mixer_str = kzalloc(ctl_len, GFP_KERNEL); + if (!mixer_str) { + ret = -ENOMEM; goto done; } - kctl = usr_info->kctl; - snprintf(kctl->id.name, ctl_len, "%s %d %s", - playback_mixer_ctl_name, rtd->pcm->device, suffix); - kctl->put = msm_pcm_playback_dnmix_ctl_put; - kctl->get = msm_pcm_playback_dnmix_ctl_get; - pr_debug("%s: downmix control name = %s\n", - __func__, playback_mixer_ctl_name); + snprintf(mixer_str, ctl_len, "%s %d %s", + playback_mixer_ctl_name, rtd->pcm->device, suffix); + device_downmix_control[0].name = mixer_str; + device_downmix_control[0].private_value = rtd->dai_link->be_id; + pr_debug("%s: Registering new mixer ctl %s\n", __func__, mixer_str); + pdata = dev_get_drvdata(rtd->platform->dev); + if (pdata) { + if (!pdata->pcm) + pdata->pcm = rtd->pcm; + ret = snd_soc_add_platform_controls(rtd->platform, + device_downmix_control, + ARRAY_SIZE + (device_downmix_control)); + if (ret < 0) + pr_err("%s: failed add ctl %s. 
err = %d\n", + __func__, mixer_str, ret); + } else { + pr_err("%s: NULL pdata\n", __func__); + ret = -EINVAL; + } + kfree(mixer_str); done: return ret; } diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c index 7326e658c947..a45d89f80106 100644 --- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c +++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c @@ -11119,7 +11119,7 @@ static int msm_routing_put_app_type_cfg_control(struct snd_kcontrol *kcontrol, static const struct snd_kcontrol_new app_type_cfg_controls[] = { SOC_SINGLE_MULTI_EXT("App Type Config", SND_SOC_NOPM, 0, - 0xFFFFFFFF, 0, 128, msm_routing_get_app_type_cfg_control, + 0x7FFFFFFF, 0, 128, msm_routing_get_app_type_cfg_control, msm_routing_put_app_type_cfg_control), }; @@ -12211,8 +12211,16 @@ static const struct snd_soc_dapm_widget msm_qdsp6_widgets[] = { 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("INT0_MI2S_RX", "INT0 MI2S Playback", 0, 0, 0, 0), + SND_SOC_DAPM_AIF_OUT("INT2_MI2S_RX", "INT2 MI2S Playback", + 0, 0, 0, 0), + SND_SOC_DAPM_AIF_OUT("INT3_MI2S_RX", "INT3 MI2S Playback", + 0, 0, 0, 0), + SND_SOC_DAPM_AIF_OUT("INT5_MI2S_RX", "INT5 MI2S Playback", + 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("INT4_MI2S_RX", "INT4 MI2S Playback", 0, 0, 0, 0), + SND_SOC_DAPM_AIF_OUT("INT4_MI2S_TX", "INT4 MI2S Capture", + 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("QUIN_MI2S_RX", "Quinary MI2S Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("PRI_I2S_TX", "Primary I2S Capture", 0, 0, 0, 0), @@ -12223,6 +12231,8 @@ static const struct snd_soc_dapm_widget msm_qdsp6_widgets[] = { 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("TERT_MI2S_TX", "Tertiary MI2S Capture", 0, 0, 0, 0), + SND_SOC_DAPM_AIF_IN("INT0_MI2S_TX", "INT0 MI2S Capture", + 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("INT2_MI2S_TX", "INT2 MI2S Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("INT3_MI2S_TX", "INT3 MI2S Capture", @@ -15485,6 +15495,9 @@ static const struct snd_soc_dapm_route intercon[] = { {"BE_OUT", NULL, "PRI_MI2S_RX"}, {"BE_OUT", NULL, "INT0_MI2S_RX"}, {"BE_OUT", NULL, "INT4_MI2S_RX"}, + {"BE_OUT", NULL, "INT2_MI2S_RX"}, + {"BE_OUT", NULL, "INT3_MI2S_RX"}, + {"BE_OUT", NULL, "INT5_MI2S_RX"}, {"BE_OUT", NULL, "INT_BT_SCO_RX"}, {"BE_OUT", NULL, "INT_BT_A2DP_RX"}, {"BE_OUT", NULL, "INT_FM_RX"}, @@ -15524,8 +15537,10 @@ static const struct snd_soc_dapm_route intercon[] = { {"QUIN_MI2S_TX", NULL, "BE_IN"}, {"PRI_MI2S_TX", NULL, "BE_IN"}, {"TERT_MI2S_TX", NULL, "BE_IN"}, + {"INT0_MI2S_TX", NULL, "BE_IN"}, {"INT2_MI2S_TX", NULL, "BE_IN"}, {"INT3_MI2S_TX", NULL, "BE_IN"}, + {"INT4_MI2S_TX", NULL, "BE_IN"}, {"INT5_MI2S_TX", NULL, "BE_IN"}, {"SEC_MI2S_TX", NULL, "BE_IN"}, {"SENARY_MI2S_TX", NULL, "BE_IN" }, diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c index 14f9411104b3..c3d86e6cced2 100644 --- a/sound/soc/msm/qdsp6v2/q6asm.c +++ b/sound/soc/msm/qdsp6v2/q6asm.c @@ -44,7 +44,7 @@ #define TRUE 0x01 #define FALSE 0x00 -#define SESSION_MAX 8 +#define SESSION_MAX 9 #define ASM_MAX_CHANNELS 8 enum { ASM_TOPOLOGY_CAL = 0, @@ -1338,7 +1338,7 @@ int q6asm_audio_client_buf_alloc(unsigned int dir, pr_debug("%s: session[%d]bufsz[%d]bufcnt[%d]\n", __func__, ac->session, bufsz, bufcnt); - if (ac->session <= 0 || ac->session > 8) { + if (ac->session <= 0 || ac->session > ASM_ACTIVE_STREAMS_ALLOWED) { pr_err("%s: Session ID is invalid, session = %d\n", __func__, ac->session); goto fail; @@ -1429,7 +1429,7 @@ int q6asm_audio_client_buf_alloc_contiguous(unsigned int dir, __func__, ac->session, bufsz, bufcnt); - if (ac->session <= 0 || ac->session > 8) { + if (ac->session <= 0 || 
ac->session > ASM_ACTIVE_STREAMS_ALLOWED) { pr_err("%s: Session ID is invalid, session = %d\n", __func__, ac->session); goto fail; @@ -1738,7 +1738,7 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv) return -EINVAL; } - if (ac->session <= 0 || ac->session > 8) { + if (ac->session <= 0 || ac->session > ASM_ACTIVE_STREAMS_ALLOWED) { pr_err("%s: Session ID is invalid, session = %d\n", __func__, ac->session); return -EINVAL; @@ -3368,6 +3368,15 @@ int q6asm_set_shared_circ_buff(struct audio_client *ac, int bytes_to_alloc, rc; size_t len; + mutex_lock(&ac->cmd_lock); + + if (ac->port[dir].buf) { + pr_err("%s: Buffer already allocated\n", __func__); + rc = -EINVAL; + mutex_unlock(&ac->cmd_lock); + goto done; + } + buf_circ = kzalloc(sizeof(struct audio_buffer), GFP_KERNEL); if (!buf_circ) { @@ -3375,10 +3384,6 @@ int q6asm_set_shared_circ_buff(struct audio_client *ac, goto done; } - mutex_lock(&ac->cmd_lock); - - ac->port[dir].buf = buf_circ; - bytes_to_alloc = bufsz * bufcnt; bytes_to_alloc = PAGE_ALIGN(bytes_to_alloc); @@ -3390,11 +3395,12 @@ int q6asm_set_shared_circ_buff(struct audio_client *ac, if (rc) { pr_err("%s: Audio ION alloc is failed, rc = %d\n", __func__, rc); - mutex_unlock(&ac->cmd_lock); kfree(buf_circ); + mutex_unlock(&ac->cmd_lock); goto done; } + ac->port[dir].buf = buf_circ; buf_circ->used = dir ^ 1; buf_circ->size = bytes_to_alloc; buf_circ->actual_size = bytes_to_alloc; @@ -3559,12 +3565,6 @@ int q6asm_open_shared_io(struct audio_client *ac, goto done; } - if (ac->port[dir].buf) { - pr_err("%s: Buffer already allocated\n", __func__); - rc = -EINVAL; - goto done; - } - rc = q6asm_set_shared_circ_buff(ac, open, bufsz, bufcnt, dir); if (rc) @@ -7215,10 +7215,9 @@ int q6asm_send_rtic_event_ack(struct audio_client *ac, goto done; } - q6asm_add_hdr_async(ac, &ack.hdr, + q6asm_stream_add_hdr_async(ac, &ack.hdr, sizeof(struct avs_param_rtic_event_ack) + - params_length, TRUE); - atomic_set(&ac->cmd_state, -1); + params_length, TRUE, ac->stream_id); ack.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM_V2; ack.encdec.param_id = AVS_PARAM_ID_RTIC_EVENT_ACK; ack.encdec.param_size = params_length; @@ -7228,31 +7227,11 @@ int q6asm_send_rtic_event_ack(struct audio_client *ac, memcpy(asm_params + sizeof(struct avs_param_rtic_event_ack), param, params_length); rc = apr_send_pkt(ac->apr, (uint32_t *) asm_params); - if (rc < 0) { + if (rc < 0) pr_err("%s: apr pkt failed for rtic event ack\n", __func__); - rc = -EINVAL; - goto fail_send_param; - } - - rc = wait_event_timeout(ac->cmd_wait, - (atomic_read(&ac->cmd_state) >= 0), 1 * HZ); - if (!rc) { - pr_err("%s: timeout for rtic event ack cmd\n", __func__); - rc = -ETIMEDOUT; - goto fail_send_param; - } - - if (atomic_read(&ac->cmd_state) > 0) { - pr_err("%s: DSP returned error[%s] for rtic event ack cmd\n", - __func__, adsp_err_get_err_str( - atomic_read(&ac->cmd_state))); - rc = adsp_err_get_lnx_err_code( - atomic_read(&ac->cmd_state)); - goto fail_send_param; - } - rc = 0; + else + rc = 0; -fail_send_param: kfree(asm_params); done: return rc; diff --git a/sound/soc/msm/sdm660-internal.c b/sound/soc/msm/sdm660-internal.c index 0f28e100f535..948fb287023d 100644 --- a/sound/soc/msm/sdm660-internal.c +++ b/sound/soc/msm/sdm660-internal.c @@ -1291,6 +1291,9 @@ static int msm_audrx_init(struct snd_soc_pcm_runtime *rtd) snd_soc_dapm_ignore_suspend(dapm, "AMIC1"); snd_soc_dapm_ignore_suspend(dapm, "AMIC2"); snd_soc_dapm_ignore_suspend(dapm, "AMIC3"); + snd_soc_dapm_sync(dapm); + + dapm = snd_soc_codec_get_dapm(dig_cdc); 
snd_soc_dapm_ignore_suspend(dapm, "DMIC1"); snd_soc_dapm_ignore_suspend(dapm, "DMIC2"); snd_soc_dapm_ignore_suspend(dapm, "DMIC3"); diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c index 2a5b3a293cd2..b123734f9fbd 100644 --- a/sound/soc/sh/rcar/adg.c +++ b/sound/soc/sh/rcar/adg.c @@ -437,7 +437,7 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv, struct device *dev = rsnd_priv_to_dev(priv); struct device_node *np = dev->of_node; u32 ckr, rbgx, rbga, rbgb; - u32 rate, req_rate, div; + u32 rate, req_rate = 0, div; uint32_t count = 0; unsigned long req_48kHz_rate, req_441kHz_rate; int i; diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c index deed48ef28b8..362446c36c9e 100644 --- a/sound/soc/sh/rcar/core.c +++ b/sound/soc/sh/rcar/core.c @@ -192,19 +192,16 @@ void rsnd_mod_interrupt(struct rsnd_mod *mod, struct rsnd_priv *priv = rsnd_mod_to_priv(mod); struct rsnd_dai_stream *io; struct rsnd_dai *rdai; - int i, j; - - for_each_rsnd_dai(rdai, priv, j) { + int i; - for (i = 0; i < RSND_MOD_MAX; i++) { - io = &rdai->playback; - if (mod == io->mod[i]) - callback(mod, io); + for_each_rsnd_dai(rdai, priv, i) { + io = &rdai->playback; + if (mod == io->mod[mod->type]) + callback(mod, io); - io = &rdai->capture; - if (mod == io->mod[i]) - callback(mod, io); - } + io = &rdai->capture; + if (mod == io->mod[mod->type]) + callback(mod, io); } } @@ -1019,7 +1016,7 @@ static int rsnd_kctrl_put(struct snd_kcontrol *kctrl, } } - if (change) + if (change && cfg->update) cfg->update(cfg->io, mod); return change; diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c index 68b439ed22d7..460d29cbaaa5 100644 --- a/sound/soc/sh/rcar/src.c +++ b/sound/soc/sh/rcar/src.c @@ -691,13 +691,27 @@ static int _rsnd_src_stop_gen2(struct rsnd_mod *mod) { rsnd_src_irq_disable_gen2(mod); - rsnd_mod_write(mod, SRC_CTRL, 0); + /* + * stop SRC output only + * see rsnd_src_quit_gen2 + */ + rsnd_mod_write(mod, SRC_CTRL, 0x01); rsnd_src_error_record_gen2(mod); return rsnd_src_stop(mod); } +static int rsnd_src_quit_gen2(struct rsnd_mod *mod, + struct rsnd_dai_stream *io, + struct rsnd_priv *priv) +{ + /* stop both out/in */ + rsnd_mod_write(mod, SRC_CTRL, 0); + + return 0; +} + static void __rsnd_src_interrupt_gen2(struct rsnd_mod *mod, struct rsnd_dai_stream *io) { @@ -971,7 +985,7 @@ static struct rsnd_mod_ops rsnd_src_gen2_ops = { .probe = rsnd_src_probe_gen2, .remove = rsnd_src_remove_gen2, .init = rsnd_src_init_gen2, - .quit = rsnd_src_quit, + .quit = rsnd_src_quit_gen2, .start = rsnd_src_start_gen2, .stop = rsnd_src_stop_gen2, .hw_params = rsnd_src_hw_params, diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c index 1427ec21bd7e..c62a2947ac14 100644 --- a/sound/soc/sh/rcar/ssi.c +++ b/sound/soc/sh/rcar/ssi.c @@ -39,6 +39,7 @@ #define SCKP (1 << 13) /* Serial Bit Clock Polarity */ #define SWSP (1 << 12) /* Serial WS Polarity */ #define SDTA (1 << 10) /* Serial Data Alignment */ +#define PDTA (1 << 9) /* Parallel Data Alignment */ #define DEL (1 << 8) /* Serial Data Delay */ #define CKDV(v) (v << 4) /* Serial Clock Division Ratio */ #define TRMD (1 << 1) /* Transmit/Receive Mode Select */ @@ -286,7 +287,7 @@ static int rsnd_ssi_init(struct rsnd_mod *mod, struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); u32 cr; - cr = FORCE; + cr = FORCE | PDTA; /* * always use 32bit system word for easy clock calculation. 
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 0ba9dfb854b3..9694fe13021f 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -203,11 +203,14 @@ int dpcm_dapm_stream_event(struct snd_soc_pcm_runtime *fe, int dir,
 		dev_dbg(be->dev, "ASoC: BE %s event %d dir %d\n",
 				be->dai_link->name, event, dir);
 
-		if (event == SND_SOC_DAPM_STREAM_STOP && be->dpcm[dir].users >= 1) {
+
+		if ((event == SND_SOC_DAPM_STREAM_STOP) &&
+			(be->dpcm[dir].users >= 1)) {
 			pr_debug("%s Don't close BE \n", __func__);
 			continue;
 		}
-		snd_soc_dapm_stream_event(be, dir, event);
+
+		snd_soc_dapm_stream_event(be, dir, event);
 	}
 
 	snd_soc_dapm_stream_event(fe, dir, event);
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index d5cc315a5eb4..ad139d45f5b2 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -553,6 +553,8 @@ int snd_usb_mixer_vol_tlv(struct snd_kcontrol *kcontrol, int op_flag,
 
 	if (size < sizeof(scale))
 		return -ENOMEM;
+	if (cval->min_mute)
+		scale[0] = SNDRV_CTL_TLVT_DB_MINMAX_MUTE;
 	scale[2] = cval->dBmin;
 	scale[3] = cval->dBmax;
 	if (copy_to_user(_tlv, scale, sizeof(scale)))
diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h
index 3417ef347e40..2b4b067646ab 100644
--- a/sound/usb/mixer.h
+++ b/sound/usb/mixer.h
@@ -64,6 +64,7 @@ struct usb_mixer_elem_info {
 	int cached;
 	int cache_val[MAX_CHANNELS];
 	u8 initialized;
+	u8 min_mute;
 	void *private_data;
 };
 
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 04991b009132..5d2fc5f58bfe 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -1873,6 +1873,12 @@ void snd_usb_mixer_fu_apply_quirk(struct usb_mixer_interface *mixer,
 		if (unitid == 7 && cval->control == UAC_FU_VOLUME)
 			snd_dragonfly_quirk_db_scale(mixer, cval, kctl);
 		break;
+	/* lowest playback value is muted on C-Media devices */
+	case USB_ID(0x0d8c, 0x000c):
+	case USB_ID(0x0d8c, 0x0014):
+		if (strstr(kctl->id.name, "Playback"))
+			cval->min_mute = 1;
+		break;
 	}
 }
 
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 29f38e2b4ca9..1cc20d138dae 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1143,6 +1143,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
 	case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
 	case USB_ID(0x05A3, 0x9420): /* ELP HD USB Camera */
 	case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
+	case USB_ID(0x1395, 0x740a): /* Sennheiser DECT */
 	case USB_ID(0x1901, 0x0191): /* GE B850V3 CP2114 audio interface */
 	case USB_ID(0x1de7, 0x0013): /* Phoenix Audio MT202exe */
 	case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */
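Note on the bytes-type stream controls added in msm-pcm-q6-v2.c earlier in this diff: the put handlers now parse a packed byte payload (channel counts, a one-byte presence flag before each optional array, then the array data) instead of per-index integer values. The sketch below is a hypothetical alsa-lib caller for such an "Audio Stream <dev> Pan Scale Control" element; the uint16_t channel-count width, the byte ordering, and the example card/control names are assumptions inferred from the kernel-side memcpy sequence, not a documented ABI, so check struct asm_stream_pan_ctrl_params in the matching kernel headers before relying on it.

/* Hypothetical user-space sketch; field widths and flags are assumed. */
#include <alsa/asoundlib.h>
#include <stdint.h>
#include <string.h>

static int set_pan_scale(const char *card, const char *ctl_name,
			 uint16_t out_ch, uint16_t in_ch,
			 const uint32_t *gains_q14)
{
	snd_ctl_t *ctl;
	snd_ctl_elem_id_t *id;
	snd_ctl_elem_value_t *val;
	uint8_t buf[512] = {0};
	size_t len = 0;
	int err;

	/* channel counts first (assumed 16-bit, host endian) */
	memcpy(&buf[len], &out_ch, sizeof(out_ch)); len += sizeof(out_ch);
	memcpy(&buf[len], &in_ch, sizeof(in_ch));   len += sizeof(in_ch);
	buf[len++] = 0;		/* no output channel map supplied */
	buf[len++] = 0;		/* no input channel map supplied */
	buf[len++] = 1;		/* gain matrix (Q14 values) follows */
	memcpy(&buf[len], gains_q14, (size_t)out_ch * in_ch * sizeof(uint32_t));
	len += (size_t)out_ch * in_ch * sizeof(uint32_t);

	err = snd_ctl_open(&ctl, card, 0);
	if (err < 0)
		return err;

	snd_ctl_elem_id_alloca(&id);
	snd_ctl_elem_id_set_interface(id, SND_CTL_ELEM_IFACE_MIXER);
	snd_ctl_elem_id_set_name(id, ctl_name);

	snd_ctl_elem_value_alloca(&val);
	snd_ctl_elem_value_set_id(val, id);
	snd_ctl_elem_set_bytes(val, buf, len);

	err = snd_ctl_elem_write(ctl, val);
	snd_ctl_close(ctl);
	return err;
}

A caller might, for example, invoke set_pan_scale("hw:0", "Audio Stream 0 Pan Scale Control", 2, 2, gains) after opening the corresponding front-end PCM; the card index and device number here are placeholders.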
