107 files changed, 7670 insertions, 1889 deletions
diff --git a/Documentation/devicetree/bindings/arm/msm/android.txt b/Documentation/devicetree/bindings/arm/msm/android.txt index 7b8b7909bae3..9939643a097e 100644 --- a/Documentation/devicetree/bindings/arm/msm/android.txt +++ b/Documentation/devicetree/bindings/arm/msm/android.txt @@ -33,12 +33,18 @@ Required properties: -type: file system type of vendor partition -mnt_flags: mount flags -fsmgr_flags: fsmgr flags +-"android,firmware" for firmware image +-"android,vbmeta" for setting system properties for verified boot Example: firmware: firmware { android { compatible = "android,firmware"; + vbmeta { + compatible = "android,vbmeta"; + parts = "vbmeta,boot,system,vendor,dtbo,recovery"; + }; fstab { compatible = "android,fstab"; vendor { @@ -46,7 +52,7 @@ Example: dev = "/dev/block/platform/soc/1da4000.ufshc/by-name/vendor"; type = "ext4"; mnt_flags = "ro,barrier=1,discard"; - fsmgr_flags = "wait,slotselect"; + fsmgr_flags = "wait,slotselect,avb"; status = "ok"; }; }; diff --git a/Documentation/devicetree/bindings/leds/leds-qpnp-flash.txt b/Documentation/devicetree/bindings/leds/leds-qpnp-flash.txt index ed1ddf597016..a7a2eda6c5a7 100644 --- a/Documentation/devicetree/bindings/leds/leds-qpnp-flash.txt +++ b/Documentation/devicetree/bindings/leds/leds-qpnp-flash.txt @@ -1,10 +1,10 @@ -Qualcomm Technologies PNP Flash LED +Qualcomm Technologies Inc. PNP Flash LED -QPNP (Qualcomm Technologies Plug N Play) Flash LED (Light +QPNP (Qualcomm Technologies Inc. Plug N Play) Flash LED (Light Emitting Diode) driver is used to provide illumination to camera sensor when background light is dim to capture good picture. It can also be used for flashlight/torch application. -It is part of PMIC on Qualcomm Technologies reference platforms. +It is part of PMIC on Qualcomm Technologies Inc. reference platforms. The PMIC is connected to the host processor via SPMI bus. Required properties: diff --git a/Documentation/devicetree/bindings/leds/leds-qpnp-haptics.txt b/Documentation/devicetree/bindings/leds/leds-qpnp-haptics.txt new file mode 100644 index 000000000000..aef36af22fad --- /dev/null +++ b/Documentation/devicetree/bindings/leds/leds-qpnp-haptics.txt @@ -0,0 +1,262 @@ +Qualcomm Technologies, Inc. Haptics driver + +QPNP (Qualcomm Technologies, Inc. Plug N Play) Haptics is a peripheral on some +QTI PMICs. It can be interfaced with the host processor via SPMI or I2C bus. + +Haptics peripheral can support different actuators or vibrators, +1. Eccentric Rotation Mass (ERM) +2. Linear Resonant Actuator (LRA) + +Also, it can support multiple modes of operation: Direct, Buffer, PWM or Audio. + +Haptics device is described under a single level of node. + +Properties: + +- compatible + Usage: required + Value type: <string> + Definition: "qcom,qpnp-haptics". + +- reg + Usage: required + Value type: <u32> + Definition: Base address of haptics peripheral. + +- interrupts + Usage: required + Value type: <prop-encoded-array> + Definition: Peripheral interrupt specifier. + +- interrupt-names + Usage: required + Value type: <stringlist> + Definition: Interrupt names. This list must match up 1-to-1 with the + interrupts specified in the 'interrupts' property. Currently + supported interrupts are short-circuit and play. + +- qcom,pmic-revid + Usage: required + Value type: <phandle> + Definition: Should specify the phandle of PMIC's revid module. This is used to + identify the PMIC subtype. + +- qcom,pmic-misc + Usage: optional + Value type: <phandle> + Definition: Should specify the phandle of PMIC's misc module. 
This is used to + read the clock trim error register under MISC peripheral. + +- qcom,misc-clk-trim-error-reg + Usage: optional + Value type: <u32> + Definition: Register offset in MISC peripheral to read the clock trim error. + If this is specified, then qcom,pmic-misc should be specified. + +- qcom,actuator-type + Usage: optional + Value type: <u32> + Definition: Allowed values are 0 for LRA and 1 for ERM. If this is not + specified, then LRA type will be used by default. + +- qcom,play-mode + Usage: optional + Value type: <string> + Definition: Allowed values are: "direct", "buffer", "pwm", "auto". If not + specified for LRA actuator, auto mode will be selected by + default. + +- qcom,wave-shape + Usage: optional + Value type: <string> + Definition: Wave shape to be played. Allowed values: "sine" or "square". + Default value is "square". + +- qcom,wave-play-rate-us + Usage: optional + Value type: <u32> + Definition: Wave sample duration in microseconds. This should match with + the frequency the vibrator supports. + Allowed values are: 0 to 20475. Default value is 5715. + +- qcom,max-play-time-us + Usage: optional + Value type: <u32> + Definition: Maximum play time supported in microseconds. Default value is + 15000. + +- qcom,vmax-mv + Usage: optional + Value type: <u32> + Definition: Maximum output voltage in millivolts. Value specified here will + be rounded off to the closest multiple of 116 mV. + Allowed values: 0 to 3596. Default value is 3596. + +- qcom,ilim-ma + Usage: optional + Value type: <u32> + Definition: Output current limit in mA. Allowed values: 400 or 800. Default + value is 400. + +- qcom,en-brake + Usage: optional + Value type: <empty> + Definition: Enables internal reverse braking. + +- qcom,brake-pattern + Usage: optional + Value type: <prop-encoded-array> + Definition: Brake pattern to be applied. If specified, should be having + 4 elements. Allowed values for each element are: + 0, 1: Vmax/4, 2: Vmax/2, 3: Vmax. + +- qcom,sc-dbc-cycles + Usage: optional + Value type: <u32> + Definition: Short circuit debounce cycles for internal PWM. + Allowed values: 0, 8, 16 or 32. + +- vcc_pon-supply + Usage: optional + Value type: <phandle> + Definition: PON driver regulator required to force MBG_ON + +Following properties are specific only to LRA vibrators. + +- qcom,lra-auto-res-mode + Usage: optional + Value type: <string> + Definition: Auto resonance method. Allowed values are: + For pmi8998 and chips earlier, + "none" : No auto resonance + "zxd" : Zero crossing detection method + "qwd" : Quarter wave drive method + "max-qwd" : Maximum QWD + "zxd-eop" : ZXD + End of Pattern + For pm660, + "zxd" : Zero crossing detection method + "qwd" : Quarter wave drive method + +- qcom,lra-high-z + Usage: optional + Value type: <string> + Definition: High Z configuration for auto resonance. Allowed values are: + "none", "opt1", "opt2" and "opt3". + For pm660, "opt0" is valid value for 1 LRA period. + +- qcom,lra-res-cal-period + Usage: optional + Value type: <u32> + Definition: Auto resonance calibration period. Allowed values are: + For pmi8998 and chips earlier: 4, 8, 16, and 32. + For pm660: 4, 8, 16, 32, 64, 128 and 256. + +- qcom,lra-qwd-drive-duration + Usage: optional + Value type: <u32> + Definition: LRA drive duration in QWD mode. Applies only for pm660 currently. + Allowed values are: 0 and 1, for 1/4 and 3/8 LRA period. + respectively. + +- qcom,lra-calibrate-at-eop + Usage: optional + Value type: <u32> + Definition: Enables calibration at end of pattern. 
Applies only for pm660 + currently. Allowed values are: 0 and 1. + +- qcom,auto-res-err-recovery-hw + Usage: optional + Value type: <empty> + Definition: Enables Hardware auto resonance error recovery. Applies only for + pm660 currently. + +- qcom,drive-period-code-max-variation-pct + Usage: optional + Value type: <u32> + Definition: Maximum allowed variation of LRA drive period code in percentage + above which RATE_CFG registers will not be updated by SW when + auto resonance is enabled and auto resonance error correction + algorithm is running. If not specified, default value is 25%. + +- qcom,drive-period-code-min-variation-pct + Usage: optional + Value type: <u32> + Definition: Minimum allowed variation of LRA drive period code in percentage + below which RATE_CFG registers will not be updated by SW when + auto resonance is enabled and auto resonance error correction + algorithm is running. If not specified, default value is 25%. + +Following properties are applicable only when "qcom,play-mode" is set to +"buffer". + +- qcom,wave-rep-cnt + Usage: optional + Value type: <u32> + Definition: Repetition count for wave form. + Allowed values are: 1, 2, 4, 8, 16, 32, 64 and 128. Default + value is 1. + +- qcom,wave-samp-rep-cnt + Usage: optional + Value type: <u32> + Definition: Repetition count for each sample of wave form. Allowed values + are: 1, 2, 4 and 8. Default value is 1. + +- qcom,wave-samples + Usage: optional + Value type: <prop-encoded-array> + Definition: Wave samples in an array of 8 elements. Each element takes the + following representation, bit 0: unused, bits[5:1] : amplitude, + bit 6: overdrive, bit 7: sign. Default sample value is 0x3E. + +Following properties are applicable only when "qcom,play-mode" is set to +"pwm". + +- pwms + Usage: required, if "qcom,play-mode" is set to "pwm". + Value type: <phandle> + Definition: PWM device that is feeding its output to Haptics. + +- qcom,period-us + Usage: required, if "qcom,play-mode" is set to "pwm". + Value type: <u32> + Definition: PWM period in us. + +- qcom,duty-us + Usage: required, if "qcom,play-mode" is set to "pwm". + Value type: <u32> + Definition: PWM duty cycle in us. + +- qcom,ext-pwm-freq-khz + Usage: optional + Value type: <u32> + Definition: Frequency for external PWM in KHz. + Allowed values are: 25, 50, 75 and 100. + +- qcom,ext-pwm-dtest-line + Usage: optional + Value type: <u32> + Definition: DTEST line which is used for external PWM. + +Example: + qcom,haptics@c000 { + compatible = "qcom,qpnp-haptics"; + reg = <0xc000 0x100>; + interrupts = <0x3 0xc0 0x0 IRQ_TYPE_EDGE_RISING>, + <0x3 0xc0 0x1 IRQ_TYPE_EDGE_BOTH>; + interrupt-names = "hap-sc-irq", "hap-play-irq"; + qcom,pmic-revid = <&pmi8998_revid>; + qcom,pmic-misc = <&pmi8998_misc>; + qcom,misc-clk-trim-error-reg = <0xf3>; + qcom,actuator-type = <0>; + qcom,play-mode = "direct"; + qcom,vmax-mv = <3200>; + qcom,ilim-ma = <800>; + qcom,sc-dbc-cycles = <8>; + qcom,wave-play-rate-us = <6667>; + qcom,en-brake; + qcom,brake-pattern = <0x3 0x0 0x0 0x0>; + qcom,lra-high-z = "opt1"; + qcom,lra-auto-res-mode = "qwd"; + qcom,lra-res-cal-period = <4>; + }; diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg.txt index f6a7a1ba3005..1e44686b6943 100644 --- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg.txt +++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg.txt @@ -109,6 +109,10 @@ Parent node optional properties: this. 
If this property is not specified, low battery voltage threshold will be configured to 4200 mV. +- qcom,fg-rconn-mohm: Battery connector resistance (Rconn) in + milliohms. If Rconn is specified, then + Rslow values will be updated to account + it for an accurate ESR. - qcom,cycle-counter-en: Boolean property which enables the cycle counter feature. If this property is present, then the following properties @@ -143,6 +147,14 @@ Parent node optional properties: battery voltage shadow and the current predicted voltage in uV to initiate capacity learning. +- qcom,cl-max-limit-deciperc: The maximum percent that the capacity + cannot go above during any capacity + learning cycle. This property is in the + unit of .1% increments. +- qcom,cl-min-limit-deciperc: The minimum percent that the capacity + cannot go below during any capacity + learning cycle. This property is in the + unit of .1% increments. - qcom,capacity-estimation-on: A boolean property to have the fuel gauge driver attempt to estimate the battery capacity using battery @@ -178,6 +190,97 @@ Parent node optional properties: settings will be different from default. Once SOC crosses 5%, ESR pulse timings will be restored back to default. +- qcom,fg-control-slope-limiter: A boolean property to specify if SOC + slope limiter coefficients needs to + be modified based on charging status + and battery temperature threshold. +- qcom,fg-slope-limit-temp-threshold: Temperature threshold in decidegC used + for applying the slope coefficient based + on charging status and battery + temperature. If this property is not + specified, a default value of 100 (10C) + will be applied by default. +- qcom,fg-slope-limit-low-temp-chg: When the temperature goes below the + specified temperature threshold and + battery is charging, slope coefficient + specified with this property will be + applied. If this property is not + specified, a default value of 45 will be + applied. +- qcom,fg-slope-limit-low-temp-dischg: Same as "qcom,fg-slope-limit-low-temp-chg" + except this is when the battery is + discharging. +- qcom,fg-slope-limit-high-temp-chg: When the temperature goes above the + specified temperature threshold and + battery is charging, slope coefficient + specified with this property will be + applied. If this property is not + specified, a default value of 2 will be + applied. +- qcom,fg-slope-limit-high-temp-dischg: Same as "qcom,fg-slope-limit-high-temp-chg" + except this is when the battery is + discharging. +- qcom,fg-dischg-voltage-gain-ctrl: A boolean property to specify if the + voltage gain needs to be modified + during discharging based on monotonic + soc. +- qcom,fg-dischg-voltage-gain-soc: Array of monotonic SOC threshold values + to change the voltage gain settings + during discharge. This should be defined + in the ascending order and in the range + of 0-100. Array limit is set to 3. + If qcom,fg-dischg-voltage-gain-ctrl is + set, then this property should be + specified to apply the gain settings. +- qcom,fg-dischg-med-voltage-gain: Array of voltage gain values that needs + to be applied to medC voltage gain when + the monotonic SOC goes below the SOC + threshold specified under + qcom,fg-dischg-voltage-gain-soc. Array + limit is set to 3. + If qcom,fg-dischg-voltage-gain-ctrl is + set, then this property should be + specified to apply the gain setting. 
+- qcom,fg-dischg-high-voltage-gain: Array of voltage gain values that needs + to be applied to highC voltage gain when + the monotonic SOC goes below the SOC + threshold specified under + qcom,fg-dischg-voltage-gain-soc. Array + limit is set to 3. + If qcom,fg-dischg-voltage-gain-ctrl is + set, then this property should be + specified to apply the gain setting. +- qcom,fg-use-vbat-low-empty-soc: A boolean property to specify whether + vbatt-low interrupt is used to handle + empty battery condition. If this is + not specified, empty battery condition + is detected by empty-soc interrupt. +- qcom,fg-batt-temp-low-limit: Battery temperature (in decidegC) low + limit which will be used to validate + the battery temperature reading from FG. + If the battery temperature goes below + this limit, last read good temperature + will be notified to userspace. If this + limit is not specified, then the + default limit would be -60C. +- qcom,fg-batt-temp-high-limit: Battery temperature (in decidegC) high + limit which will be used to validate + the battery temperature reading from FG. + If the battery temperature goes above + this limit, last read good temperature + will be notified to userspace. If this + limit is not specified, then the + default limit would be 150C. +- qcom,fg-cc-soc-limit-pct: Percentage of CC_SOC before resetting + FG and restore the full CC_SOC value. +- qcom,fg-restore-batt-info: A boolean property to specify whether + battery parameters needs to be + restored. If this feature is enabled, + then validating the battery parameters + by OCV/battery SOC, validation range + in percentage should be specified via + appropriate module parameters to make + it work properly. qcom,fg-soc node required properties: - reg : offset and length of the PMIC peripheral register map. diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 40dc329f142b..b189fc694693 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -112,14 +112,11 @@ min_adv_mss - INTEGER IP Fragmentation: -ipfrag_high_thresh - INTEGER - Maximum memory used to reassemble IP fragments. When - ipfrag_high_thresh bytes of memory is allocated for this purpose, - the fragment handler will toss packets until ipfrag_low_thresh - is reached. This also serves as a maximum limit to namespaces - different from the initial one. - -ipfrag_low_thresh - INTEGER +ipfrag_high_thresh - LONG INTEGER + Maximum memory used to reassemble IP fragments. + +ipfrag_low_thresh - LONG INTEGER + (Obsolete since linux-4.17) Maximum memory used to reassemble IP fragments before the kernel begins to remove incomplete fragment queues to free up resources. The kernel still accepts new fragments for defragmentation. 
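Note on the ipfrag documentation change above: both thresholds remain runtime-tunable through procfs. A minimal userspace sketch (illustrative only, not part of this patch series) that reads the current values from the standard /proc/sys/net/ipv4 paths; everything beyond those paths is assumed for the example:

	/* Illustrative sketch: print the IP fragment reassembly thresholds. */
	#include <stdio.h>

	static long read_sysctl(const char *path)
	{
		FILE *f = fopen(path, "r");
		long val = -1;

		if (!f)
			return -1;
		if (fscanf(f, "%ld", &val) != 1)
			val = -1;
		fclose(f);
		return val;
	}

	int main(void)
	{
		printf("ipfrag_high_thresh = %ld\n",
		       read_sysctl("/proc/sys/net/ipv4/ipfrag_high_thresh"));
		printf("ipfrag_low_thresh  = %ld\n",
		       read_sysctl("/proc/sys/net/ipv4/ipfrag_low_thresh"));
		return 0;
	}

As the updated text notes, ipfrag_low_thresh is obsolete since linux-4.17, so on newer kernels only the high threshold is meaningful.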
@@ -1,6 +1,6 @@ VERSION = 4 PATCHLEVEL = 4 -SUBLEVEL = 172 +SUBLEVEL = 174 EXTRAVERSION = NAME = Blurry Fish Butt diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h index 5f071762fb1c..6a2ae61748e4 100644 --- a/arch/arc/include/asm/perf_event.h +++ b/arch/arc/include/asm/perf_event.h @@ -103,7 +103,8 @@ static const char * const arc_pmu_ev_hw_map[] = { /* counts condition */ [PERF_COUNT_HW_INSTRUCTIONS] = "iall", - [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp", /* Excludes ZOL jumps */ + /* All jump instructions that are taken */ + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak", [PERF_COUNT_ARC_BPOK] = "bpok", /* NP-NT, PT-T, PNT-NT */ #ifdef CONFIG_ISA_ARCV2 [PERF_COUNT_HW_BRANCH_MISSES] = "bpmp", diff --git a/arch/arm/boot/dts/qcom/msm-pmi8994.dtsi b/arch/arm/boot/dts/qcom/msm-pmi8994.dtsi index 06e86fa78773..26b1975786d9 100644 --- a/arch/arm/boot/dts/qcom/msm-pmi8994.dtsi +++ b/arch/arm/boot/dts/qcom/msm-pmi8994.dtsi @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2017, 2019, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -210,6 +210,7 @@ qcom,pmic-revid = <&pmi8994_revid>; qcom,force-aicl-rerun; qcom,aicl-rerun-period-s = <180>; + dpdm-supply = <&qusb_phy0>; qcom,chgr@1000 { reg = <0x1000 0x100>; @@ -397,6 +398,7 @@ reg = <0xb100 0x100>, <0xb042 0x7e>; reg-names = "qpnp-lpg-channel-base", "qpnp-lpg-lut-base"; + qcom,lpg-lut-size = <0x7e>; qcom,channel-id = <0>; qcom,supported-sizes = <6>, <9>; qcom,ramp-index = <0>; @@ -408,6 +410,7 @@ reg = <0xb200 0x100>, <0xb042 0x7e>; reg-names = "qpnp-lpg-channel-base", "qpnp-lpg-lut-base"; + qcom,lpg-lut-size = <0x7e>; qcom,channel-id = <1>; qcom,supported-sizes = <6>, <9>; qcom,ramp-index = <1>; @@ -419,6 +422,7 @@ reg = <0xb300 0x100>, <0xb042 0x7e>; reg-names = "qpnp-lpg-channel-base", "qpnp-lpg-lut-base"; + qcom,lpg-lut-size = <0x7e>; qcom,channel-id = <2>; qcom,supported-sizes = <6>, <9>; qcom,ramp-index = <2>; @@ -430,6 +434,7 @@ reg = <0xb400 0x100>, <0xb042 0x7e>; reg-names = "qpnp-lpg-channel-base", "qpnp-lpg-lut-base"; + qcom,lpg-lut-size = <0x7e>; qcom,channel-id = <3>; qcom,supported-sizes = <6>, <9>; qcom,ramp-index = <3>; @@ -442,6 +447,7 @@ #address-cells = <1>; #size-cells = <1>; qcom,pmic-revid = <&pmi8994_revid>; + qcom,qpnp-labibb-mode = "lcd"; ibb_regulator: qcom,ibb@dc00 { reg = <0xdc00 0x100>; @@ -510,13 +516,9 @@ pmi8994_wled: qcom,leds@d800 { compatible = "qcom,qpnp-wled"; reg = <0xd800 0x100>, - <0xd900 0x100>, - <0xdc00 0x100>, - <0xde00 0x100>; + <0xd900 0x100>; reg-names = "qpnp-wled-ctrl-base", - "qpnp-wled-sink-base", - "qpnp-wled-ibb-base", - "qpnp-wled-lab-base"; + "qpnp-wled-sink-base"; interrupts = <0x3 0xd8 0x2 IRQ_TYPE_EDGE_RISING>; interrupt-names = "sc-irq"; status = "okay"; @@ -540,30 +542,28 @@ qcom,pmic-revid = <&pmi8994_revid>; }; - pmi8994_haptics: qcom,haptic@c000 { - status = "disabled"; - compatible = "qcom,qpnp-haptic"; + pmi8994_haptics: qcom,haptics@c000 { + compatible = "qcom,qpnp-haptics"; reg = <0xc000 0x100>; interrupts = <0x3 0xc0 0x0 IRQ_TYPE_EDGE_BOTH>, <0x3 0xc0 0x1 IRQ_TYPE_EDGE_BOTH>; - interrupt-names = "sc-irq", "play-irq"; + interrupt-names = "hap-sc-irq", "hap-play-irq"; + qcom,pmic-revid = <&pmi8994_revid>; vcc_pon-supply = <&pon_perph_reg>; + qcom,int-pwm-freq-khz = <505>; qcom,play-mode = "direct"; qcom,wave-play-rate-us = <5263>; - 
qcom,actuator-type = "lra"; + qcom,actuator-type = <0>; qcom,wave-shape = "square"; qcom,vmax-mv = <2000>; qcom,ilim-ma = <800>; qcom,sc-deb-cycles = <8>; - qcom,int-pwm-freq-khz = <505>; qcom,en-brake; - qcom,brake-pattern = [03 03 00 00]; - qcom,wave-samples = [3e 3e 3e 3e 3e 3e 3e 3e]; + qcom,brake-pattern = <0x3 0x3 0x0 0x0>; + qcom,wave-samples = <0x3e 0x3e 0x3e 0x3e 0x3e + 0x3e 0x3e 0x3e>; qcom,wave-rep-cnt = <1>; qcom,wave-samp-rep-cnt = <1>; - qcom,lra-high-z = "opt1"; - qcom,lra-auto-res-mode = "qwd"; - qcom,lra-res-cal-period = <4>; }; qcom,leds@d000 { diff --git a/arch/arm/mach-cns3xxx/pcie.c b/arch/arm/mach-cns3xxx/pcie.c index 318394ed5c7a..5e11ad3164e0 100644 --- a/arch/arm/mach-cns3xxx/pcie.c +++ b/arch/arm/mach-cns3xxx/pcie.c @@ -83,7 +83,7 @@ static void __iomem *cns3xxx_pci_map_bus(struct pci_bus *bus, } else /* remote PCI bus */ base = cnspci->cfg1_regs + ((busno & 0xf) << 20); - return base + (where & 0xffc) + (devfn << 12); + return base + where + (devfn << 12); } static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn, diff --git a/arch/arm64/configs/cuttlefish_defconfig b/arch/arm64/configs/cuttlefish_defconfig index ad998649d71f..931204f707a3 100644 --- a/arch/arm64/configs/cuttlefish_defconfig +++ b/arch/arm64/configs/cuttlefish_defconfig @@ -349,7 +349,6 @@ CONFIG_USB_CONFIGFS_F_MIDI=y CONFIG_MMC=y # CONFIG_MMC_BLOCK is not set CONFIG_RTC_CLASS=y -# CONFIG_RTC_HCTOSYS is not set # CONFIG_RTC_SYSTOHC is not set CONFIG_RTC_DRV_PL031=y CONFIG_VIRTIO_PCI=y diff --git a/arch/arm64/configs/msm-auto-perf_defconfig b/arch/arm64/configs/msm-auto-perf_defconfig index 6f282aa1044e..da1b82b4b6e7 100644 --- a/arch/arm64/configs/msm-auto-perf_defconfig +++ b/arch/arm64/configs/msm-auto-perf_defconfig @@ -38,7 +38,7 @@ CONFIG_EMBEDDED=y # CONFIG_SLUB_DEBUG is not set # CONFIG_COMPAT_BRK is not set CONFIG_PROFILING=y -CONFIG_CC_STACKPROTECTOR_REGULAR=y +CONFIG_CC_STACKPROTECTOR_STRONG=y CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16 CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -69,6 +69,7 @@ CONFIG_ARMV8_DEPRECATED=y CONFIG_SWP_EMULATION=y CONFIG_CP15_BARRIER_EMULATION=y CONFIG_SETEND_EMULATION=y +CONFIG_ARM64_PAN=y # CONFIG_EFI is not set CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set diff --git a/arch/arm64/configs/msm-auto_defconfig b/arch/arm64/configs/msm-auto_defconfig index 07a636b76d6e..e74a2ca4e42a 100644 --- a/arch/arm64/configs/msm-auto_defconfig +++ b/arch/arm64/configs/msm-auto_defconfig @@ -35,7 +35,7 @@ CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y # CONFIG_COMPAT_BRK is not set CONFIG_PROFILING=y -CONFIG_CC_STACKPROTECTOR_REGULAR=y +CONFIG_CC_STACKPROTECTOR_STRONG=y CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16 CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -67,6 +67,7 @@ CONFIG_ARMV8_DEPRECATED=y CONFIG_SWP_EMULATION=y CONFIG_CP15_BARRIER_EMULATION=y CONFIG_SETEND_EMULATION=y +CONFIG_ARM64_PAN=y CONFIG_CMDLINE="console=ttyAMA0" # CONFIG_EFI is not set CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y diff --git a/arch/arm64/configs/msm-perf_defconfig b/arch/arm64/configs/msm-perf_defconfig index a6e5ec7bda6a..d5899e2cfe7c 100644 --- a/arch/arm64/configs/msm-perf_defconfig +++ b/arch/arm64/configs/msm-perf_defconfig @@ -468,6 +468,7 @@ CONFIG_MMC_SDHCI_MSM=y CONFIG_LEDS_QPNP=y CONFIG_LEDS_QPNP_FLASH=y CONFIG_LEDS_QPNP_WLED=y +CONFIG_LEDS_QPNP_HAPTICS=y CONFIG_LEDS_TRIGGERS=y CONFIG_SWITCH=y CONFIG_RTC_CLASS=y @@ -581,6 +582,7 @@ CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y CONFIG_ECRYPT_FS=y CONFIG_ECRYPT_FS_MESSAGING=y +CONFIG_SDCARD_FS=y 
CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y CONFIG_PRINTK_TIME=y diff --git a/arch/arm64/configs/msm_defconfig b/arch/arm64/configs/msm_defconfig index 14ed727da342..201d8b98a4c1 100644 --- a/arch/arm64/configs/msm_defconfig +++ b/arch/arm64/configs/msm_defconfig @@ -457,6 +457,7 @@ CONFIG_LEDS_QPNP=y CONFIG_LEDS_QPNP_FLASH=y CONFIG_LEDS_QPNP_WLED=y CONFIG_LEDS_SYSCON=y +CONFIG_LEDS_QPNP_HAPTICS=y CONFIG_LEDS_TRIGGERS=y CONFIG_LEDS_TRIGGER_HEARTBEAT=y CONFIG_LEDS_TRIGGER_CPU=y @@ -586,6 +587,7 @@ CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y CONFIG_ECRYPT_FS=y CONFIG_ECRYPT_FS_MESSAGING=y +CONFIG_SDCARD_FS=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y CONFIG_PRINTK_TIME=y diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S index 8727f4490772..ba0127e31b1a 100644 --- a/arch/arm64/kernel/hyp-stub.S +++ b/arch/arm64/kernel/hyp-stub.S @@ -28,6 +28,8 @@ #include <asm/virt.h> .text + .pushsection .hyp.text, "ax" + .align 11 ENTRY(__hyp_stub_vectors) diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c index 61b0911aa475..298db9789fc7 100644 --- a/arch/arm64/mm/flush.c +++ b/arch/arm64/mm/flush.c @@ -75,10 +75,6 @@ void __sync_icache_dcache(pte_t pte, unsigned long addr) { struct page *page = pte_page(pte); - /* no flushing needed for anonymous pages */ - if (!page_mapping(page)) - return; - if (!test_and_set_bit(PG_dcache_clean, &page->flags)) sync_icache_aliases(page_address(page), PAGE_SIZE << compound_order(page)); diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 8eccead675d4..cc7b450a7766 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -224,10 +224,10 @@ static noinline __init void detect_machine_type(void) if (stsi(vmms, 3, 2, 2) || !vmms->count) return; - /* Running under KVM? If not we assume z/VM */ + /* Detect known hypervisors */ if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3)) S390_lowcore.machine_flags |= MACHINE_FLAG_KVM; - else + else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4)) S390_lowcore.machine_flags |= MACHINE_FLAG_VM; } diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index e7a43a30e3ff..47692c78d09c 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -833,6 +833,8 @@ void __init setup_arch(char **cmdline_p) pr_info("Linux is running under KVM in 64-bit mode\n"); else if (MACHINE_IS_LPAR) pr_info("Linux is running natively in 64-bit mode\n"); + else + pr_info("Linux is running as a guest in 64-bit mode\n"); /* Have one command line that is parsed and saved in /proc/cmdline */ /* boot_command_line has been already set up in early.c */ diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 77f4f334a465..29e5409c0d48 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -360,9 +360,13 @@ void smp_call_online_cpu(void (*func)(void *), void *data) */ void smp_call_ipl_cpu(void (*func)(void *), void *data) { + struct _lowcore *lc = pcpu_devices->lowcore; + + if (pcpu_devices[0].address == stap()) + lc = &S390_lowcore; + pcpu_delegate(&pcpu_devices[0], func, data, - pcpu_devices->lowcore->panic_stack - - PANIC_FRAME_OFFSET + PAGE_SIZE); + lc->panic_stack - PANIC_FRAME_OFFSET + PAGE_SIZE); } int smp_find_processor_id(u16 address) @@ -1152,7 +1156,11 @@ static ssize_t __ref rescan_store(struct device *dev, { int rc; + rc = lock_device_hotplug_sysfs(); + if (rc) + return rc; rc = smp_rescan_cpus(); + unlock_device_hotplug(); return rc ? 
rc : count; } static DEVICE_ATTR(rescan, 0200, NULL, rescan_store); diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c index 31dab2135188..21332b431f10 100644 --- a/arch/x86/boot/compressed/aslr.c +++ b/arch/x86/boot/compressed/aslr.c @@ -25,8 +25,8 @@ static inline u16 i8254(void) u16 status, timer; do { - outb(I8254_PORT_CONTROL, - I8254_CMD_READBACK | I8254_SELECT_COUNTER0); + outb(I8254_CMD_READBACK | I8254_SELECT_COUNTER0, + I8254_PORT_CONTROL); status = inb(I8254_PORT_COUNTER0); timer = inb(I8254_PORT_COUNTER0); timer |= inb(I8254_PORT_COUNTER0) << 8; diff --git a/arch/x86/configs/x86_64_cuttlefish_defconfig b/arch/x86/configs/x86_64_cuttlefish_defconfig index fc2d30e1361d..afa9b589d1cc 100644 --- a/arch/x86/configs/x86_64_cuttlefish_defconfig +++ b/arch/x86/configs/x86_64_cuttlefish_defconfig @@ -378,7 +378,6 @@ CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y CONFIG_USB_CONFIGFS_UEVENT=y CONFIG_USB_CONFIGFS_F_MIDI=y CONFIG_RTC_CLASS=y -# CONFIG_RTC_HCTOSYS is not set CONFIG_VIRTIO_PCI=y CONFIG_VIRTIO_BALLOON=y CONFIG_VIRTIO_INPUT=y diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 1a934bb8ed1c..758e2b39567d 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -5524,8 +5524,7 @@ restart: toggle_interruptibility(vcpu, ctxt->interruptibility); vcpu->arch.emulate_regs_need_sync_to_vcpu = false; kvm_rip_write(vcpu, ctxt->eip); - if (r == EMULATE_DONE && - (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP))) + if (r == EMULATE_DONE && ctxt->tf) kvm_vcpu_do_singlestep(vcpu, &r); if (!ctxt->have_exception || exception_type(ctxt->exception.vector) == EXCPT_TRAP) diff --git a/drivers/android/binder.c b/drivers/android/binder.c index f22e4abba9b4..56bbd494df6a 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -5472,6 +5472,9 @@ static void print_binder_proc(struct seq_file *m, for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { struct binder_node *node = rb_entry(n, struct binder_node, rb_node); + if (!print_all && !node->has_async_transaction) + continue; + /* * take a temporary reference on the node so it * survives and isn't removed from the tree diff --git a/drivers/base/core.c b/drivers/base/core.c index 23620c073b13..cef0f5c1bf85 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -867,6 +867,8 @@ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir) return; mutex_lock(&gdp_mutex); + if (!kobject_has_children(glue_dir)) + kobject_del(glue_dir); kobject_put(glue_dir); mutex_unlock(&gdp_mutex); } diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 599e31899a5d..91f19dfc388d 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -81,6 +81,7 @@ #include <asm/uaccess.h> static DEFINE_IDR(loop_index_idr); +static DEFINE_MUTEX(loop_index_mutex); static DEFINE_MUTEX(loop_ctl_mutex); static int max_part; @@ -1627,11 +1628,9 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode, static int lo_open(struct block_device *bdev, fmode_t mode) { struct loop_device *lo; - int err; + int err = 0; - err = mutex_lock_killable(&loop_ctl_mutex); - if (err) - return err; + mutex_lock(&loop_index_mutex); lo = bdev->bd_disk->private_data; if (!lo) { err = -ENXIO; @@ -1640,20 +1639,18 @@ static int lo_open(struct block_device *bdev, fmode_t mode) atomic_inc(&lo->lo_refcnt); out: - mutex_unlock(&loop_ctl_mutex); + mutex_unlock(&loop_index_mutex); return err; } -static void lo_release(struct gendisk *disk, fmode_t mode) +static void __lo_release(struct loop_device *lo) { - struct 
loop_device *lo; int err; - mutex_lock(&loop_ctl_mutex); - lo = disk->private_data; if (atomic_dec_return(&lo->lo_refcnt)) - goto out_unlock; + return; + mutex_lock(&loop_ctl_mutex); if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) { /* * In autoclear mode, stop the loop thread @@ -1670,10 +1667,16 @@ static void lo_release(struct gendisk *disk, fmode_t mode) loop_flush(lo); } -out_unlock: mutex_unlock(&loop_ctl_mutex); } +static void lo_release(struct gendisk *disk, fmode_t mode) +{ + mutex_lock(&loop_index_mutex); + __lo_release(disk->private_data); + mutex_unlock(&loop_index_mutex); +} + static const struct block_device_operations lo_fops = { .owner = THIS_MODULE, .open = lo_open, @@ -1954,7 +1957,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data) struct kobject *kobj; int err; - mutex_lock(&loop_ctl_mutex); + mutex_lock(&loop_index_mutex); err = loop_lookup(&lo, MINOR(dev) >> part_shift); if (err < 0) err = loop_add(&lo, MINOR(dev) >> part_shift); @@ -1962,7 +1965,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data) kobj = NULL; else kobj = get_disk(lo->lo_disk); - mutex_unlock(&loop_ctl_mutex); + mutex_unlock(&loop_index_mutex); *part = 0; return kobj; @@ -1972,13 +1975,9 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd, unsigned long parm) { struct loop_device *lo; - int ret; - - ret = mutex_lock_killable(&loop_ctl_mutex); - if (ret) - return ret; + int ret = -ENOSYS; - ret = -ENOSYS; + mutex_lock(&loop_index_mutex); switch (cmd) { case LOOP_CTL_ADD: ret = loop_lookup(&lo, parm); @@ -1992,15 +1991,19 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd, ret = loop_lookup(&lo, parm); if (ret < 0) break; + mutex_lock(&loop_ctl_mutex); if (lo->lo_state != Lo_unbound) { ret = -EBUSY; + mutex_unlock(&loop_ctl_mutex); break; } if (atomic_read(&lo->lo_refcnt) > 0) { ret = -EBUSY; + mutex_unlock(&loop_ctl_mutex); break; } lo->lo_disk->private_data = NULL; + mutex_unlock(&loop_ctl_mutex); idr_remove(&loop_index_idr, lo->lo_number); loop_remove(lo); break; @@ -2010,7 +2013,7 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd, break; ret = loop_add(&lo, -1); } - mutex_unlock(&loop_ctl_mutex); + mutex_unlock(&loop_index_mutex); return ret; } @@ -2093,10 +2096,10 @@ static int __init loop_init(void) THIS_MODULE, loop_probe, NULL, NULL); /* pre-create number of devices given by config or max_loop */ - mutex_lock(&loop_ctl_mutex); + mutex_lock(&loop_index_mutex); for (i = 0; i < nr; i++) loop_add(&lo, i); - mutex_unlock(&loop_ctl_mutex); + mutex_unlock(&loop_index_mutex); printk(KERN_INFO "loop: module loaded\n"); return 0; diff --git a/drivers/char/mwave/mwavedd.c b/drivers/char/mwave/mwavedd.c index 164544afd680..618f3df6c3b9 100644 --- a/drivers/char/mwave/mwavedd.c +++ b/drivers/char/mwave/mwavedd.c @@ -59,6 +59,7 @@ #include <linux/mutex.h> #include <linux/delay.h> #include <linux/serial_8250.h> +#include <linux/nospec.h> #include "smapi.h" #include "mwavedd.h" #include "3780i.h" @@ -289,6 +290,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd, ipcnum); return -EINVAL; } + ipcnum = array_index_nospec(ipcnum, + ARRAY_SIZE(pDrvData->IPCs)); PRINTK_3(TRACE_MWAVE, "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC" " ipcnum %x entry usIntCount %x\n", @@ -317,6 +320,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd, " Invalid ipcnum %x\n", ipcnum); return -EINVAL; } + ipcnum = array_index_nospec(ipcnum, + ARRAY_SIZE(pDrvData->IPCs)); PRINTK_3(TRACE_MWAVE, "mwavedd::mwave_ioctl 
IOCTL_MW_GET_IPC" " ipcnum %x, usIntCount %x\n", @@ -383,6 +388,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd, ipcnum); return -EINVAL; } + ipcnum = array_index_nospec(ipcnum, + ARRAY_SIZE(pDrvData->IPCs)); mutex_lock(&mwave_mutex); if (pDrvData->IPCs[ipcnum].bIsEnabled == TRUE) { pDrvData->IPCs[ipcnum].bIsEnabled = FALSE; diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c index 7930cc29f7f4..83aaeee13d73 100644 --- a/drivers/gpu/drm/msm/sde/sde_connector.c +++ b/drivers/gpu/drm/msm/sde/sde_connector.c @@ -428,6 +428,7 @@ static int sde_connector_atomic_set_property(struct drm_connector *connector, struct sde_connector *c_conn; struct sde_connector_state *c_state; int idx, rc; + uint64_t fence_fd = 0; if (!connector || !state || !property) { SDE_ERROR("invalid argument(s), conn %pK, state %pK, prp %pK\n", @@ -472,6 +473,29 @@ static int sde_connector_atomic_set_property(struct drm_connector *connector, SDE_ERROR("prep fb failed, %d\n", rc); } break; + case CONNECTOR_PROP_RETIRE_FENCE: + if (!val) + goto end; + + /* + * update the the offset to a timeline for commit completion + */ + rc = sde_fence_create(&c_conn->retire_fence, &fence_fd, 1); + if (rc) { + SDE_ERROR("fence create failed rc:%d\n", rc); + goto end; + } + + rc = copy_to_user((uint64_t __user *)val, &fence_fd, + sizeof(uint64_t)); + if (rc) { + SDE_ERROR("copy to user failed rc:%d\n", rc); + /* fence will be released with timeline update */ + put_unused_fd(fence_fd); + rc = -EFAULT; + goto end; + } + break; case CONNECTOR_PROP_TOPOLOGY_CONTROL: rc = sde_rm_check_property_topctl(val); if (rc) @@ -544,12 +568,14 @@ static int sde_connector_atomic_get_property(struct drm_connector *connector, c_state = to_sde_connector_state(state); idx = msm_property_index(&c_conn->property_info, property); - if (idx == CONNECTOR_PROP_RETIRE_FENCE) - rc = sde_fence_create(&c_conn->retire_fence, val, 0); - else + if (idx == CONNECTOR_PROP_RETIRE_FENCE) { + *val = ~0; + rc = 0; + } else { /* get cached property value */ rc = msm_property_atomic_get(&c_conn->property_info, c_state->property_values, 0, property, val); + } /* allow for custom override */ if (c_conn->ops.get_property) @@ -931,8 +957,8 @@ struct drm_connector *sde_connector_init(struct drm_device *dev, "hdr_control", 0x0, 0, ~0, 0, CONNECTOR_PROP_HDR_CONTROL); - msm_property_install_range(&c_conn->property_info, "RETIRE_FENCE", - 0x0, 0, INR_OPEN_MAX, 0, CONNECTOR_PROP_RETIRE_FENCE); + msm_property_install_volatile_range(&c_conn->property_info, + "RETIRE_FENCE", 0x0, 0, ~0, 0, CONNECTOR_PROP_RETIRE_FENCE); msm_property_install_volatile_signed_range(&c_conn->property_info, "PLL_DELTA", 0x0, INT_MIN, INT_MAX, 0, diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c index 733ff5f686c0..9805c8e8acb4 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.c +++ b/drivers/gpu/drm/msm/sde/sde_crtc.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. + * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved. 
* Copyright (C) 2013 Red Hat * Author: Rob Clark <robdclark@gmail.com> * @@ -1642,8 +1642,8 @@ static void sde_crtc_install_properties(struct drm_crtc *crtc, "input_fence_timeout", 0x0, 0, SDE_CRTC_MAX_INPUT_FENCE_TIMEOUT, SDE_CRTC_INPUT_FENCE_TIMEOUT, CRTC_PROP_INPUT_FENCE_TIMEOUT); - msm_property_install_range(&sde_crtc->property_info, "output_fence", - 0x0, 0, INR_OPEN_MAX, 0x0, CRTC_PROP_OUTPUT_FENCE); + msm_property_install_volatile_range(&sde_crtc->property_info, + "output_fence", 0x0, 0, ~0, 0, CRTC_PROP_OUTPUT_FENCE); msm_property_install_range(&sde_crtc->property_info, "output_fence_offset", 0x0, 0, 1, 0, @@ -1708,6 +1708,28 @@ static void sde_crtc_install_properties(struct drm_crtc *crtc, kfree(info); } +static int _sde_crtc_get_output_fence(struct drm_crtc *crtc, + const struct drm_crtc_state *state, uint64_t *val) +{ + struct sde_crtc *sde_crtc; + struct sde_crtc_state *cstate; + uint32_t offset; + + sde_crtc = to_sde_crtc(crtc); + cstate = to_sde_crtc_state(state); + + offset = sde_crtc_get_property(cstate, CRTC_PROP_OUTPUT_FENCE_OFFSET); + + /* + * Hwcomposer now queries the fences using the commit list in atomic + * commit ioctl. The offset should be set to next timeline + * which will be incremented during the prepare commit phase + */ + offset++; + + return sde_fence_create(&sde_crtc->output_fence, val, offset); +} + /** * sde_crtc_atomic_set_property - atomically set a crtc drm property * @crtc: Pointer to drm crtc structure @@ -1724,28 +1746,61 @@ static int sde_crtc_atomic_set_property(struct drm_crtc *crtc, struct sde_crtc *sde_crtc; struct sde_crtc_state *cstate; int idx, ret = -EINVAL; + uint64_t fence_fd = 0; if (!crtc || !state || !property) { SDE_ERROR("invalid argument(s)\n"); + return -EINVAL; + } + + sde_crtc = to_sde_crtc(crtc); + cstate = to_sde_crtc_state(state); + + ret = msm_property_atomic_set(&sde_crtc->property_info, + cstate->property_values, cstate->property_blobs, + property, val); + + if (!ret) { + idx = msm_property_index(&sde_crtc->property_info, + property); + switch (idx) { + case CRTC_PROP_INPUT_FENCE_TIMEOUT: + _sde_crtc_set_input_fence_timeout(cstate); + break; + case CRTC_PROP_OUTPUT_FENCE: + if (!val) + goto exit; + + ret = _sde_crtc_get_output_fence(crtc, + state, &fence_fd); + if (ret) { + SDE_ERROR("fence create failed rc:%d\n", ret); + goto exit; + } + + ret = copy_to_user((uint64_t __user *)val, &fence_fd, + sizeof(uint64_t)); + + if (ret) { + SDE_ERROR("copy to user failed rc:%d\n", ret); + put_unused_fd(fence_fd); + ret = -EFAULT; + goto exit; + } + break; + default: + /* nothing to do */ + break; + } } else { - sde_crtc = to_sde_crtc(crtc); - cstate = to_sde_crtc_state(state); - ret = msm_property_atomic_set(&sde_crtc->property_info, - cstate->property_values, cstate->property_blobs, + ret = sde_cp_crtc_set_property(crtc, property, val); - if (!ret) { - idx = msm_property_index(&sde_crtc->property_info, - property); - if (idx == CRTC_PROP_INPUT_FENCE_TIMEOUT) - _sde_crtc_set_input_fence_timeout(cstate); - } else { - ret = sde_cp_crtc_set_property(crtc, - property, val); - } - if (ret) - DRM_ERROR("failed to set the property\n"); } +exit: + if (ret) + DRM_ERROR("failed to set the property\n"); + return ret; } @@ -1783,30 +1838,27 @@ static int sde_crtc_atomic_get_property(struct drm_crtc *crtc, if (!crtc || !state) { SDE_ERROR("invalid argument(s)\n"); - } else { - sde_crtc = to_sde_crtc(crtc); - cstate = to_sde_crtc_state(state); + return -EINVAL; + } - i = msm_property_index(&sde_crtc->property_info, property); - if (i == 
CRTC_PROP_OUTPUT_FENCE) { - int offset = sde_crtc_get_property(cstate, - CRTC_PROP_OUTPUT_FENCE_OFFSET); + sde_crtc = to_sde_crtc(crtc); + cstate = to_sde_crtc_state(state); - ret = sde_fence_create(&sde_crtc->output_fence, val, - offset); - if (ret) - SDE_ERROR("fence create failed\n"); - } else { - ret = msm_property_atomic_get(&sde_crtc->property_info, - cstate->property_values, - cstate->property_blobs, property, val); - if (ret) - ret = sde_cp_crtc_get_property(crtc, - property, val); - } + i = msm_property_index(&sde_crtc->property_info, property); + if (i == CRTC_PROP_OUTPUT_FENCE) { + *val = ~0; + ret = 0; + } else { + ret = msm_property_atomic_get(&sde_crtc->property_info, + cstate->property_values, + cstate->property_blobs, property, val); if (ret) - DRM_ERROR("get property failed\n"); + ret = sde_cp_crtc_get_property(crtc, + property, val); } + if (ret) + DRM_ERROR("get property failed\n"); + return ret; } diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index f55dcdf99bc5..26476a64e663 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -255,6 +255,8 @@ static const struct xpad_device { { 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX }, { 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX }, { 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX }, + { 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 }, + { 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 }, { 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 }, { 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, { 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 }, @@ -431,6 +433,7 @@ static const struct usb_device_id xpad_table[] = { XPAD_XBOXONE_VENDOR(0x0e6f), /* 0x0e6f X-Box One controllers */ XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */ XPAD_XBOXONE_VENDOR(0x0f0d), /* Hori Controllers */ + XPAD_XBOX360_VENDOR(0x1038), /* SteelSeries Controllers */ XPAD_XBOX360_VENDOR(0x11c9), /* Nacon GC100XF */ XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */ XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */ diff --git a/drivers/iommu/iommu-debug.c b/drivers/iommu/iommu-debug.c index a5a749183242..108976f795b5 100644 --- a/drivers/iommu/iommu-debug.c +++ b/drivers/iommu/iommu-debug.c @@ -1403,7 +1403,7 @@ static ssize_t iommu_debug_virt_addr_read(struct file *file, char __user *ubuf, else snprintf(buf, 100, "0x%pK\n", virt_addr); - buflen = min(count, strlen(buf)+1); + buflen = min(count, strlen(buf)); if (copy_to_user(ubuf, buf, buflen)) { pr_err("Couldn't copy_to_user\n"); retval = -EFAULT; @@ -1527,7 +1527,7 @@ static ssize_t iommu_debug_pte_read(struct file *file, char __user *ubuf, else snprintf(buf, 100, "pte=%016llx\n", pte); - buflen = min(count, strlen(buf)+1); + buflen = min(count, strlen(buf)); if (copy_to_user(ubuf, buf, buflen)) { pr_err("Couldn't copy_to_user\n"); retval = -EFAULT; @@ -1596,7 +1596,7 @@ static ssize_t iommu_debug_atos_read(struct file *file, char __user *ubuf, snprintf(buf, 100, "%pa\n", &phys); } - buflen = min(count, strlen(buf)+1); + buflen = min(count, strlen(buf)); if (copy_to_user(ubuf, buf, buflen)) { pr_err("Couldn't copy_to_user\n"); retval = -EFAULT; @@ -1649,7 +1649,7 @@ static ssize_t iommu_debug_dma_atos_read(struct file *file, char __user *ubuf, else snprintf(buf, 100, "%pa\n", &phys); - buflen = min(count, strlen(buf)+1); + buflen = min(count, strlen(buf)); if (copy_to_user(ubuf, buf, buflen)) { 
pr_err("Couldn't copy_to_user\n"); retval = -EFAULT; @@ -1880,7 +1880,7 @@ static ssize_t iommu_debug_dma_map_read(struct file *file, char __user *ubuf, iova = ddev->iova; snprintf(buf, 100, "%pa\n", &iova); - buflen = min(count, strlen(buf)+1); + buflen = min(count, strlen(buf)); if (copy_to_user(ubuf, buf, buflen)) { pr_err("Couldn't copy_to_user\n"); retval = -EFAULT; diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index c3d7a1461043..114d5883d497 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -1230,13 +1230,14 @@ static void its_free_device(struct its_device *its_dev) kfree(its_dev); } -static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq) +static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq) { int idx; - idx = find_first_zero_bit(dev->event_map.lpi_map, - dev->event_map.nr_lpis); - if (idx == dev->event_map.nr_lpis) + idx = bitmap_find_free_region(dev->event_map.lpi_map, + dev->event_map.nr_lpis, + get_count_order(nvecs)); + if (idx < 0) return -ENOSPC; *hwirq = dev->event_map.lpi_base + idx; @@ -1317,20 +1318,20 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, int err; int i; - for (i = 0; i < nr_irqs; i++) { - err = its_alloc_device_irq(its_dev, &hwirq); - if (err) - return err; + err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq); + if (err) + return err; - err = its_irq_gic_domain_alloc(domain, virq + i, hwirq); + for (i = 0; i < nr_irqs; i++) { + err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i); if (err) return err; irq_domain_set_hwirq_and_chip(domain, virq + i, - hwirq, &its_irq_chip, its_dev); + hwirq + i, &its_irq_chip, its_dev); pr_debug("ID:%d pID:%d vID:%d\n", - (int)(hwirq - its_dev->event_map.lpi_base), - (int) hwirq, virq + i); + (int)(hwirq + i - its_dev->event_map.lpi_base), + (int)(hwirq + i), virq + i); } return 0; diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index 966227a3df1a..ab4d40857421 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig @@ -598,7 +598,7 @@ config LEDS_QPNP config LEDS_QPNP_FLASH tristate "Support for QPNP Flash LEDs" - depends on LEDS_CLASS && SPMI + depends on LEDS_CLASS && MFD_SPMI_PMIC help This driver supports the flash LED functionality of Qualcomm Technologies, Inc. QPNP PMICs. This driver supports PMICs up through @@ -642,6 +642,16 @@ config LEDS_VERSATILE This option enabled support for the LEDs on the ARM Versatile and RealView boards. Say Y to enabled these. +config LEDS_QPNP_HAPTICS + tristate "Haptics support for QPNP PMIC" + depends on LEDS_CLASS && MFD_SPMI_PMIC + help + This option enables device driver support for the haptics peripheral + found on Qualcomm Technologies, Inc. QPNP PMICs. The haptic + peripheral is capable of driving both LRA and ERM vibrators. This + module provides haptic feedback for user actions such as a long press + on the touch screen. 
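For context on the LEDS_QPNP_HAPTICS entry above: the driver added later in this series (leds-qpnp-haptics.c) plugs into the LED class framework, embedding a struct led_classdev in its chip structure. A minimal sketch of that registration pattern, with purely hypothetical names (hap_demo_*, "vibrator") and no claim to match the actual driver's callbacks:

	/* Illustrative sketch of a LED-class haptics registration. */
	#include <linux/leds.h>
	#include <linux/module.h>
	#include <linux/platform_device.h>

	struct hap_demo_chip {
		struct led_classdev cdev;
	};

	static void hap_demo_brightness_set(struct led_classdev *cdev,
					    enum led_brightness value)
	{
		/* A real driver would start or stop the actuator here. */
	}

	static int hap_demo_probe(struct platform_device *pdev)
	{
		struct hap_demo_chip *chip;

		chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;

		chip->cdev.name = "vibrator";
		chip->cdev.max_brightness = LED_FULL;
		chip->cdev.brightness_set = hap_demo_brightness_set;

		return devm_led_classdev_register(&pdev->dev, &chip->cdev);
	}

	static struct platform_driver hap_demo_driver = {
		.driver = { .name = "hap-demo" },
		.probe  = hap_demo_probe,
	};
	module_platform_driver(hap_demo_driver);
	MODULE_LICENSE("GPL v2");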
+ comment "LED Triggers" source "drivers/leds/trigger/Kconfig" diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile index 8d8ba9175810..17cd850ad9ea 100644 --- a/drivers/leds/Makefile +++ b/drivers/leds/Makefile @@ -61,8 +61,8 @@ obj-$(CONFIG_LEDS_MAX8997) += leds-max8997.o obj-$(CONFIG_LEDS_LM355x) += leds-lm355x.o obj-$(CONFIG_LEDS_BLINKM) += leds-blinkm.o obj-$(CONFIG_LEDS_QPNP) += leds-qpnp.o -obj-$(CONFIG_LEDS_QPNP_FLASH) += leds-qpnp-flash.o -obj-$(CONFIG_LEDS_QPNP_FLASH_V2) += leds-qpnp-flash-v2.o +obj-$(CONFIG_LEDS_QPNP_FLASH) += leds-qpnp-flash.o leds-qpnp-flash-common.o +obj-$(CONFIG_LEDS_QPNP_FLASH_V2) += leds-qpnp-flash-v2.o leds-qpnp-flash-common.o obj-$(CONFIG_LEDS_QPNP_WLED) += leds-qpnp-wled.o obj-$(CONFIG_LEDS_SYSCON) += leds-syscon.o obj-$(CONFIG_LEDS_VERSATILE) += leds-versatile.o @@ -70,6 +70,7 @@ obj-$(CONFIG_LEDS_MENF21BMC) += leds-menf21bmc.o obj-$(CONFIG_LEDS_KTD2692) += leds-ktd2692.o obj-$(CONFIG_LEDS_POWERNV) += leds-powernv.o obj-$(CONFIG_LEDS_SEAD3) += leds-sead3.o +obj-$(CONFIG_LEDS_QPNP_HAPTICS) += leds-qpnp-haptics.o # LED SPI Drivers obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o diff --git a/drivers/leds/leds-qpnp-flash-common.c b/drivers/leds/leds-qpnp-flash-common.c new file mode 100644 index 000000000000..5aed9100bde4 --- /dev/null +++ b/drivers/leds/leds-qpnp-flash-common.c @@ -0,0 +1,16 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/leds-qpnp-flash.h> + +int (*qpnp_flash_led_prepare)(struct led_trigger *trig, int options, + int *max_current); diff --git a/drivers/leds/leds-qpnp-flash-v2.c b/drivers/leds/leds-qpnp-flash-v2.c index c90633b16fad..86e70689ce2d 100644 --- a/drivers/leds/leds-qpnp-flash-v2.c +++ b/drivers/leds/leds-qpnp-flash-v2.c @@ -1248,7 +1248,7 @@ static int qpnp_flash_led_switch_set(struct flash_switch_data *snode, bool on) return 0; } -int qpnp_flash_led_prepare(struct led_trigger *trig, int options, +static int qpnp_flash_led_prepare_v2(struct led_trigger *trig, int options, int *max_current) { struct led_classdev *led_cdev; @@ -2249,6 +2249,7 @@ static int qpnp_flash_led_probe(struct platform_device *pdev) if (!led->pdata) return -ENOMEM; + qpnp_flash_led_prepare = qpnp_flash_led_prepare_v2; rc = qpnp_flash_led_parse_common_dt(led, node); if (rc < 0) { pr_err("Failed to parse common flash LED device tree\n"); diff --git a/drivers/leds/leds-qpnp-flash.c b/drivers/leds/leds-qpnp-flash.c index 493631774936..cbb51c24dcf2 100644 --- a/drivers/leds/leds-qpnp-flash.c +++ b/drivers/leds/leds-qpnp-flash.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1207,7 +1207,7 @@ error_regulator_enable: return rc; } -int qpnp_flash_led_prepare(struct led_trigger *trig, int options, +static int qpnp_flash_led_prepare_v1(struct led_trigger *trig, int options, int *max_current) { struct led_classdev *led_cdev = trigger_to_lcdev(trig); @@ -1269,7 +1269,7 @@ static void qpnp_flash_led_work(struct work_struct *work) int max_curr_avail_ma = 0; int total_curr_ma = 0; int i; - u8 val; + u8 val = 0; uint temp; mutex_lock(&led->flash_led_lock); @@ -2226,7 +2226,6 @@ static int qpnp_flash_led_parse_common_dt( "Invalid thermal derate rate\n"); return -EINVAL; } - led->pdata->thermal_derate_rate = (u8)temp_val; } else { dev_err(&led->pdev->dev, @@ -2468,16 +2467,18 @@ static int qpnp_flash_led_probe(struct platform_device *pdev) led->pdev = pdev; led->current_addr = FLASH_LED0_CURRENT(led->base); led->current2_addr = FLASH_LED1_CURRENT(led->base); + qpnp_flash_led_prepare = qpnp_flash_led_prepare_v1; led->pdata = devm_kzalloc(&pdev->dev, sizeof(*led->pdata), GFP_KERNEL); if (!led->pdata) return -ENOMEM; - led->peripheral_type = (u8)qpnp_flash_led_get_peripheral_type(led); - if (led->peripheral_type < 0) { + rc = qpnp_flash_led_get_peripheral_type(led); + if (rc < 0) { dev_err(&pdev->dev, "Failed to get peripheral type\n"); return rc; } + led->peripheral_type = (u8) rc; rc = qpnp_flash_led_parse_common_dt(led, node); if (rc) { @@ -2520,6 +2521,7 @@ static int qpnp_flash_led_probe(struct platform_device *pdev) } for_each_child_of_node(node, temp) { + j = -1; led->flash_node[i].cdev.brightness_set = qpnp_flash_led_brightness_set; led->flash_node[i].cdev.brightness_get = @@ -2594,7 +2596,6 @@ static int qpnp_flash_led_probe(struct platform_device *pdev) if (rc) goto error_led_register; } - i++; } @@ -2606,7 +2607,7 @@ static int qpnp_flash_led_probe(struct platform_device *pdev) (long)root); if (PTR_ERR(root) == -ENODEV) pr_err("debugfs is not enabled in kernel"); - goto error_led_debugfs; + goto error_free_led_sysfs; } led->dbgfs_root = root; @@ -2636,6 +2637,8 @@ static int qpnp_flash_led_probe(struct platform_device *pdev) return 0; error_led_debugfs: + debugfs_remove_recursive(root); +error_free_led_sysfs: i = led->num_leds - 1; j = ARRAY_SIZE(qpnp_flash_led_attrs) - 1; error_led_register: @@ -2646,7 +2649,6 @@ error_led_register: j = ARRAY_SIZE(qpnp_flash_led_attrs) - 1; led_classdev_unregister(&led->flash_node[i].cdev); } - debugfs_remove_recursive(root); mutex_destroy(&led->flash_led_lock); destroy_workqueue(led->ordered_workq); diff --git a/drivers/leds/leds-qpnp-haptics.c b/drivers/leds/leds-qpnp-haptics.c new file mode 100644 index 000000000000..9c62ab6521e8 --- /dev/null +++ b/drivers/leds/leds-qpnp-haptics.c @@ -0,0 +1,2551 @@ +/* Copyright (c) 2014-2015, 2017, 2019, The Linux Foundation. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#define pr_fmt(fmt) "haptics: %s: " fmt, __func__ + +#include <linux/atomic.h> +#include <linux/delay.h> +#include <linux/hrtimer.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/log2.h> +#include <linux/leds.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/pwm.h> +#include <linux/regmap.h> +#include <linux/regulator/consumer.h> +#include <linux/slab.h> +#include <linux/qpnp-misc.h> +#include <linux/qpnp/qpnp-revid.h> + +/* Register definitions */ +#define HAP_STATUS_1_REG(chip) (chip->base + 0x0A) +#define HAP_BUSY_BIT BIT(1) +#define SC_FLAG_BIT BIT(3) +#define AUTO_RES_ERROR_BIT BIT(4) + +#define HAP_LRA_AUTO_RES_LO_REG(chip) (chip->base + 0x0B) +#define HAP_LRA_AUTO_RES_HI_REG(chip) (chip->base + 0x0C) + +#define HAP_INT_RT_STS_REG(chip) (chip->base + 0x10) +#define SC_INT_RT_STS_BIT BIT(0) +#define PLAY_INT_RT_STS_BIT BIT(1) + +#define HAP_EN_CTL_REG(chip) (chip->base + 0x46) +#define HAP_EN_BIT BIT(7) + +#define HAP_EN_CTL2_REG(chip) (chip->base + 0x48) +#define BRAKE_EN_BIT BIT(0) + +#define HAP_AUTO_RES_CTRL_REG(chip) (chip->base + 0x4B) +#define AUTO_RES_EN_BIT BIT(7) +#define AUTO_RES_ERR_RECOVERY_BIT BIT(3) + +#define HAP_CFG1_REG(chip) (chip->base + 0x4C) +#define HAP_ACT_TYPE_MASK BIT(0) +#define HAP_LRA 0 +#define HAP_ERM 1 + +#define HAP_CFG2_REG(chip) (chip->base + 0x4D) +#define HAP_WAVE_SINE 0 +#define HAP_WAVE_SQUARE 1 +#define HAP_LRA_RES_TYPE_MASK BIT(0) + +#define HAP_SEL_REG(chip) (chip->base + 0x4E) +#define HAP_WF_SOURCE_MASK GENMASK(5, 4) +#define HAP_WF_SOURCE_SHIFT 4 + +#define HAP_LRA_AUTO_RES_REG(chip) (chip->base + 0x4F) +/* For pmi8998 */ +#define LRA_AUTO_RES_MODE_MASK GENMASK(6, 4) +#define LRA_AUTO_RES_MODE_SHIFT 4 +#define LRA_HIGH_Z_MASK GENMASK(3, 2) +#define LRA_HIGH_Z_SHIFT 2 +#define LRA_RES_CAL_MASK GENMASK(1, 0) +#define HAP_RES_CAL_PERIOD_MIN 4 +#define HAP_RES_CAL_PERIOD_MAX 32 +/* For pm660 */ +#define PM660_AUTO_RES_MODE_BIT BIT(7) +#define PM660_AUTO_RES_MODE_SHIFT 7 +#define PM660_CAL_DURATION_MASK GENMASK(6, 5) +#define PM660_CAL_DURATION_SHIFT 5 +#define PM660_QWD_DRIVE_DURATION_BIT BIT(4) +#define PM660_QWD_DRIVE_DURATION_SHIFT 4 +#define PM660_CAL_EOP_BIT BIT(3) +#define PM660_CAL_EOP_SHIFT 3 +#define PM660_LRA_RES_CAL_MASK GENMASK(2, 0) +#define HAP_PM660_RES_CAL_PERIOD_MAX 256 + +#define HAP_VMAX_CFG_REG(chip) (chip->base + 0x51) +#define HAP_VMAX_OVD_BIT BIT(6) +#define HAP_VMAX_MASK GENMASK(5, 1) +#define HAP_VMAX_SHIFT 1 +#define HAP_VMAX_MIN_MV 116 +#define HAP_VMAX_MAX_MV 3596 + +#define HAP_ILIM_CFG_REG(chip) (chip->base + 0x52) +#define HAP_ILIM_SEL_MASK BIT(0) +#define HAP_ILIM_400_MA 0 +#define HAP_ILIM_800_MA 1 + +#define HAP_SC_DEB_REG(chip) (chip->base + 0x53) +#define HAP_SC_DEB_MASK GENMASK(2, 0) +#define HAP_SC_DEB_CYCLES_MIN 0 +#define HAP_DEF_SC_DEB_CYCLES 8 +#define HAP_SC_DEB_CYCLES_MAX 32 + +#define HAP_RATE_CFG1_REG(chip) (chip->base + 0x54) +#define HAP_RATE_CFG1_MASK GENMASK(7, 0) + +#define HAP_RATE_CFG2_REG(chip) (chip->base + 0x55) +#define HAP_RATE_CFG2_MASK GENMASK(3, 0) +/* Shift needed to convert drive period upper bits [11:8] */ +#define HAP_RATE_CFG2_SHIFT 8 + +#define HAP_INT_PWM_REG(chip) (chip->base + 0x56) +#define INT_PWM_FREQ_SEL_MASK GENMASK(1, 0) +#define INT_PWM_FREQ_253_KHZ 0 +#define INT_PWM_FREQ_505_KHZ 1 +#define INT_PWM_FREQ_739_KHZ 2 +#define INT_PWM_FREQ_1076_KHZ 3 + +#define HAP_EXT_PWM_REG(chip) (chip->base + 0x57) +#define EXT_PWM_FREQ_SEL_MASK GENMASK(1, 0) +#define EXT_PWM_FREQ_25_KHZ 
0 +#define EXT_PWM_FREQ_50_KHZ 1 +#define EXT_PWM_FREQ_75_KHZ 2 +#define EXT_PWM_FREQ_100_KHZ 3 + +#define HAP_PWM_CAP_REG(chip) (chip->base + 0x58) + +#define HAP_SC_CLR_REG(chip) (chip->base + 0x59) +#define SC_CLR_BIT BIT(0) + +#define HAP_BRAKE_REG(chip) (chip->base + 0x5C) +#define HAP_BRAKE_PAT_MASK 0x3 + +#define HAP_WF_REPEAT_REG(chip) (chip->base + 0x5E) +#define WF_REPEAT_MASK GENMASK(6, 4) +#define WF_REPEAT_SHIFT 4 +#define WF_REPEAT_MIN 1 +#define WF_REPEAT_MAX 128 +#define WF_S_REPEAT_MASK GENMASK(1, 0) +#define WF_S_REPEAT_MIN 1 +#define WF_S_REPEAT_MAX 8 + +#define HAP_WF_S1_REG(chip) (chip->base + 0x60) +#define HAP_WF_SIGN_BIT BIT(7) +#define HAP_WF_OVD_BIT BIT(6) +#define HAP_WF_SAMP_MAX GENMASK(5, 1) +#define HAP_WF_SAMPLE_LEN 8 + +#define HAP_PLAY_REG(chip) (chip->base + 0x70) +#define PLAY_BIT BIT(7) +#define PAUSE_BIT BIT(0) + +#define HAP_SEC_ACCESS_REG(chip) (chip->base + 0xD0) + +#define HAP_TEST2_REG(chip) (chip->base + 0xE3) +#define HAP_EXT_PWM_DTEST_MASK GENMASK(6, 4) +#define HAP_EXT_PWM_DTEST_SHIFT 4 +#define PWM_MAX_DTEST_LINES 4 +#define HAP_EXT_PWM_PEAK_DATA 0x7F +#define HAP_EXT_PWM_HALF_DUTY 50 +#define HAP_EXT_PWM_FULL_DUTY 100 +#define HAP_EXT_PWM_DATA_FACTOR 39 + +/* Other definitions */ +#define HAP_BRAKE_PAT_LEN 4 +#define HAP_WAVE_SAMP_LEN 8 +#define NUM_WF_SET 4 +#define HAP_WAVE_SAMP_SET_LEN (HAP_WAVE_SAMP_LEN * NUM_WF_SET) +#define HAP_RATE_CFG_STEP_US 5 +#define HAP_WAVE_PLAY_RATE_US_MIN 0 +#define HAP_DEF_WAVE_PLAY_RATE_US 5715 +#define HAP_WAVE_PLAY_RATE_US_MAX 20475 +#define HAP_MAX_PLAY_TIME_MS 15000 + +enum hap_brake_pat { + NO_BRAKE = 0, + BRAKE_VMAX_4, + BRAKE_VMAX_2, + BRAKE_VMAX, +}; + +enum hap_auto_res_mode { + HAP_AUTO_RES_NONE, + HAP_AUTO_RES_ZXD, + HAP_AUTO_RES_QWD, + HAP_AUTO_RES_MAX_QWD, + HAP_AUTO_RES_ZXD_EOP, +}; + +enum hap_pm660_auto_res_mode { + HAP_PM660_AUTO_RES_ZXD, + HAP_PM660_AUTO_RES_QWD, +}; + +/* high Z option lines */ +enum hap_high_z { + HAP_LRA_HIGH_Z_NONE, /* opt0 for PM660 */ + HAP_LRA_HIGH_Z_OPT1, + HAP_LRA_HIGH_Z_OPT2, + HAP_LRA_HIGH_Z_OPT3, +}; + +/* play modes */ +enum hap_mode { + HAP_DIRECT, + HAP_BUFFER, + HAP_AUDIO, + HAP_PWM, +}; + +/* wave/sample repeat */ +enum hap_rep_type { + HAP_WAVE_REPEAT = 1, + HAP_WAVE_SAMP_REPEAT, +}; + +/* status flags */ +enum hap_status { + AUTO_RESONANCE_ENABLED = BIT(0), +}; + +enum hap_play_control { + HAP_STOP, + HAP_PAUSE, + HAP_PLAY, +}; + +/* pwm channel parameters */ +struct pwm_param { + struct pwm_device *pwm_dev; + u32 duty_us; + u32 period_us; +}; + +/* + * hap_lra_ares_param - Haptic auto_resonance parameters + * @ lra_qwd_drive_duration - LRA QWD drive duration + * @ calibrate_at_eop - Calibrate at EOP + * @ lra_res_cal_period - LRA resonance calibration period + * @ auto_res_mode - auto resonace mode + * @ lra_high_z - high z option line + */ +struct hap_lra_ares_param { + int lra_qwd_drive_duration; + int calibrate_at_eop; + enum hap_high_z lra_high_z; + u16 lra_res_cal_period; + u8 auto_res_mode; +}; + +/* + * hap_chip - Haptics data structure + * @ pdev - platform device pointer + * @ regmap - regmap pointer + * @ bus_lock - spin lock for bus read/write + * @ play_lock - mutex lock for haptics play/enable control + * @ haptics_work - haptics worker + * @ stop_timer - hrtimer for stopping haptics + * @ auto_res_err_poll_timer - hrtimer for auto-resonance error + * @ base - base address + * @ play_irq - irq for play + * @ sc_irq - irq for short circuit + * @ pwm_data - pwm configuration + * @ ares_cfg - auto resonance configuration + * @ play_time_ms - 
play time set by the user in ms + * @ max_play_time_ms - max play time in ms + * @ vmax_mv - max voltage in mv + * @ ilim_ma - limiting current in ma + * @ sc_deb_cycles - short circuit debounce cycles + * @ wave_play_rate_us - play rate for waveform + * @ last_rate_cfg - Last rate config updated + * @ wave_rep_cnt - waveform repeat count + * @ wave_s_rep_cnt - waveform sample repeat count + * @ wf_samp_len - waveform sample length + * @ ext_pwm_freq_khz - external pwm frequency in KHz + * @ ext_pwm_dtest_line - DTEST line for external pwm + * @ status_flags - status + * @ play_mode - play mode + * @ act_type - actuator type + * @ wave_shape - waveform shape + * @ wave_samp_idx - wave sample id used to refer start of a sample set + * @ wave_samp - array of wave samples + * @ brake_pat - pattern for active breaking + * @ en_brake - brake state + * @ misc_clk_trim_error_reg - MISC clock trim error register if present + * @ clk_trim_error_code - MISC clock trim error code + * @ drive_period_code_max_limit - calculated drive period code with + percentage variation on the higher side. + * @ drive_period_code_min_limit - calculated drive period code with + percentage variation on the lower side + * @ drive_period_code_max_var_pct - maximum limit of percentage variation of + drive period code + * @ drive_period_code_min_var_pct - minimum limit of percentage variation of + drive period code + * @ last_sc_time - Last time short circuit was detected + * @ sc_count - counter to determine the duration of short circuit + condition + * @ perm_disable - Flag to disable module permanently + * @ state - current state of haptics + * @ module_en - module enable status of haptics + * @ lra_auto_mode - Auto mode selection + * @ play_irq_en - Play interrupt enable status + * @ auto_res_err_recovery_hw - Enable auto resonance error recovery by HW + */ +struct hap_chip { + struct platform_device *pdev; + struct regmap *regmap; + struct pmic_revid_data *revid; + struct led_classdev cdev; + spinlock_t bus_lock; + struct mutex play_lock; + struct mutex param_lock; + struct work_struct haptics_work; + struct hrtimer stop_timer; + struct hrtimer auto_res_err_poll_timer; + u16 base; + int play_irq; + int sc_irq; + struct pwm_param pwm_data; + struct hap_lra_ares_param ares_cfg; + struct regulator *vcc_pon; + u32 play_time_ms; + u32 max_play_time_ms; + u32 vmax_mv; + u8 ilim_ma; + u32 sc_deb_cycles; + u32 wave_play_rate_us; + u16 last_rate_cfg; + u32 wave_rep_cnt; + u32 wave_s_rep_cnt; + u32 wf_samp_len; + u32 ext_pwm_freq_khz; + u8 ext_pwm_dtest_line; + u32 status_flags; + enum hap_mode play_mode; + u8 act_type; + u8 wave_shape; + u8 wave_samp_idx; + u32 wave_samp[HAP_WAVE_SAMP_SET_LEN]; + u32 brake_pat[HAP_BRAKE_PAT_LEN]; + bool en_brake; + u32 misc_clk_trim_error_reg; + u8 clk_trim_error_code; + u16 drive_period_code_max_limit; + u16 drive_period_code_min_limit; + u8 drive_period_code_max_var_pct; + u8 drive_period_code_min_var_pct; + ktime_t last_sc_time; + u8 sc_count; + bool perm_disable; + atomic_t state; + bool module_en; + bool lra_auto_mode; + bool play_irq_en; + bool auto_res_err_recovery_hw; + bool vcc_pon_enabled; +}; + +static int qpnp_haptics_parse_buffer_dt(struct hap_chip *chip); +static int qpnp_haptics_parse_pwm_dt(struct hap_chip *chip); + +static int qpnp_haptics_read_reg(struct hap_chip *chip, u16 addr, u8 *val, + int len) +{ + int rc; + + rc = regmap_bulk_read(chip->regmap, addr, val, len); + if (rc < 0) + pr_err("Error reading address: 0x%x - rc %d\n", addr, rc); + + return rc; +} + +static 
inline bool is_secure(u16 addr) +{ + return ((addr & 0xFF) > 0xD0); +} + +static int qpnp_haptics_write_reg(struct hap_chip *chip, u16 addr, u8 *val, + int len) +{ + unsigned long flags; + unsigned int unlock = 0xA5; + int rc = 0, i; + + spin_lock_irqsave(&chip->bus_lock, flags); + + if (is_secure(addr)) { + for (i = 0; i < len; i++) { + rc = regmap_write(chip->regmap, + HAP_SEC_ACCESS_REG(chip), unlock); + if (rc < 0) { + pr_err("Error writing unlock code - rc %d\n", + rc); + goto out; + } + + rc = regmap_write(chip->regmap, addr + i, val[i]); + if (rc < 0) { + pr_err("Error writing address 0x%x - rc %d\n", + addr + i, rc); + goto out; + } + } + } else { + if (len > 1) + rc = regmap_bulk_write(chip->regmap, addr, val, len); + else + rc = regmap_write(chip->regmap, addr, *val); + } + + if (rc < 0) + pr_err("Error writing address: 0x%x - rc %d\n", addr, rc); + +out: + spin_unlock_irqrestore(&chip->bus_lock, flags); + return rc; +} + +static int qpnp_haptics_masked_write_reg(struct hap_chip *chip, u16 addr, + u8 mask, u8 val) +{ + unsigned long flags; + unsigned int unlock = 0xA5; + int rc; + + spin_lock_irqsave(&chip->bus_lock, flags); + if (is_secure(addr)) { + rc = regmap_write(chip->regmap, HAP_SEC_ACCESS_REG(chip), + unlock); + if (rc < 0) { + pr_err("Error writing unlock code - rc %d\n", rc); + goto out; + } + } + + rc = regmap_update_bits(chip->regmap, addr, mask, val); + if (rc < 0) + pr_err("Error writing address: 0x%x - rc %d\n", addr, rc); + + if (!rc) + pr_debug("wrote to address 0x%x = 0x%x\n", addr, val); +out: + spin_unlock_irqrestore(&chip->bus_lock, flags); + return rc; +} + +static inline int get_buffer_mode_duration(struct hap_chip *chip) +{ + int sample_count, sample_duration; + + sample_count = chip->wave_rep_cnt * chip->wave_s_rep_cnt * + chip->wf_samp_len; + sample_duration = sample_count * chip->wave_play_rate_us; + pr_debug("sample_count: %d sample_duration: %d\n", sample_count, + sample_duration); + + return (sample_duration / 1000); +} + +static bool is_sw_lra_auto_resonance_control(struct hap_chip *chip) +{ + if (chip->act_type != HAP_LRA) + return false; + + if (chip->auto_res_err_recovery_hw) + return false; + + /* + * For short pattern in auto mode, we use buffer mode and auto + * resonance is not needed. + */ + if (chip->lra_auto_mode && chip->play_mode == HAP_BUFFER) + return false; + + return true; +} + +#define HAPTICS_BACK_EMF_DELAY_US 20000 +static int qpnp_haptics_auto_res_enable(struct hap_chip *chip, bool enable) +{ + int rc = 0; + u32 delay_us = HAPTICS_BACK_EMF_DELAY_US; + u8 val; + bool auto_res_mode_qwd; + + if (chip->act_type != HAP_LRA) + return 0; + + if (chip->revid->pmic_subtype == PM660_SUBTYPE) + auto_res_mode_qwd = (chip->ares_cfg.auto_res_mode == + HAP_PM660_AUTO_RES_QWD); + else + auto_res_mode_qwd = (chip->ares_cfg.auto_res_mode == + HAP_AUTO_RES_QWD); + + /* + * Do not enable auto resonance if auto mode is enabled and auto + * resonance mode is QWD, meaning long pattern. + */ + if (chip->lra_auto_mode && auto_res_mode_qwd && enable) { + pr_debug("auto_mode enabled, not enabling auto_res\n"); + return 0; + } + + /* + * For auto resonance detection to work properly, sufficient back-emf + * has to be generated. In general, back-emf takes some time to build + * up. When the auto resonance mode is chosen as QWD, high-z will be + * applied for every LRA cycle and hence there won't be enough back-emf + * at the start-up. Hence, the motor needs to vibrate for few LRA cycles + * after the PLAY bit is asserted. 
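+ * In this driver that wait is HAPTICS_BACK_EMF_DELAY_US (20 ms), applied below.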
Enable the auto resonance after + * 'time_required_to_generate_back_emf_us' is completed. + */ + + if (auto_res_mode_qwd && enable) + usleep_range(delay_us, delay_us + 1); + + val = enable ? AUTO_RES_EN_BIT : 0; + + if (chip->revid->pmic_subtype == PM660_SUBTYPE) + rc = qpnp_haptics_masked_write_reg(chip, + HAP_AUTO_RES_CTRL_REG(chip), + AUTO_RES_EN_BIT, val); + else + rc = qpnp_haptics_masked_write_reg(chip, HAP_TEST2_REG(chip), + AUTO_RES_EN_BIT, val); + if (rc < 0) + return rc; + + if (enable) + chip->status_flags |= AUTO_RESONANCE_ENABLED; + else + chip->status_flags &= ~AUTO_RESONANCE_ENABLED; + + pr_debug("auto_res %sabled\n", enable ? "en" : "dis"); + return rc; +} + +static int qpnp_haptics_update_rate_cfg(struct hap_chip *chip, u16 play_rate) +{ + int rc; + u8 val[2]; + + if (chip->last_rate_cfg == play_rate) { + pr_debug("Same rate_cfg %x\n", play_rate); + return 0; + } + + val[0] = play_rate & HAP_RATE_CFG1_MASK; + val[1] = (play_rate >> HAP_RATE_CFG2_SHIFT) & HAP_RATE_CFG2_MASK; + rc = qpnp_haptics_write_reg(chip, HAP_RATE_CFG1_REG(chip), val, 2); + if (rc < 0) + return rc; + + pr_debug("Play rate code 0x%x\n", play_rate); + chip->last_rate_cfg = play_rate; + return 0; +} + +static void qpnp_haptics_update_lra_frequency(struct hap_chip *chip) +{ + u8 lra_auto_res[2], val; + u32 play_rate_code; + u16 rate_cfg; + int rc; + + rc = qpnp_haptics_read_reg(chip, HAP_LRA_AUTO_RES_LO_REG(chip), + lra_auto_res, 2); + if (rc < 0) { + pr_err("Error in reading LRA_AUTO_RES_LO/HI, rc=%d\n", rc); + return; + } + + play_rate_code = + (lra_auto_res[1] & 0xF0) << 4 | (lra_auto_res[0] & 0xFF); + + pr_debug("lra_auto_res_lo = 0x%x lra_auto_res_hi = 0x%x play_rate_code = 0x%x\n", + lra_auto_res[0], lra_auto_res[1], play_rate_code); + + rc = qpnp_haptics_read_reg(chip, HAP_STATUS_1_REG(chip), &val, 1); + if (rc < 0) + return; + + /* + * If the drive period code read from AUTO_RES_LO and AUTO_RES_HI + * registers is more than the max limit percent variation or less + * than the min limit percent variation specified through DT, then + * auto-resonance is disabled. 
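+ * Both limits are computed in qpnp_haptics_config() from the configured play
+ * rate code and the qcom,drive-period-code-max/min-variation-pct DT properties.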
+ */ + + if ((val & AUTO_RES_ERROR_BIT) || + ((play_rate_code <= chip->drive_period_code_min_limit) || + (play_rate_code >= chip->drive_period_code_max_limit))) { + if (val & AUTO_RES_ERROR_BIT) + pr_debug("Auto-resonance error %x\n", val); + else + pr_debug("play rate %x out of bounds [min: 0x%x, max: 0x%x]\n", + play_rate_code, + chip->drive_period_code_min_limit, + chip->drive_period_code_max_limit); + rc = qpnp_haptics_auto_res_enable(chip, false); + if (rc < 0) + pr_debug("Auto-resonance disable failed\n"); + return; + } + + /* + * bits[7:4] of AUTO_RES_HI should be written to bits[3:0] of RATE_CFG2 + */ + lra_auto_res[1] >>= 4; + rate_cfg = lra_auto_res[1] << 8 | lra_auto_res[0]; + rc = qpnp_haptics_update_rate_cfg(chip, rate_cfg); + if (rc < 0) + pr_debug("Error in updating rate_cfg\n"); +} + +#define MAX_RETRIES 5 +#define HAP_CYCLES 4 +static bool is_haptics_idle(struct hap_chip *chip) +{ + unsigned long wait_time_us; + int rc, i; + u8 val; + + rc = qpnp_haptics_read_reg(chip, HAP_STATUS_1_REG(chip), &val, 1); + if (rc < 0) + return false; + + if (!(val & HAP_BUSY_BIT)) + return true; + + if (chip->play_time_ms <= 20) + wait_time_us = chip->play_time_ms * 1000; + else + wait_time_us = chip->wave_play_rate_us * HAP_CYCLES; + + for (i = 0; i < MAX_RETRIES; i++) { + /* wait for play_rate cycles */ + usleep_range(wait_time_us, wait_time_us + 1); + + if (chip->play_mode == HAP_DIRECT || + chip->play_mode == HAP_PWM) + return true; + + rc = qpnp_haptics_read_reg(chip, HAP_STATUS_1_REG(chip), &val, + 1); + if (rc < 0) + return false; + + if (!(val & HAP_BUSY_BIT)) + return true; + } + + if (i >= MAX_RETRIES && (val & HAP_BUSY_BIT)) { + pr_debug("Haptics Busy after %d retries\n", i); + return false; + } + + return true; +} + +static int qpnp_haptics_mod_enable(struct hap_chip *chip, bool enable) +{ + u8 val; + int rc; + + if (chip->module_en == enable) + return 0; + + if (!enable) { + if (!is_haptics_idle(chip)) + pr_debug("Disabling module forcibly\n"); + } + + val = enable ? 
HAP_EN_BIT : 0; + rc = qpnp_haptics_write_reg(chip, HAP_EN_CTL_REG(chip), &val, 1); + if (rc < 0) + return rc; + + chip->module_en = enable; + return 0; +} + +static int qpnp_haptics_play_control(struct hap_chip *chip, + enum hap_play_control ctrl) +{ + u8 val; + int rc; + + switch (ctrl) { + case HAP_STOP: + val = 0; + break; + case HAP_PAUSE: + val = PAUSE_BIT; + break; + case HAP_PLAY: + val = PLAY_BIT; + break; + default: + return 0; + } + + rc = qpnp_haptics_write_reg(chip, HAP_PLAY_REG(chip), &val, 1); + if (rc < 0) { + pr_err("Error in writing to PLAY_REG, rc=%d\n", rc); + return rc; + } + + pr_debug("haptics play ctrl: %d\n", ctrl); + return rc; +} + +#define AUTO_RES_ERR_POLL_TIME_NS (20 * NSEC_PER_MSEC) +static int qpnp_haptics_play(struct hap_chip *chip, bool enable) +{ + int rc = 0, time_ms = chip->play_time_ms; + + if (chip->perm_disable && enable) + return 0; + + mutex_lock(&chip->play_lock); + + if (enable) { + if (chip->play_mode == HAP_PWM) { + rc = pwm_enable(chip->pwm_data.pwm_dev); + if (rc < 0) { + pr_err("Error in enabling PWM, rc=%d\n", rc); + goto out; + } + } + + rc = qpnp_haptics_auto_res_enable(chip, false); + if (rc < 0) { + pr_err("Error in disabling auto_res, rc=%d\n", rc); + goto out; + } + + rc = qpnp_haptics_mod_enable(chip, true); + if (rc < 0) { + pr_err("Error in enabling module, rc=%d\n", rc); + goto out; + } + + rc = qpnp_haptics_play_control(chip, HAP_PLAY); + if (rc < 0) { + pr_err("Error in enabling play, rc=%d\n", rc); + goto out; + } + + if (chip->play_mode == HAP_BUFFER) + time_ms = get_buffer_mode_duration(chip); + hrtimer_start(&chip->stop_timer, + ktime_set(time_ms / MSEC_PER_SEC, + (time_ms % MSEC_PER_SEC) * NSEC_PER_MSEC), + HRTIMER_MODE_REL); + + rc = qpnp_haptics_auto_res_enable(chip, true); + if (rc < 0) { + pr_err("Error in enabling auto_res, rc=%d\n", rc); + goto out; + } + + if (is_sw_lra_auto_resonance_control(chip)) + hrtimer_start(&chip->auto_res_err_poll_timer, + ktime_set(0, AUTO_RES_ERR_POLL_TIME_NS), + HRTIMER_MODE_REL); + } else { + rc = qpnp_haptics_play_control(chip, HAP_STOP); + if (rc < 0) { + pr_err("Error in disabling play, rc=%d\n", rc); + goto out; + } + + if (is_sw_lra_auto_resonance_control(chip)) { + if (chip->status_flags & AUTO_RESONANCE_ENABLED) + qpnp_haptics_update_lra_frequency(chip); + hrtimer_cancel(&chip->auto_res_err_poll_timer); + } + + if (chip->play_mode == HAP_PWM) + pwm_disable(chip->pwm_data.pwm_dev); + + if (chip->play_mode == HAP_BUFFER) + chip->wave_samp_idx = 0; + } + +out: + mutex_unlock(&chip->play_lock); + return rc; +} + +static void qpnp_haptics_work(struct work_struct *work) +{ + struct hap_chip *chip = container_of(work, struct hap_chip, + haptics_work); + int rc; + bool enable; + + enable = atomic_read(&chip->state); + pr_debug("state: %d\n", enable); + + if (chip->vcc_pon && enable && !chip->vcc_pon_enabled) { + rc = regulator_enable(chip->vcc_pon); + if (rc < 0) + pr_err("%s: could not enable vcc_pon regulator rc=%d\n", + __func__, rc); + else + chip->vcc_pon_enabled = true; + } + + rc = qpnp_haptics_play(chip, enable); + if (rc < 0) + pr_err("Error in %sing haptics, rc=%d\n", + enable ? 
"play" : "stopp", rc); + + if (chip->vcc_pon && !enable && chip->vcc_pon_enabled) { + rc = regulator_disable(chip->vcc_pon); + if (rc) + pr_err("%s: could not disable vcc_pon regulator rc=%d\n", + __func__, rc); + else + chip->vcc_pon_enabled = false; + } +} + +static enum hrtimer_restart hap_stop_timer(struct hrtimer *timer) +{ + struct hap_chip *chip = container_of(timer, struct hap_chip, + stop_timer); + + atomic_set(&chip->state, 0); + schedule_work(&chip->haptics_work); + + return HRTIMER_NORESTART; +} + +static enum hrtimer_restart hap_auto_res_err_poll_timer(struct hrtimer *timer) +{ + struct hap_chip *chip = container_of(timer, struct hap_chip, + auto_res_err_poll_timer); + + if (!(chip->status_flags & AUTO_RESONANCE_ENABLED)) + return HRTIMER_NORESTART; + + qpnp_haptics_update_lra_frequency(chip); + hrtimer_forward(&chip->auto_res_err_poll_timer, ktime_get(), + ktime_set(0, AUTO_RES_ERR_POLL_TIME_NS)); + + return HRTIMER_NORESTART; +} + +static int qpnp_haptics_suspend(struct device *dev) +{ + struct hap_chip *chip = dev_get_drvdata(dev); + int rc; + + rc = qpnp_haptics_play(chip, false); + if (rc < 0) + pr_err("Error in stopping haptics, rc=%d\n", rc); + + rc = qpnp_haptics_mod_enable(chip, false); + if (rc < 0) + pr_err("Error in disabling module, rc=%d\n", rc); + + return 0; +} + +static int qpnp_haptics_wave_rep_config(struct hap_chip *chip, + enum hap_rep_type type) +{ + int rc; + u8 val = 0, mask = 0; + + if (type & HAP_WAVE_REPEAT) { + if (chip->wave_rep_cnt < WF_REPEAT_MIN) + chip->wave_rep_cnt = WF_REPEAT_MIN; + else if (chip->wave_rep_cnt > WF_REPEAT_MAX) + chip->wave_rep_cnt = WF_REPEAT_MAX; + mask = WF_REPEAT_MASK; + val = ilog2(chip->wave_rep_cnt) << WF_REPEAT_SHIFT; + } + + if (type & HAP_WAVE_SAMP_REPEAT) { + if (chip->wave_s_rep_cnt < WF_S_REPEAT_MIN) + chip->wave_s_rep_cnt = WF_S_REPEAT_MIN; + else if (chip->wave_s_rep_cnt > WF_S_REPEAT_MAX) + chip->wave_s_rep_cnt = WF_S_REPEAT_MAX; + mask |= WF_S_REPEAT_MASK; + val |= ilog2(chip->wave_s_rep_cnt); + } + + rc = qpnp_haptics_masked_write_reg(chip, HAP_WF_REPEAT_REG(chip), + mask, val); + return rc; +} + +/* configuration api for buffer mode */ +static int qpnp_haptics_buffer_config(struct hap_chip *chip, u32 *wave_samp, + bool overdrive) +{ + u8 buf[HAP_WAVE_SAMP_LEN]; + u32 *ptr; + int rc, i; + + if (wave_samp) { + ptr = wave_samp; + } else { + if (chip->wave_samp_idx >= ARRAY_SIZE(chip->wave_samp)) { + pr_err("Incorrect wave_samp_idx %d\n", + chip->wave_samp_idx); + return -EINVAL; + } + + ptr = &chip->wave_samp[chip->wave_samp_idx]; + } + + /* Don't set override bit in waveform sample for PM660 */ + if (chip->revid->pmic_subtype == PM660_SUBTYPE) + overdrive = false; + + /* Configure WAVE_SAMPLE1 to WAVE_SAMPLE8 register */ + for (i = 0; i < HAP_WAVE_SAMP_LEN; i++) { + buf[i] = ptr[i]; + if (buf[i]) + buf[i] |= (overdrive ? 
HAP_WF_OVD_BIT : 0); + } + + rc = qpnp_haptics_write_reg(chip, HAP_WF_S1_REG(chip), buf, + HAP_WAVE_SAMP_LEN); + return rc; +} + +/* configuration api for pwm */ +static int qpnp_haptics_pwm_config(struct hap_chip *chip) +{ + u8 val = 0; + int rc; + + if (chip->ext_pwm_freq_khz == 0) + return 0; + + /* Configure the EXTERNAL_PWM register */ + if (chip->ext_pwm_freq_khz <= EXT_PWM_FREQ_25_KHZ) { + chip->ext_pwm_freq_khz = EXT_PWM_FREQ_25_KHZ; + val = 0; + } else if (chip->ext_pwm_freq_khz <= EXT_PWM_FREQ_50_KHZ) { + chip->ext_pwm_freq_khz = EXT_PWM_FREQ_50_KHZ; + val = 1; + } else if (chip->ext_pwm_freq_khz <= EXT_PWM_FREQ_75_KHZ) { + chip->ext_pwm_freq_khz = EXT_PWM_FREQ_75_KHZ; + val = 2; + } else { + chip->ext_pwm_freq_khz = EXT_PWM_FREQ_100_KHZ; + val = 3; + } + + rc = qpnp_haptics_masked_write_reg(chip, HAP_EXT_PWM_REG(chip), + EXT_PWM_FREQ_SEL_MASK, val); + if (rc < 0) + return rc; + + if (chip->ext_pwm_dtest_line < 0 || + chip->ext_pwm_dtest_line > PWM_MAX_DTEST_LINES) { + pr_err("invalid dtest line\n"); + return -EINVAL; + } + + if (chip->ext_pwm_dtest_line > 0) { + /* disable auto res for PWM mode */ + val = chip->ext_pwm_dtest_line << HAP_EXT_PWM_DTEST_SHIFT; + rc = qpnp_haptics_masked_write_reg(chip, HAP_TEST2_REG(chip), + HAP_EXT_PWM_DTEST_MASK | AUTO_RES_EN_BIT, val); + if (rc < 0) + return rc; + } + + rc = pwm_config(chip->pwm_data.pwm_dev, + chip->pwm_data.duty_us * NSEC_PER_USEC, + chip->pwm_data.period_us * NSEC_PER_USEC); + if (rc < 0) { + pr_err("pwm_config failed, rc=%d\n", rc); + return rc; + } + + return 0; +} + +static int qpnp_haptics_lra_auto_res_config(struct hap_chip *chip, + struct hap_lra_ares_param *tmp_cfg) +{ + struct hap_lra_ares_param *ares_cfg; + int rc; + u8 val = 0, mask = 0; + + /* disable auto resonance for ERM */ + if (chip->act_type == HAP_ERM) { + val = 0x00; + rc = qpnp_haptics_write_reg(chip, HAP_LRA_AUTO_RES_REG(chip), + &val, 1); + return rc; + } + + if (chip->auto_res_err_recovery_hw) { + rc = qpnp_haptics_masked_write_reg(chip, + HAP_AUTO_RES_CTRL_REG(chip), + AUTO_RES_ERR_RECOVERY_BIT, AUTO_RES_ERR_RECOVERY_BIT); + if (rc < 0) + return rc; + } + + if (tmp_cfg) + ares_cfg = tmp_cfg; + else + ares_cfg = &chip->ares_cfg; + + if (ares_cfg->lra_res_cal_period < HAP_RES_CAL_PERIOD_MIN) + ares_cfg->lra_res_cal_period = HAP_RES_CAL_PERIOD_MIN; + + if (chip->revid->pmic_subtype == PM660_SUBTYPE) { + if (ares_cfg->lra_res_cal_period > + HAP_PM660_RES_CAL_PERIOD_MAX) + ares_cfg->lra_res_cal_period = + HAP_PM660_RES_CAL_PERIOD_MAX; + + if (ares_cfg->auto_res_mode == HAP_PM660_AUTO_RES_QWD) + ares_cfg->lra_res_cal_period = 0; + + if (ares_cfg->lra_res_cal_period) + val = ilog2(ares_cfg->lra_res_cal_period / + HAP_RES_CAL_PERIOD_MIN) + 1; + } else { + if (ares_cfg->lra_res_cal_period > HAP_RES_CAL_PERIOD_MAX) + ares_cfg->lra_res_cal_period = + HAP_RES_CAL_PERIOD_MAX; + + if (ares_cfg->lra_res_cal_period) + val = ilog2(ares_cfg->lra_res_cal_period / + HAP_RES_CAL_PERIOD_MIN); + } + + if (chip->revid->pmic_subtype == PM660_SUBTYPE) { + val |= ares_cfg->auto_res_mode << PM660_AUTO_RES_MODE_SHIFT; + mask = PM660_AUTO_RES_MODE_BIT; + val |= ares_cfg->lra_high_z << PM660_CAL_DURATION_SHIFT; + mask |= PM660_CAL_DURATION_MASK; + if (ares_cfg->lra_qwd_drive_duration != -EINVAL) { + val |= ares_cfg->lra_qwd_drive_duration << + PM660_QWD_DRIVE_DURATION_SHIFT; + mask |= PM660_QWD_DRIVE_DURATION_BIT; + } + if (ares_cfg->calibrate_at_eop != -EINVAL) { + val |= ares_cfg->calibrate_at_eop << + PM660_CAL_EOP_SHIFT; + mask |= PM660_CAL_EOP_BIT; + } + mask |= 
PM660_LRA_RES_CAL_MASK; + } else { + val |= (ares_cfg->auto_res_mode << LRA_AUTO_RES_MODE_SHIFT); + val |= (ares_cfg->lra_high_z << LRA_HIGH_Z_SHIFT); + mask = LRA_AUTO_RES_MODE_MASK | LRA_HIGH_Z_MASK | + LRA_RES_CAL_MASK; + } + + pr_debug("mode: %d hi_z period: %d cal_period: %d\n", + ares_cfg->auto_res_mode, ares_cfg->lra_high_z, + ares_cfg->lra_res_cal_period); + + rc = qpnp_haptics_masked_write_reg(chip, HAP_LRA_AUTO_RES_REG(chip), + mask, val); + return rc; +} + +/* configuration api for play mode */ +static int qpnp_haptics_play_mode_config(struct hap_chip *chip) +{ + u8 val = 0; + int rc; + + if (!is_haptics_idle(chip)) + return -EBUSY; + + val = chip->play_mode << HAP_WF_SOURCE_SHIFT; + rc = qpnp_haptics_masked_write_reg(chip, HAP_SEL_REG(chip), + HAP_WF_SOURCE_MASK, val); + if (!rc) { + if (chip->play_mode == HAP_BUFFER && !chip->play_irq_en) { + enable_irq(chip->play_irq); + chip->play_irq_en = true; + } else if (chip->play_mode != HAP_BUFFER && chip->play_irq_en) { + disable_irq(chip->play_irq); + chip->play_irq_en = false; + } + } + return rc; +} + +/* configuration api for max voltage */ +static int qpnp_haptics_vmax_config(struct hap_chip *chip, int vmax_mv, + bool overdrive) +{ + u8 val = 0; + int rc; + + if (vmax_mv < 0) + return -EINVAL; + + /* Allow setting override bit in VMAX_CFG only for PM660 */ + if (chip->revid->pmic_subtype != PM660_SUBTYPE) + overdrive = false; + + if (vmax_mv < HAP_VMAX_MIN_MV) + vmax_mv = HAP_VMAX_MIN_MV; + else if (vmax_mv > HAP_VMAX_MAX_MV) + vmax_mv = HAP_VMAX_MAX_MV; + + val = DIV_ROUND_CLOSEST(vmax_mv, HAP_VMAX_MIN_MV); + val <<= HAP_VMAX_SHIFT; + if (overdrive) + val |= HAP_VMAX_OVD_BIT; + + rc = qpnp_haptics_masked_write_reg(chip, HAP_VMAX_CFG_REG(chip), + HAP_VMAX_MASK | HAP_VMAX_OVD_BIT, val); + return rc; +} + +/* configuration api for ilim */ +static int qpnp_haptics_ilim_config(struct hap_chip *chip) +{ + int rc; + + if (chip->ilim_ma < HAP_ILIM_400_MA) + chip->ilim_ma = HAP_ILIM_400_MA; + else if (chip->ilim_ma > HAP_ILIM_800_MA) + chip->ilim_ma = HAP_ILIM_800_MA; + + rc = qpnp_haptics_masked_write_reg(chip, HAP_ILIM_CFG_REG(chip), + HAP_ILIM_SEL_MASK, chip->ilim_ma); + return rc; +} + +/* configuration api for short circuit debounce */ +static int qpnp_haptics_sc_deb_config(struct hap_chip *chip) +{ + u8 val = 0; + int rc; + + if (chip->sc_deb_cycles < HAP_SC_DEB_CYCLES_MIN) + chip->sc_deb_cycles = HAP_SC_DEB_CYCLES_MIN; + else if (chip->sc_deb_cycles > HAP_SC_DEB_CYCLES_MAX) + chip->sc_deb_cycles = HAP_SC_DEB_CYCLES_MAX; + + if (chip->sc_deb_cycles != HAP_SC_DEB_CYCLES_MIN) + val = ilog2(chip->sc_deb_cycles / + HAP_DEF_SC_DEB_CYCLES) + 1; + else + val = HAP_SC_DEB_CYCLES_MIN; + + rc = qpnp_haptics_masked_write_reg(chip, HAP_SC_DEB_REG(chip), + HAP_SC_DEB_MASK, val); + + return rc; +} + +static int qpnp_haptics_brake_config(struct hap_chip *chip, u32 *brake_pat) +{ + int rc, i; + u32 temp, *ptr; + u8 val; + + /* Configure BRAKE register */ + rc = qpnp_haptics_masked_write_reg(chip, HAP_EN_CTL2_REG(chip), + BRAKE_EN_BIT, (u8)chip->en_brake); + if (rc < 0) + return rc; + + /* If braking is not enabled, skip configuring brake pattern */ + if (!chip->en_brake) + return 0; + + if (!brake_pat) + ptr = chip->brake_pat; + else + ptr = brake_pat; + + for (i = HAP_BRAKE_PAT_LEN - 1, val = 0; i >= 0; i--) { + ptr[i] &= HAP_BRAKE_PAT_MASK; + temp = i << 1; + val |= ptr[i] << temp; + } + + rc = qpnp_haptics_write_reg(chip, HAP_BRAKE_REG(chip), &val, 1); + if (rc < 0) + return rc; + + return 0; +} + +static int 
qpnp_haptics_auto_mode_config(struct hap_chip *chip, int time_ms) +{ + struct hap_lra_ares_param ares_cfg; + enum hap_mode old_play_mode; + u8 old_ares_mode; + u32 brake_pat[HAP_BRAKE_PAT_LEN] = {0}; + u32 wave_samp[HAP_WAVE_SAMP_LEN] = {0}; + int rc, vmax_mv; + + if (!chip->lra_auto_mode) + return false; + + /* For now, this is for LRA only */ + if (chip->act_type == HAP_ERM) + return 0; + + old_ares_mode = chip->ares_cfg.auto_res_mode; + old_play_mode = chip->play_mode; + pr_debug("auto_mode, time_ms: %d\n", time_ms); + if (time_ms <= 20) { + wave_samp[0] = HAP_WF_SAMP_MAX; + wave_samp[1] = HAP_WF_SAMP_MAX; + chip->wf_samp_len = 2; + if (time_ms > 15) { + wave_samp[2] = HAP_WF_SAMP_MAX; + chip->wf_samp_len = 3; + } + + /* short pattern */ + rc = qpnp_haptics_parse_buffer_dt(chip); + if (!rc) { + rc = qpnp_haptics_wave_rep_config(chip, + HAP_WAVE_REPEAT | HAP_WAVE_SAMP_REPEAT); + if (rc < 0) { + pr_err("Error in configuring wave_rep config %d\n", + rc); + return rc; + } + + rc = qpnp_haptics_buffer_config(chip, wave_samp, true); + if (rc < 0) { + pr_err("Error in configuring buffer mode %d\n", + rc); + return rc; + } + } + + ares_cfg.lra_high_z = HAP_LRA_HIGH_Z_OPT1; + ares_cfg.lra_res_cal_period = HAP_RES_CAL_PERIOD_MIN; + if (chip->revid->pmic_subtype == PM660_SUBTYPE) { + ares_cfg.auto_res_mode = HAP_PM660_AUTO_RES_QWD; + ares_cfg.lra_qwd_drive_duration = 0; + ares_cfg.calibrate_at_eop = 0; + } else { + ares_cfg.auto_res_mode = HAP_AUTO_RES_ZXD_EOP; + ares_cfg.lra_qwd_drive_duration = -EINVAL; + ares_cfg.calibrate_at_eop = -EINVAL; + } + + vmax_mv = HAP_VMAX_MAX_MV; + rc = qpnp_haptics_vmax_config(chip, vmax_mv, true); + if (rc < 0) + return rc; + + /* enable play_irq for buffer mode */ + if (chip->play_irq >= 0 && !chip->play_irq_en) { + enable_irq(chip->play_irq); + chip->play_irq_en = true; + } + + brake_pat[0] = BRAKE_VMAX; + chip->play_mode = HAP_BUFFER; + chip->wave_shape = HAP_WAVE_SQUARE; + } else { + /* long pattern */ + ares_cfg.lra_high_z = HAP_LRA_HIGH_Z_OPT1; + if (chip->revid->pmic_subtype == PM660_SUBTYPE) { + ares_cfg.auto_res_mode = HAP_PM660_AUTO_RES_ZXD; + ares_cfg.lra_res_cal_period = + HAP_PM660_RES_CAL_PERIOD_MAX; + ares_cfg.lra_qwd_drive_duration = 0; + ares_cfg.calibrate_at_eop = 1; + } else { + ares_cfg.auto_res_mode = HAP_AUTO_RES_QWD; + ares_cfg.lra_res_cal_period = HAP_RES_CAL_PERIOD_MAX; + ares_cfg.lra_qwd_drive_duration = -EINVAL; + ares_cfg.calibrate_at_eop = -EINVAL; + } + + vmax_mv = chip->vmax_mv; + rc = qpnp_haptics_vmax_config(chip, vmax_mv, false); + if (rc < 0) + return rc; + + /* enable play_irq for direct mode */ + if (chip->play_irq >= 0 && chip->play_irq_en) { + disable_irq(chip->play_irq); + chip->play_irq_en = false; + } + + chip->play_mode = HAP_DIRECT; + chip->wave_shape = HAP_WAVE_SINE; + } + + chip->ares_cfg.auto_res_mode = ares_cfg.auto_res_mode; + rc = qpnp_haptics_lra_auto_res_config(chip, &ares_cfg); + if (rc < 0) { + chip->ares_cfg.auto_res_mode = old_ares_mode; + return rc; + } + + rc = qpnp_haptics_play_mode_config(chip); + if (rc < 0) { + chip->play_mode = old_play_mode; + return rc; + } + + rc = qpnp_haptics_brake_config(chip, brake_pat); + if (rc < 0) + return rc; + + rc = qpnp_haptics_masked_write_reg(chip, HAP_CFG2_REG(chip), + HAP_LRA_RES_TYPE_MASK, chip->wave_shape); + if (rc < 0) + return rc; + + return 0; +} + +static irqreturn_t qpnp_haptics_play_irq_handler(int irq, void *data) +{ + struct hap_chip *chip = data; + int rc; + + if (chip->play_mode != HAP_BUFFER) + goto irq_handled; + + if 
(chip->wave_samp[chip->wave_samp_idx + HAP_WAVE_SAMP_LEN] > 0) { + chip->wave_samp_idx += HAP_WAVE_SAMP_LEN; + if (chip->wave_samp_idx >= ARRAY_SIZE(chip->wave_samp)) { + pr_debug("Samples over\n"); + } else { + pr_debug("moving to next sample set %d\n", + chip->wave_samp_idx); + + /* Moving to next set of wave sample */ + rc = qpnp_haptics_buffer_config(chip, NULL, false); + if (rc < 0) { + pr_err("Error in configuring buffer, rc=%d\n", + rc); + goto irq_handled; + } + } + } + +irq_handled: + return IRQ_HANDLED; +} + +#define SC_MAX_COUNT 5 +#define SC_COUNT_RST_DELAY_US 1000000 +static irqreturn_t qpnp_haptics_sc_irq_handler(int irq, void *data) +{ + struct hap_chip *chip = data; + int rc; + u8 val; + s64 sc_delta_time_us; + ktime_t temp; + + rc = qpnp_haptics_read_reg(chip, HAP_STATUS_1_REG(chip), &val, 1); + if (rc < 0) + goto irq_handled; + + if (!(val & SC_FLAG_BIT)) { + chip->sc_count = 0; + goto irq_handled; + } + + pr_debug("SC irq fired\n"); + temp = ktime_get(); + sc_delta_time_us = ktime_us_delta(temp, chip->last_sc_time); + chip->last_sc_time = temp; + + if (sc_delta_time_us > SC_COUNT_RST_DELAY_US) + chip->sc_count = 0; + else + chip->sc_count++; + + val = SC_CLR_BIT; + rc = qpnp_haptics_write_reg(chip, HAP_SC_CLR_REG(chip), &val, 1); + if (rc < 0) { + pr_err("Error in writing to SC_CLR_REG, rc=%d\n", rc); + goto irq_handled; + } + + /* Permanently disable module if SC condition persists */ + if (chip->sc_count > SC_MAX_COUNT) { + pr_crit("SC persists, permanently disabling haptics\n"); + rc = qpnp_haptics_mod_enable(chip, false); + if (rc < 0) { + pr_err("Error in disabling module, rc=%d\n", rc); + goto irq_handled; + } + chip->perm_disable = true; + } + +irq_handled: + return IRQ_HANDLED; +} + +/* All sysfs show/store functions below */ + +#define HAP_STR_SIZE 128 +static int parse_string(const char *in_buf, char *out_buf) +{ + int i; + + if (snprintf(out_buf, HAP_STR_SIZE, "%s", in_buf) > HAP_STR_SIZE) + return -EINVAL; + + for (i = 0; i < strlen(out_buf); i++) { + if (out_buf[i] == ' ' || out_buf[i] == '\n' || + out_buf[i] == '\t') { + out_buf[i] = '\0'; + break; + } + } + + return 0; +} + +static ssize_t qpnp_haptics_show_state(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct led_classdev *cdev = dev_get_drvdata(dev); + struct hap_chip *chip = container_of(cdev, struct hap_chip, cdev); + + return snprintf(buf, PAGE_SIZE, "%d\n", chip->module_en); +} + +static ssize_t qpnp_haptics_store_state(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + + /* At present, nothing to do with setting state */ + return count; +} + +static ssize_t qpnp_haptics_show_duration(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct led_classdev *cdev = dev_get_drvdata(dev); + struct hap_chip *chip = container_of(cdev, struct hap_chip, cdev); + ktime_t time_rem; + s64 time_us = 0; + + if (hrtimer_active(&chip->stop_timer)) { + time_rem = hrtimer_get_remaining(&chip->stop_timer); + time_us = ktime_to_us(time_rem); + } + + return snprintf(buf, PAGE_SIZE, "%lld\n", div_s64(time_us, 1000)); +} + +static ssize_t qpnp_haptics_store_duration(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct led_classdev *cdev = dev_get_drvdata(dev); + struct hap_chip *chip = container_of(cdev, struct hap_chip, cdev); + u32 val; + int rc; + + rc = kstrtouint(buf, 0, &val); + if (rc < 0) + return rc; + + /* setting 0 on duration is NOP for now */ + if (val <= 0) + return count; + + if (val 
> chip->max_play_time_ms) + return -EINVAL; + + mutex_lock(&chip->param_lock); + rc = qpnp_haptics_auto_mode_config(chip, val); + if (rc < 0) { + pr_err("Unable to do auto mode config\n"); + mutex_unlock(&chip->param_lock); + return rc; + } + + chip->play_time_ms = val; + mutex_unlock(&chip->param_lock); + + return count; +} + +static ssize_t qpnp_haptics_show_activate(struct device *dev, + struct device_attribute *attr, char *buf) +{ + /* For now nothing to show */ + return snprintf(buf, PAGE_SIZE, "%d\n", 0); +} + +static ssize_t qpnp_haptics_store_activate(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct led_classdev *cdev = dev_get_drvdata(dev); + struct hap_chip *chip = container_of(cdev, struct hap_chip, cdev); + u32 val; + int rc; + + rc = kstrtouint(buf, 0, &val); + if (rc < 0) + return rc; + + if (val != 0 && val != 1) + return count; + + if (val) { + hrtimer_cancel(&chip->stop_timer); + if (is_sw_lra_auto_resonance_control(chip)) + hrtimer_cancel(&chip->auto_res_err_poll_timer); + cancel_work_sync(&chip->haptics_work); + + atomic_set(&chip->state, 1); + schedule_work(&chip->haptics_work); + } else { + rc = qpnp_haptics_mod_enable(chip, false); + if (rc < 0) { + pr_err("Error in disabling module, rc=%d\n", rc); + return rc; + } + } + + return count; +} + +static ssize_t qpnp_haptics_show_play_mode(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct led_classdev *cdev = dev_get_drvdata(dev); + struct hap_chip *chip = container_of(cdev, struct hap_chip, cdev); + char *str; + + if (chip->play_mode == HAP_BUFFER) + str = "buffer"; + else if (chip->play_mode == HAP_DIRECT) + str = "direct"; + else if (chip->play_mode == HAP_AUDIO) + str = "audio"; + else if (chip->play_mode == HAP_PWM) + str = "pwm"; + else + return -EINVAL; + + return snprintf(buf, PAGE_SIZE, "%s\n", str); +} + +static ssize_t qpnp_haptics_store_play_mode(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct led_classdev *cdev = dev_get_drvdata(dev); + struct hap_chip *chip = container_of(cdev, struct hap_chip, cdev); + char str[HAP_STR_SIZE + 1]; + int rc = 0, temp, old_mode; + + rc = parse_string(buf, str); + if (rc < 0) + return rc; + + if (strcmp(str, "buffer") == 0) + temp = HAP_BUFFER; + else if (strcmp(str, "direct") == 0) + temp = HAP_DIRECT; + else if (strcmp(str, "audio") == 0) + temp = HAP_AUDIO; + else if (strcmp(str, "pwm") == 0) + temp = HAP_PWM; + else + return -EINVAL; + + if (temp == chip->play_mode) + return count; + + if (temp == HAP_BUFFER) { + rc = qpnp_haptics_parse_buffer_dt(chip); + if (!rc) { + rc = qpnp_haptics_wave_rep_config(chip, + HAP_WAVE_REPEAT | HAP_WAVE_SAMP_REPEAT); + if (rc < 0) { + pr_err("Error in configuring wave_rep config %d\n", + rc); + return rc; + } + } + + rc = qpnp_haptics_buffer_config(chip, NULL, true); + } else if (temp == HAP_PWM) { + rc = qpnp_haptics_parse_pwm_dt(chip); + if (!rc) + rc = qpnp_haptics_pwm_config(chip); + } + + if (rc < 0) + return rc; + + rc = qpnp_haptics_mod_enable(chip, false); + if (rc < 0) + return rc; + + old_mode = chip->play_mode; + chip->play_mode = temp; + rc = qpnp_haptics_play_mode_config(chip); + if (rc < 0) { + chip->play_mode = old_mode; + return rc; + } + + if (chip->play_mode == HAP_AUDIO) { + rc = qpnp_haptics_mod_enable(chip, true); + if (rc < 0) { + chip->play_mode = old_mode; + return rc; + } + } + + return count; +} + +static ssize_t qpnp_haptics_show_wf_samp(struct device *dev, + struct device_attribute *attr, char 
*buf) +{ + struct led_classdev *cdev = dev_get_drvdata(dev); + struct hap_chip *chip = container_of(cdev, struct hap_chip, cdev); + char str[HAP_STR_SIZE + 1]; + char *ptr = str; + int i, len = 0; + + for (i = 0; i < ARRAY_SIZE(chip->wave_samp); i++) { + len = scnprintf(ptr, HAP_STR_SIZE, "%x ", chip->wave_samp[i]); + ptr += len; + } + ptr[len] = '\0'; + + return snprintf(buf, PAGE_SIZE, "%s\n", str); +} + +static ssize_t qpnp_haptics_store_wf_samp(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct led_classdev *cdev = dev_get_drvdata(dev); + struct hap_chip *chip = container_of(cdev, struct hap_chip, cdev); + u8 samp[HAP_WAVE_SAMP_SET_LEN] = {0}; + int bytes_read, rc; + unsigned int data, pos = 0, i = 0; + + while (pos < count && i < ARRAY_SIZE(samp) && + sscanf(buf + pos, "%x%n", &data, &bytes_read) == 1) { + /* bit 0 is not used in WF_Sx */ + samp[i++] = data & GENMASK(7, 1); + pos += bytes_read; + } + + chip->wf_samp_len = i; + for (i = 0; i < ARRAY_SIZE(chip->wave_samp); i++) + chip->wave_samp[i] = samp[i]; + + rc = qpnp_haptics_buffer_config(chip, NULL, false); + if (rc < 0) { + pr_err("Error in configuring buffer mode %d\n", rc); + return rc; + } + + return count; +} + +static ssize_t qpnp_haptics_show_wf_rep_count(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct led_classdev *cdev = dev_get_drvdata(dev); + struct hap_chip *chip = container_of(cdev, struct hap_chip, cdev); + + return snprintf(buf, PAGE_SIZE, "%d\n", chip->wave_rep_cnt); +} + +static ssize_t qpnp_haptics_store_wf_rep_count(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct led_classdev *cdev = dev_get_drvdata(dev); + struct hap_chip *chip = container_of(cdev, struct hap_chip, cdev); + int data, rc, old_wave_rep_cnt; + + rc = kstrtoint(buf, 10, &data); + if (rc < 0) + return rc; + + old_wave_rep_cnt = chip->wave_rep_cnt; + chip->wave_rep_cnt = data; + rc = qpnp_haptics_wave_rep_config(chip, HAP_WAVE_REPEAT); + if (rc < 0) { + chip->wave_rep_cnt = old_wave_rep_cnt; + return rc; + } + + return count; +} + +static ssize_t qpnp_haptics_show_wf_s_rep_count(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct led_classdev *cdev = dev_get_drvdata(dev); + struct hap_chip *chip = container_of(cdev, struct hap_chip, cdev); + + return snprintf(buf, PAGE_SIZE, "%d\n", chip->wave_s_rep_cnt); +} + +static ssize_t qpnp_haptics_store_wf_s_rep_count(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct led_classdev *cdev = dev_get_drvdata(dev); + struct hap_chip *chip = container_of(cdev, struct hap_chip, cdev); + int data, rc, old_wave_s_rep_cnt; + + rc = kstrtoint(buf, 10, &data); + if (rc < 0) + return rc; + + old_wave_s_rep_cnt = chip->wave_s_rep_cnt; + chip->wave_s_rep_cnt = data; + rc = qpnp_haptics_wave_rep_config(chip, HAP_WAVE_SAMP_REPEAT); + if (rc < 0) { + chip->wave_s_rep_cnt = old_wave_s_rep_cnt; + return rc; + } + + return count; +} + +static ssize_t qpnp_haptics_show_vmax(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct led_classdev *cdev = dev_get_drvdata(dev); + struct hap_chip *chip = container_of(cdev, struct hap_chip, cdev); + + return snprintf(buf, PAGE_SIZE, "%d\n", chip->vmax_mv); +} + +static ssize_t qpnp_haptics_store_vmax(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct led_classdev *cdev = dev_get_drvdata(dev); + struct hap_chip *chip = container_of(cdev, 
struct hap_chip, cdev); + int data, rc, old_vmax_mv; + + rc = kstrtoint(buf, 10, &data); + if (rc < 0) + return rc; + + old_vmax_mv = chip->vmax_mv; + chip->vmax_mv = data; + rc = qpnp_haptics_vmax_config(chip, chip->vmax_mv, false); + if (rc < 0) { + chip->vmax_mv = old_vmax_mv; + return rc; + } + + return count; +} + +static ssize_t qpnp_haptics_show_lra_auto_mode(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct led_classdev *cdev = dev_get_drvdata(dev); + struct hap_chip *chip = container_of(cdev, struct hap_chip, cdev); + + return snprintf(buf, PAGE_SIZE, "%d\n", chip->lra_auto_mode); +} + +static ssize_t qpnp_haptics_store_lra_auto_mode(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct led_classdev *cdev = dev_get_drvdata(dev); + struct hap_chip *chip = container_of(cdev, struct hap_chip, cdev); + int rc, data; + + rc = kstrtoint(buf, 10, &data); + if (rc < 0) + return rc; + + if (data != 0 && data != 1) + return count; + + chip->lra_auto_mode = !!data; + return count; +} + +static struct device_attribute qpnp_haptics_attrs[] = { + __ATTR(state, 0664, qpnp_haptics_show_state, qpnp_haptics_store_state), + __ATTR(duration, 0664, qpnp_haptics_show_duration, + qpnp_haptics_store_duration), + __ATTR(activate, 0664, qpnp_haptics_show_activate, + qpnp_haptics_store_activate), + __ATTR(play_mode, 0664, qpnp_haptics_show_play_mode, + qpnp_haptics_store_play_mode), + __ATTR(wf_samp, 0664, qpnp_haptics_show_wf_samp, + qpnp_haptics_store_wf_samp), + __ATTR(wf_rep_count, 0664, qpnp_haptics_show_wf_rep_count, + qpnp_haptics_store_wf_rep_count), + __ATTR(wf_s_rep_count, 0664, qpnp_haptics_show_wf_s_rep_count, + qpnp_haptics_store_wf_s_rep_count), + __ATTR(vmax_mv, 0664, qpnp_haptics_show_vmax, qpnp_haptics_store_vmax), + __ATTR(lra_auto_mode, 0664, qpnp_haptics_show_lra_auto_mode, + qpnp_haptics_store_lra_auto_mode), +}; + +/* Dummy functions for brightness */ +static +enum led_brightness qpnp_haptics_brightness_get(struct led_classdev *cdev) +{ + return 0; +} + +static void qpnp_haptics_brightness_set(struct led_classdev *cdev, + enum led_brightness level) +{ +} + +static int qpnp_haptics_config(struct hap_chip *chip) +{ + u8 rc_clk_err_deci_pct; + u16 play_rate = 0; + int rc; + + /* Configure the CFG1 register for actuator type */ + rc = qpnp_haptics_masked_write_reg(chip, HAP_CFG1_REG(chip), + HAP_ACT_TYPE_MASK, chip->act_type); + if (rc < 0) + return rc; + + /* Configure auto resonance parameters */ + rc = qpnp_haptics_lra_auto_res_config(chip, NULL); + if (rc < 0) + return rc; + + /* Configure the PLAY MODE register */ + rc = qpnp_haptics_play_mode_config(chip); + if (rc < 0) + return rc; + + /* Configure the VMAX register */ + rc = qpnp_haptics_vmax_config(chip, chip->vmax_mv, false); + if (rc < 0) + return rc; + + /* Configure the ILIM register */ + rc = qpnp_haptics_ilim_config(chip); + if (rc < 0) + return rc; + + /* Configure the short circuit debounce register */ + rc = qpnp_haptics_sc_deb_config(chip); + if (rc < 0) + return rc; + + /* Configure the WAVE SHAPE register */ + rc = qpnp_haptics_masked_write_reg(chip, HAP_CFG2_REG(chip), + HAP_LRA_RES_TYPE_MASK, chip->wave_shape); + if (rc < 0) + return rc; + + play_rate = chip->wave_play_rate_us / HAP_RATE_CFG_STEP_US; + + /* + * The frequency of 19.2 MHz RC clock is subject to variation. Currently + * some PMI chips have MISC_TRIM_ERROR_RC19P2_CLK register present in + * MISC peripheral. This register holds the trim error of RC clock. 
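+ * The trim error code is read once at probe and applied to the play rate
+ * below; for example, a code of 0x83 (sign bit set, error 3) scales the
+ * play rate by (1000 - 3 * 7) / 1000, i.e. roughly -2.1%.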
+ */ + if (chip->act_type == HAP_LRA && chip->misc_clk_trim_error_reg) { + /* + * Error is available in bits[3:0] and each LSB is 0.7%. + * Bit 7 is the sign bit for error code. If it is set, then a + * negative error correction needs to be made. Otherwise, a + * positive error correction needs to be made. + */ + rc_clk_err_deci_pct = (chip->clk_trim_error_code & 0x0F) * 7; + if (chip->clk_trim_error_code & BIT(7)) + play_rate = (play_rate * + (1000 - rc_clk_err_deci_pct)) / 1000; + else + play_rate = (play_rate * + (1000 + rc_clk_err_deci_pct)) / 1000; + + pr_debug("TRIM register = 0x%x, play_rate=%d\n", + chip->clk_trim_error_code, play_rate); + } + + /* + * Configure RATE_CFG1 and RATE_CFG2 registers. + * Note: For ERM these registers act as play rate and + * for LRA these represent resonance period + */ + rc = qpnp_haptics_update_rate_cfg(chip, play_rate); + if (chip->act_type == HAP_LRA) { + chip->drive_period_code_max_limit = (play_rate * + (100 + chip->drive_period_code_max_var_pct)) / 100; + chip->drive_period_code_min_limit = (play_rate * + (100 - chip->drive_period_code_min_var_pct)) / 100; + pr_debug("Drive period code max limit %x min limit %x\n", + chip->drive_period_code_max_limit, + chip->drive_period_code_min_limit); + } + + rc = qpnp_haptics_brake_config(chip, NULL); + if (rc < 0) + return rc; + + if (chip->play_mode == HAP_BUFFER) { + rc = qpnp_haptics_wave_rep_config(chip, + HAP_WAVE_REPEAT | HAP_WAVE_SAMP_REPEAT); + if (rc < 0) + return rc; + + rc = qpnp_haptics_buffer_config(chip, NULL, false); + } else if (chip->play_mode == HAP_PWM) { + rc = qpnp_haptics_pwm_config(chip); + } else if (chip->play_mode == HAP_AUDIO) { + rc = qpnp_haptics_mod_enable(chip, true); + } + + if (rc < 0) + return rc; + + /* setup play irq */ + if (chip->play_irq >= 0) { + rc = devm_request_threaded_irq(&chip->pdev->dev, chip->play_irq, + NULL, qpnp_haptics_play_irq_handler, IRQF_ONESHOT, + "haptics_play_irq", chip); + if (rc < 0) { + pr_err("Unable to request play(%d) IRQ(err:%d)\n", + chip->play_irq, rc); + return rc; + } + + /* use play_irq only for buffer mode */ + if (chip->play_mode != HAP_BUFFER) { + disable_irq(chip->play_irq); + chip->play_irq_en = false; + } + } + + /* setup short circuit irq */ + if (chip->sc_irq >= 0) { + rc = devm_request_threaded_irq(&chip->pdev->dev, chip->sc_irq, + NULL, qpnp_haptics_sc_irq_handler, IRQF_ONESHOT, + "haptics_sc_irq", chip); + if (rc < 0) { + pr_err("Unable to request sc(%d) IRQ(err:%d)\n", + chip->sc_irq, rc); + return rc; + } + } + + return rc; +} + +static int qpnp_haptics_parse_buffer_dt(struct hap_chip *chip) +{ + struct device_node *node = chip->pdev->dev.of_node; + u32 temp; + int rc, i, wf_samp_len; + + if (chip->wave_rep_cnt > 0 || chip->wave_s_rep_cnt > 0) + return 0; + + chip->wave_rep_cnt = WF_REPEAT_MIN; + rc = of_property_read_u32(node, "qcom,wave-rep-cnt", &temp); + if (!rc) { + chip->wave_rep_cnt = temp; + } else if (rc != -EINVAL) { + pr_err("Unable to read rep cnt rc=%d\n", rc); + return rc; + } + + chip->wave_s_rep_cnt = WF_S_REPEAT_MIN; + rc = of_property_read_u32(node, + "qcom,wave-samp-rep-cnt", &temp); + if (!rc) { + chip->wave_s_rep_cnt = temp; + } else if (rc != -EINVAL) { + pr_err("Unable to read samp rep cnt rc=%d\n", rc); + return rc; + } + + wf_samp_len = of_property_count_elems_of_size(node, + "qcom,wave-samples", sizeof(u32)); + if (wf_samp_len > 0) { + if (wf_samp_len > HAP_WAVE_SAMP_SET_LEN) { + pr_err("Invalid length for wave samples\n"); + return -EINVAL; + } + + rc = of_property_read_u32_array(node, 
"qcom,wave-samples", + chip->wave_samp, wf_samp_len); + if (rc < 0) { + pr_err("Error in reading qcom,wave-samples, rc=%d\n", + rc); + return rc; + } + } else { + /* Use default values */ + for (i = 0; i < HAP_WAVE_SAMP_LEN; i++) + chip->wave_samp[i] = HAP_WF_SAMP_MAX; + + wf_samp_len = HAP_WAVE_SAMP_LEN; + } + chip->wf_samp_len = wf_samp_len; + + return 0; +} + +static int qpnp_haptics_parse_pwm_dt(struct hap_chip *chip) +{ + struct device_node *node = chip->pdev->dev.of_node; + u32 temp; + int rc; + + if (chip->pwm_data.period_us > 0 && chip->pwm_data.duty_us > 0) + return 0; + + chip->pwm_data.pwm_dev = of_pwm_get(node, NULL); + if (IS_ERR(chip->pwm_data.pwm_dev)) { + rc = PTR_ERR(chip->pwm_data.pwm_dev); + pr_err("Cannot get PWM device rc=%d\n", rc); + chip->pwm_data.pwm_dev = NULL; + return rc; + } + + rc = of_property_read_u32(node, "qcom,period-us", &temp); + if (!rc) { + chip->pwm_data.period_us = temp; + } else { + pr_err("Cannot read PWM period rc=%d\n", rc); + return rc; + } + + rc = of_property_read_u32(node, "qcom,duty-us", &temp); + if (!rc) { + chip->pwm_data.duty_us = temp; + } else { + pr_err("Cannot read PWM duty rc=%d\n", rc); + return rc; + } + + rc = of_property_read_u32(node, "qcom,ext-pwm-dtest-line", &temp); + if (!rc) + chip->ext_pwm_dtest_line = temp; + + rc = of_property_read_u32(node, "qcom,ext-pwm-freq-khz", &temp); + if (!rc) { + chip->ext_pwm_freq_khz = temp; + } else if (rc != -EINVAL) { + pr_err("Unable to read ext pwm freq rc=%d\n", rc); + return rc; + } + + return 0; +} + +static int qpnp_haptics_parse_dt(struct hap_chip *chip) +{ + struct device_node *node = chip->pdev->dev.of_node; + struct device_node *revid_node, *misc_node; + const char *temp_str; + int rc, temp; + struct regulator *vcc_pon; + + rc = of_property_read_u32(node, "reg", &temp); + if (rc < 0) { + pr_err("Couldn't find reg in node = %s rc = %d\n", + node->full_name, rc); + return rc; + } + + if (temp <= 0) { + pr_err("Invalid base address %x\n", temp); + return -EINVAL; + } + chip->base = (u16)temp; + + revid_node = of_parse_phandle(node, "qcom,pmic-revid", 0); + if (!revid_node) { + pr_err("Missing qcom,pmic-revid property\n"); + return -EINVAL; + } + + chip->revid = get_revid_data(revid_node); + of_node_put(revid_node); + if (IS_ERR_OR_NULL(chip->revid)) { + pr_err("Unable to get pmic_revid rc=%ld\n", + PTR_ERR(chip->revid)); + /* + * the revid peripheral must be registered, any failure + * here only indicates that the rev-id module has not + * probed yet. 
+ */ + return -EPROBE_DEFER; + } + + if (of_find_property(node, "qcom,pmic-misc", NULL)) { + misc_node = of_parse_phandle(node, "qcom,pmic-misc", 0); + if (!misc_node) + return -EINVAL; + + rc = of_property_read_u32(node, "qcom,misc-clk-trim-error-reg", + &chip->misc_clk_trim_error_reg); + if (rc < 0 || !chip->misc_clk_trim_error_reg) { + pr_err("Invalid or missing misc-clk-trim-error-reg\n"); + of_node_put(misc_node); + return rc; + } + + rc = qpnp_misc_read_reg(misc_node, + chip->misc_clk_trim_error_reg, + &chip->clk_trim_error_code); + if (rc < 0) { + pr_err("Couldn't get clk_trim_error_code, rc=%d\n", rc); + of_node_put(misc_node); + return -EPROBE_DEFER; + } + of_node_put(misc_node); + } + + chip->play_irq = platform_get_irq_byname(chip->pdev, "hap-play-irq"); + if (chip->play_irq < 0) { + pr_err("Unable to get play irq\n"); + return chip->play_irq; + } + + chip->sc_irq = platform_get_irq_byname(chip->pdev, "hap-sc-irq"); + if (chip->sc_irq < 0) { + pr_err("Unable to get sc irq\n"); + return chip->sc_irq; + } + + chip->act_type = HAP_LRA; + rc = of_property_read_u32(node, "qcom,actuator-type", &temp); + if (!rc) { + if (temp != HAP_LRA && temp != HAP_ERM) { + pr_err("Incorrect actuator type\n"); + return -EINVAL; + } + chip->act_type = temp; + } + + chip->lra_auto_mode = of_property_read_bool(node, "qcom,lra-auto-mode"); + + rc = of_property_read_string(node, "qcom,play-mode", &temp_str); + if (!rc) { + if (strcmp(temp_str, "direct") == 0) + chip->play_mode = HAP_DIRECT; + else if (strcmp(temp_str, "buffer") == 0) + chip->play_mode = HAP_BUFFER; + else if (strcmp(temp_str, "pwm") == 0) + chip->play_mode = HAP_PWM; + else if (strcmp(temp_str, "audio") == 0) + chip->play_mode = HAP_AUDIO; + else { + pr_err("Invalid play mode\n"); + return -EINVAL; + } + } else { + if (rc == -EINVAL && chip->act_type == HAP_LRA) { + pr_info("Play mode not specified, using auto mode\n"); + chip->lra_auto_mode = true; + } else { + pr_err("Unable to read play mode\n"); + return rc; + } + } + + chip->max_play_time_ms = HAP_MAX_PLAY_TIME_MS; + rc = of_property_read_u32(node, "qcom,max-play-time-ms", &temp); + if (!rc) { + chip->max_play_time_ms = temp; + } else if (rc != -EINVAL) { + pr_err("Unable to read max-play-time rc=%d\n", rc); + return rc; + } + + chip->vmax_mv = HAP_VMAX_MAX_MV; + rc = of_property_read_u32(node, "qcom,vmax-mv", &temp); + if (!rc) { + chip->vmax_mv = temp; + } else if (rc != -EINVAL) { + pr_err("Unable to read Vmax rc=%d\n", rc); + return rc; + } + + chip->ilim_ma = HAP_ILIM_400_MA; + rc = of_property_read_u32(node, "qcom,ilim-ma", &temp); + if (!rc) { + chip->ilim_ma = (u8)temp; + } else if (rc != -EINVAL) { + pr_err("Unable to read ILIM rc=%d\n", rc); + return rc; + } + + chip->sc_deb_cycles = HAP_DEF_SC_DEB_CYCLES; + rc = of_property_read_u32(node, "qcom,sc-dbc-cycles", &temp); + if (!rc) { + chip->sc_deb_cycles = temp; + } else if (rc != -EINVAL) { + pr_err("Unable to read sc debounce rc=%d\n", rc); + return rc; + } + + chip->wave_shape = HAP_WAVE_SQUARE; + rc = of_property_read_string(node, "qcom,wave-shape", &temp_str); + if (!rc) { + if (strcmp(temp_str, "sine") == 0) + chip->wave_shape = HAP_WAVE_SINE; + else if (strcmp(temp_str, "square") == 0) + chip->wave_shape = HAP_WAVE_SQUARE; + else { + pr_err("Unsupported wave shape\n"); + return -EINVAL; + } + } else if (rc != -EINVAL) { + pr_err("Unable to read wave shape rc=%d\n", rc); + return rc; + } + + chip->wave_play_rate_us = HAP_DEF_WAVE_PLAY_RATE_US; + rc = of_property_read_u32(node, + "qcom,wave-play-rate-us", &temp); + if 
(!rc) { + chip->wave_play_rate_us = temp; + } else if (rc != -EINVAL) { + pr_err("Unable to read play rate rc=%d\n", rc); + return rc; + } + + if (chip->wave_play_rate_us < HAP_WAVE_PLAY_RATE_US_MIN) + chip->wave_play_rate_us = HAP_WAVE_PLAY_RATE_US_MIN; + else if (chip->wave_play_rate_us > HAP_WAVE_PLAY_RATE_US_MAX) + chip->wave_play_rate_us = HAP_WAVE_PLAY_RATE_US_MAX; + + chip->en_brake = of_property_read_bool(node, "qcom,en-brake"); + + rc = of_property_count_elems_of_size(node, + "qcom,brake-pattern", sizeof(u32)); + if (rc > 0) { + if (rc != HAP_BRAKE_PAT_LEN) { + pr_err("Invalid length for brake pattern\n"); + return -EINVAL; + } + + rc = of_property_read_u32_array(node, "qcom,brake-pattern", + chip->brake_pat, HAP_BRAKE_PAT_LEN); + if (rc < 0) { + pr_err("Error in reading qcom,brake-pattern, rc=%d\n", + rc); + return rc; + } + } + + /* Read the following properties only for LRA */ + if (chip->act_type == HAP_LRA) { + rc = of_property_read_string(node, "qcom,lra-auto-res-mode", + &temp_str); + if (!rc) { + if (chip->revid->pmic_subtype == PM660_SUBTYPE) { + chip->ares_cfg.auto_res_mode = + HAP_PM660_AUTO_RES_QWD; + if (strcmp(temp_str, "zxd") == 0) + chip->ares_cfg.auto_res_mode = + HAP_PM660_AUTO_RES_ZXD; + else if (strcmp(temp_str, "qwd") == 0) + chip->ares_cfg.auto_res_mode = + HAP_PM660_AUTO_RES_QWD; + } else { + chip->ares_cfg.auto_res_mode = + HAP_AUTO_RES_ZXD_EOP; + if (strcmp(temp_str, "none") == 0) + chip->ares_cfg.auto_res_mode = + HAP_AUTO_RES_NONE; + else if (strcmp(temp_str, "zxd") == 0) + chip->ares_cfg.auto_res_mode = + HAP_AUTO_RES_ZXD; + else if (strcmp(temp_str, "qwd") == 0) + chip->ares_cfg.auto_res_mode = + HAP_AUTO_RES_QWD; + else if (strcmp(temp_str, "max-qwd") == 0) + chip->ares_cfg.auto_res_mode = + HAP_AUTO_RES_MAX_QWD; + else + chip->ares_cfg.auto_res_mode = + HAP_AUTO_RES_ZXD_EOP; + } + } else if (rc != -EINVAL) { + pr_err("Unable to read auto res mode rc=%d\n", rc); + return rc; + } + + chip->ares_cfg.lra_high_z = HAP_LRA_HIGH_Z_OPT3; + rc = of_property_read_string(node, "qcom,lra-high-z", + &temp_str); + if (!rc) { + if (strcmp(temp_str, "none") == 0) + chip->ares_cfg.lra_high_z = + HAP_LRA_HIGH_Z_NONE; + else if (strcmp(temp_str, "opt1") == 0) + chip->ares_cfg.lra_high_z = + HAP_LRA_HIGH_Z_OPT1; + else if (strcmp(temp_str, "opt2") == 0) + chip->ares_cfg.lra_high_z = + HAP_LRA_HIGH_Z_OPT2; + else + chip->ares_cfg.lra_high_z = + HAP_LRA_HIGH_Z_OPT3; + if (chip->revid->pmic_subtype == PM660_SUBTYPE) { + if (strcmp(temp_str, "opt0") == 0) + chip->ares_cfg.lra_high_z = + HAP_LRA_HIGH_Z_NONE; + } + } else if (rc != -EINVAL) { + pr_err("Unable to read LRA high-z rc=%d\n", rc); + return rc; + } + + chip->ares_cfg.lra_res_cal_period = HAP_RES_CAL_PERIOD_MAX; + rc = of_property_read_u32(node, + "qcom,lra-res-cal-period", &temp); + if (!rc) { + chip->ares_cfg.lra_res_cal_period = temp; + } else if (rc != -EINVAL) { + pr_err("Unable to read cal period rc=%d\n", rc); + return rc; + } + + chip->ares_cfg.lra_qwd_drive_duration = -EINVAL; + chip->ares_cfg.calibrate_at_eop = -EINVAL; + if (chip->revid->pmic_subtype == PM660_SUBTYPE) { + rc = of_property_read_u32(node, + "qcom,lra-qwd-drive-duration", + &chip->ares_cfg.lra_qwd_drive_duration); + if (rc && rc != -EINVAL) { + pr_err("Unable to read LRA QWD drive duration rc=%d\n", + rc); + return rc; + } + + rc = of_property_read_u32(node, + "qcom,lra-calibrate-at-eop", + &chip->ares_cfg.calibrate_at_eop); + if (rc && rc != -EINVAL) { + pr_err("Unable to read Calibrate at EOP rc=%d\n", + rc); + return rc; + } + } + + 
chip->drive_period_code_max_var_pct = 25; + rc = of_property_read_u32(node, + "qcom,drive-period-code-max-variation-pct", &temp); + if (!rc) { + if (temp > 0 && temp < 100) + chip->drive_period_code_max_var_pct = (u8)temp; + } else if (rc != -EINVAL) { + pr_err("Unable to read drive period code max var pct rc=%d\n", + rc); + return rc; + } + + chip->drive_period_code_min_var_pct = 25; + rc = of_property_read_u32(node, + "qcom,drive-period-code-min-variation-pct", &temp); + if (!rc) { + if (temp > 0 && temp < 100) + chip->drive_period_code_min_var_pct = (u8)temp; + } else if (rc != -EINVAL) { + pr_err("Unable to read drive period code min var pct rc=%d\n", + rc); + return rc; + } + + chip->auto_res_err_recovery_hw = + of_property_read_bool(node, + "qcom,auto-res-err-recovery-hw"); + + if (chip->revid->pmic_subtype != PM660_SUBTYPE) + chip->auto_res_err_recovery_hw = false; + } + + if (rc == -EINVAL) + rc = 0; + + if (chip->play_mode == HAP_BUFFER) + rc = qpnp_haptics_parse_buffer_dt(chip); + else if (chip->play_mode == HAP_PWM) + rc = qpnp_haptics_parse_pwm_dt(chip); + + if (of_find_property(node, "vcc_pon-supply", NULL)) { + vcc_pon = regulator_get(&chip->pdev->dev, "vcc_pon"); + if (IS_ERR(vcc_pon)) { + rc = PTR_ERR(vcc_pon); + dev_err(&chip->pdev->dev, + "regulator get failed vcc_pon rc=%d\n", rc); + } + chip->vcc_pon = vcc_pon; + } + + return rc; +} + +static int qpnp_haptics_probe(struct platform_device *pdev) +{ + struct hap_chip *chip; + int rc, i; + + chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; + + chip->regmap = dev_get_regmap(pdev->dev.parent, NULL); + if (!chip->regmap) { + dev_err(&pdev->dev, "Couldn't get parent's regmap\n"); + return -EINVAL; + } + + chip->pdev = pdev; + rc = qpnp_haptics_parse_dt(chip); + if (rc < 0) { + dev_err(&pdev->dev, "Error in parsing DT parameters, rc=%d\n", + rc); + return rc; + } + + spin_lock_init(&chip->bus_lock); + mutex_init(&chip->play_lock); + mutex_init(&chip->param_lock); + INIT_WORK(&chip->haptics_work, qpnp_haptics_work); + + rc = qpnp_haptics_config(chip); + if (rc < 0) { + dev_err(&pdev->dev, "Error in configuring haptics, rc=%d\n", + rc); + goto fail; + } + + hrtimer_init(&chip->stop_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + chip->stop_timer.function = hap_stop_timer; + hrtimer_init(&chip->auto_res_err_poll_timer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL); + chip->auto_res_err_poll_timer.function = hap_auto_res_err_poll_timer; + dev_set_drvdata(&pdev->dev, chip); + + chip->cdev.name = "vibrator"; + chip->cdev.brightness_get = qpnp_haptics_brightness_get; + chip->cdev.brightness_set = qpnp_haptics_brightness_set; + chip->cdev.max_brightness = 100; + rc = devm_led_classdev_register(&pdev->dev, &chip->cdev); + if (rc < 0) { + dev_err(&pdev->dev, "Error in registering led class device, rc=%d\n", + rc); + goto register_fail; + } + + for (i = 0; i < ARRAY_SIZE(qpnp_haptics_attrs); i++) { + rc = sysfs_create_file(&chip->cdev.dev->kobj, + &qpnp_haptics_attrs[i].attr); + if (rc < 0) { + dev_err(&pdev->dev, "Error in creating sysfs file, rc=%d\n", + rc); + goto sysfs_fail; + } + } + + return 0; + +sysfs_fail: + for (--i; i >= 0; i--) + sysfs_remove_file(&chip->cdev.dev->kobj, + &qpnp_haptics_attrs[i].attr); +register_fail: + cancel_work_sync(&chip->haptics_work); + hrtimer_cancel(&chip->auto_res_err_poll_timer); + hrtimer_cancel(&chip->stop_timer); +fail: + mutex_destroy(&chip->play_lock); + mutex_destroy(&chip->param_lock); + if (chip->pwm_data.pwm_dev) + pwm_put(chip->pwm_data.pwm_dev); + 
dev_set_drvdata(&pdev->dev, NULL); + return rc; +} + +static int qpnp_haptics_remove(struct platform_device *pdev) +{ + struct hap_chip *chip = dev_get_drvdata(&pdev->dev); + + cancel_work_sync(&chip->haptics_work); + hrtimer_cancel(&chip->auto_res_err_poll_timer); + hrtimer_cancel(&chip->stop_timer); + mutex_destroy(&chip->play_lock); + mutex_destroy(&chip->param_lock); + if (chip->pwm_data.pwm_dev) + pwm_put(chip->pwm_data.pwm_dev); + dev_set_drvdata(&pdev->dev, NULL); + + return 0; +} + +static void qpnp_haptics_shutdown(struct platform_device *pdev) +{ + struct hap_chip *chip = dev_get_drvdata(&pdev->dev); + + cancel_work_sync(&chip->haptics_work); + + /* disable haptics */ + qpnp_haptics_mod_enable(chip, false); +} + +static const struct dev_pm_ops qpnp_haptics_pm_ops = { + .suspend = qpnp_haptics_suspend, +}; + +static const struct of_device_id hap_match_table[] = { + { .compatible = "qcom,qpnp-haptics" }, + { }, +}; + +static struct platform_driver qpnp_haptics_driver = { + .driver = { + .name = "qcom,qpnp-haptics", + .of_match_table = hap_match_table, + .pm = &qpnp_haptics_pm_ops, + }, + .probe = qpnp_haptics_probe, + .remove = qpnp_haptics_remove, + .shutdown = qpnp_haptics_shutdown, +}; +module_platform_driver(qpnp_haptics_driver); + +MODULE_DESCRIPTION("QPNP haptics driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/msm/vidc/hfi_response_handler.c b/drivers/media/platform/msm/vidc/hfi_response_handler.c index 36a17d4f2018..00520dbbf70a 100644 --- a/drivers/media/platform/msm/vidc/hfi_response_handler.c +++ b/drivers/media/platform/msm/vidc/hfi_response_handler.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2016, 2019, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2016,2019 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -606,6 +606,11 @@ static int hfi_fill_codec_info(u8 *data_ptr, vidc_get_hal_codec((1 << i) & codecs); capability->domain = vidc_get_hal_domain(HFI_VIDEO_DOMAIN_DECODER); + if (codec_count == VIDC_MAX_DECODE_SESSIONS) { + dprintk(VIDC_ERR, + "Max supported decoder sessions reached"); + break; + } } } codecs = sys_init_done->enc_codec_supported; @@ -617,6 +622,11 @@ static int hfi_fill_codec_info(u8 *data_ptr, vidc_get_hal_codec((1 << i) & codecs); capability->domain = vidc_get_hal_domain(HFI_VIDEO_DOMAIN_ENCODER); + if (codec_count == VIDC_MAX_SESSIONS) { + dprintk(VIDC_ERR, + "Max supported sessions reached"); + break; + } } } sys_init_done->codec_count = codec_count; diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h index d946b035b284..076357ca23d0 100644 --- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h +++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2017,2019 The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -66,6 +66,9 @@ /* 16 encoder and 16 decoder sessions */ #define VIDC_MAX_SESSIONS 32 +#define VIDC_MAX_DECODE_SESSIONS 16 +#define VIDC_MAX_ENCODE_SESSIONS 16 + enum vidc_status { VIDC_ERR_NONE = 0x0, diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c index ffd448149796..4a2ae06d0da4 100644 --- a/drivers/mmc/host/sdhci-iproc.c +++ b/drivers/mmc/host/sdhci-iproc.c @@ -217,7 +217,10 @@ static int sdhci_iproc_probe(struct platform_device *pdev) iproc_host->data = iproc_data; - mmc_of_parse(host->mmc); + ret = mmc_of_parse(host->mmc); + if (ret) + goto err; + sdhci_get_of_property(pdev); /* Enable EMMC 1/8V DDR capable */ diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 1dbee1cb3df9..8b7c6425b681 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -426,8 +426,6 @@ EXPORT_SYMBOL_GPL(can_put_echo_skb); struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr) { struct can_priv *priv = netdev_priv(dev); - struct sk_buff *skb = priv->echo_skb[idx]; - struct canfd_frame *cf; if (idx >= priv->echo_skb_max) { netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n", @@ -435,20 +433,21 @@ struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 return NULL; } - if (!skb) { - netdev_err(dev, "%s: BUG! Trying to echo non existing skb: can_priv::echo_skb[%u]\n", - __func__, idx); - return NULL; - } + if (priv->echo_skb[idx]) { + /* Using "struct canfd_frame::len" for the frame + * length is supported on both CAN and CANFD frames. + */ + struct sk_buff *skb = priv->echo_skb[idx]; + struct canfd_frame *cf = (struct canfd_frame *)skb->data; + u8 len = cf->len; + + *len_ptr = len; + priv->echo_skb[idx] = NULL; - /* Using "struct canfd_frame::len" for the frame - * length is supported on both CAN and CANFD frames. 
- */ - cf = (struct canfd_frame *)skb->data; - *len_ptr = cf->len; - priv->echo_skb[idx] = NULL; + return skb; + } - return skb; + return NULL; } /* diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 650f7888e32b..55ac00055977 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -1888,6 +1888,8 @@ static void ucc_geth_free_tx(struct ucc_geth_private *ugeth) u16 i, j; u8 __iomem *bd; + netdev_reset_queue(ugeth->ndev); + ug_info = ugeth->ug_info; uf_info = &ug_info->uf_info; diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 90db94e83fde..033f99d2f15c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -1906,9 +1906,11 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev, { struct mlx4_cmd_mailbox *mailbox; __be32 *outbox; + u64 qword_field; u32 dword_field; - int err; + u16 word_field; u8 byte_field; + int err; static const u8 a0_dmfs_query_hw_steering[] = { [0] = MLX4_STEERING_DMFS_A0_DEFAULT, [1] = MLX4_STEERING_DMFS_A0_DYNAMIC, @@ -1936,19 +1938,32 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev, /* QPC/EEC/CQC/EQC/RDMARC attributes */ - MLX4_GET(param->qpc_base, outbox, INIT_HCA_QPC_BASE_OFFSET); - MLX4_GET(param->log_num_qps, outbox, INIT_HCA_LOG_QP_OFFSET); - MLX4_GET(param->srqc_base, outbox, INIT_HCA_SRQC_BASE_OFFSET); - MLX4_GET(param->log_num_srqs, outbox, INIT_HCA_LOG_SRQ_OFFSET); - MLX4_GET(param->cqc_base, outbox, INIT_HCA_CQC_BASE_OFFSET); - MLX4_GET(param->log_num_cqs, outbox, INIT_HCA_LOG_CQ_OFFSET); - MLX4_GET(param->altc_base, outbox, INIT_HCA_ALTC_BASE_OFFSET); - MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET); - MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET); - MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET); - MLX4_GET(param->num_sys_eqs, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET); - MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET); - MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET); + MLX4_GET(qword_field, outbox, INIT_HCA_QPC_BASE_OFFSET); + param->qpc_base = qword_field & ~((u64)0x1f); + MLX4_GET(byte_field, outbox, INIT_HCA_LOG_QP_OFFSET); + param->log_num_qps = byte_field & 0x1f; + MLX4_GET(qword_field, outbox, INIT_HCA_SRQC_BASE_OFFSET); + param->srqc_base = qword_field & ~((u64)0x1f); + MLX4_GET(byte_field, outbox, INIT_HCA_LOG_SRQ_OFFSET); + param->log_num_srqs = byte_field & 0x1f; + MLX4_GET(qword_field, outbox, INIT_HCA_CQC_BASE_OFFSET); + param->cqc_base = qword_field & ~((u64)0x1f); + MLX4_GET(byte_field, outbox, INIT_HCA_LOG_CQ_OFFSET); + param->log_num_cqs = byte_field & 0x1f; + MLX4_GET(qword_field, outbox, INIT_HCA_ALTC_BASE_OFFSET); + param->altc_base = qword_field; + MLX4_GET(qword_field, outbox, INIT_HCA_AUXC_BASE_OFFSET); + param->auxc_base = qword_field; + MLX4_GET(qword_field, outbox, INIT_HCA_EQC_BASE_OFFSET); + param->eqc_base = qword_field & ~((u64)0x1f); + MLX4_GET(byte_field, outbox, INIT_HCA_LOG_EQ_OFFSET); + param->log_num_eqs = byte_field & 0x1f; + MLX4_GET(word_field, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET); + param->num_sys_eqs = word_field & 0xfff; + MLX4_GET(qword_field, outbox, INIT_HCA_RDMARC_BASE_OFFSET); + param->rdmarc_base = qword_field & ~((u64)0x1f); + MLX4_GET(byte_field, outbox, INIT_HCA_LOG_RD_OFFSET); + param->log_rd_per_qp = byte_field & 0x7; MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET); if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) { @@ -1967,22 
+1982,21 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev, /* steering attributes */ if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET); - MLX4_GET(param->log_mc_entry_sz, outbox, - INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET); - MLX4_GET(param->log_mc_table_sz, outbox, - INIT_HCA_FS_LOG_TABLE_SZ_OFFSET); - MLX4_GET(byte_field, outbox, - INIT_HCA_FS_A0_OFFSET); + MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET); + param->log_mc_entry_sz = byte_field & 0x1f; + MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_TABLE_SZ_OFFSET); + param->log_mc_table_sz = byte_field & 0x1f; + MLX4_GET(byte_field, outbox, INIT_HCA_FS_A0_OFFSET); param->dmfs_high_steer_mode = a0_dmfs_query_hw_steering[(byte_field >> 6) & 3]; } else { MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET); - MLX4_GET(param->log_mc_entry_sz, outbox, - INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); - MLX4_GET(param->log_mc_hash_sz, outbox, - INIT_HCA_LOG_MC_HASH_SZ_OFFSET); - MLX4_GET(param->log_mc_table_sz, outbox, - INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); + MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); + param->log_mc_entry_sz = byte_field & 0x1f; + MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_HASH_SZ_OFFSET); + param->log_mc_hash_sz = byte_field & 0x1f; + MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); + param->log_mc_table_sz = byte_field & 0x1f; } /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */ @@ -2006,15 +2020,18 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev, /* TPT attributes */ MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET); - MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET); - MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET); + MLX4_GET(byte_field, outbox, INIT_HCA_TPT_MW_OFFSET); + param->mw_enabled = byte_field >> 7; + MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET); + param->log_mpt_sz = byte_field & 0x3f; MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET); MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET); /* UAR attributes */ MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET); - MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET); + MLX4_GET(byte_field, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET); + param->log_uar_sz = byte_field & 0xf; /* phv_check enable */ MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET); diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index 583d50f80b24..02327e6c4819 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -442,6 +442,7 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev, if (pskb_trim_rcsum(skb, len)) goto drop; + ph = pppoe_hdr(skb); pn = pppoe_pernet(dev_net(dev)); /* Note that get_item does a sock_hold(), so sk_pppox(po) diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c index df5f72c7b25d..8bf7b76f4131 100644 --- a/drivers/net/wireless/cnss2/main.c +++ b/drivers/net/wireless/cnss2/main.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1689,6 +1689,30 @@ static ssize_t cnss_fs_ready_store(struct device *dev, static DEVICE_ATTR(fs_ready, 0220, NULL, cnss_fs_ready_store); +static ssize_t cnss_wl_pwr_on(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + int pwr_state = 0; + struct cnss_plat_data *plat_priv = dev_get_drvdata(dev); + + if (sscanf(buf, "%du", &pwr_state) != 1) + return -EINVAL; + + cnss_pr_dbg("vreg-wlan-en state change %d, count %zu", pwr_state, + count); + + if (pwr_state) + cnss_power_on_device(plat_priv); + else + cnss_power_off_device(plat_priv); + + return count; +} + +static DEVICE_ATTR(wl_pwr_on, 0220, NULL, cnss_wl_pwr_on); + static int cnss_create_sysfs(struct cnss_plat_data *plat_priv) { int ret = 0; @@ -1709,6 +1733,27 @@ static void cnss_remove_sysfs(struct cnss_plat_data *plat_priv) device_remove_file(&plat_priv->plat_dev->dev, &dev_attr_fs_ready); } +static int cnss_create_sysfs_wl_pwr(struct cnss_plat_data *plat_priv) +{ + int ret = 0; + + ret = device_create_file(&plat_priv->plat_dev->dev, + &dev_attr_wl_pwr_on); + if (ret) { + cnss_pr_err("Failed to create device file, err = %d\n", ret); + goto out; + } + cnss_pr_dbg("created sysfs for vreg-wlan-en control\n"); + return 0; +out: + return ret; +} + +static void cnss_remove_sysfs_wl_pwr(struct cnss_plat_data *plat_priv) +{ + device_remove_file(&plat_priv->plat_dev->dev, &dev_attr_wl_pwr_on); +} + static int cnss_event_work_init(struct cnss_plat_data *plat_priv) { spin_lock_init(&plat_priv->event_lock); @@ -1798,6 +1843,7 @@ static int cnss_probe(struct platform_device *plat_dev) plat_priv->plat_dev = plat_dev; plat_priv->device_id = device_id->driver_data; plat_priv->bus_type = cnss_get_bus_type(plat_priv->device_id); + cnss_pr_dbg("bus type selected %d\n", plat_priv->bus_type); cnss_set_plat_priv(plat_dev, plat_priv); platform_set_drvdata(plat_dev, plat_priv); @@ -1827,10 +1873,14 @@ static int cnss_probe(struct platform_device *plat_dev) if (ret) goto unreg_bus_scale; - ret = cnss_event_work_init(plat_priv); + ret = cnss_create_sysfs_wl_pwr(plat_priv); if (ret) goto remove_sysfs; + ret = cnss_event_work_init(plat_priv); + if (ret) + goto remove_sysfs_pwr; + ret = cnss_qmi_init(plat_priv); if (ret) goto deinit_event_work; @@ -1870,6 +1920,8 @@ deinit_event_work: cnss_event_work_deinit(plat_priv); remove_sysfs: cnss_remove_sysfs(plat_priv); +remove_sysfs_pwr: + cnss_remove_sysfs_wl_pwr(plat_priv); unreg_bus_scale: cnss_unregister_bus_scale(plat_priv); unreg_esoc: diff --git a/drivers/net/wireless/cnss2/qmi.c b/drivers/net/wireless/cnss2/qmi.c index 3fbfb590a384..30ac0cc9ad51 100644 --- a/drivers/net/wireless/cnss2/qmi.c +++ b/drivers/net/wireless/cnss2/qmi.c @@ -25,6 +25,7 @@ #define MAX_BDF_FILE_NAME 11 #define DEFAULT_BDF_FILE_NAME "bdwlan.elf" #define BDF_FILE_NAME_PREFIX "bdwlan.e" +#define BIN_BDF_FILE_NAME_PREFIX "bdwlan.b" #define DEFAULT_BIN_BDF_FILE_NAME "bdwlan.bin" #ifdef CONFIG_CNSS2_DEBUG @@ -803,10 +804,16 @@ int cnss_wlfw_bdf_dnld_send_sync(struct cnss_plat_data *plat_priv) else snprintf(filename, sizeof(filename), DEFAULT_BDF_FILE_NAME); - else - snprintf(filename, sizeof(filename), - BDF_FILE_NAME_PREFIX "%02x", - plat_priv->board_info.board_id); + else { + if (bdf_type == CNSS_BDF_BIN) + snprintf(filename, sizeof(filename), + BIN_BDF_FILE_NAME_PREFIX "%02x", + plat_priv->board_info.board_id); + else + snprintf(filename, sizeof(filename), + 
BDF_FILE_NAME_PREFIX "%02x", + plat_priv->board_info.board_id); + } if (bdf_bypass) { cnss_pr_info("bdf_bypass is enabled, sending dummy BDF\n"); diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c index 852d2de7f69f..a284a2b42bcd 100644 --- a/drivers/platform/x86/asus-nb-wmi.c +++ b/drivers/platform/x86/asus-nb-wmi.c @@ -339,8 +339,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = { { KE_KEY, 0x30, { KEY_VOLUMEUP } }, { KE_KEY, 0x31, { KEY_VOLUMEDOWN } }, { KE_KEY, 0x32, { KEY_MUTE } }, - { KE_KEY, 0x33, { KEY_DISPLAYTOGGLE } }, /* LCD on */ - { KE_KEY, 0x34, { KEY_DISPLAY_OFF } }, /* LCD off */ + { KE_KEY, 0x35, { KEY_SCREENLOCK } }, { KE_KEY, 0x40, { KEY_PREVIOUSSONG } }, { KE_KEY, 0x41, { KEY_NEXTSONG } }, { KE_KEY, 0x43, { KEY_STOPCD } }, /* Stop/Eject */ diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c index 58ec25cab969..41dd7ddb5e40 100644 --- a/drivers/power/power_supply_sysfs.c +++ b/drivers/power/power_supply_sysfs.c @@ -310,6 +310,8 @@ static struct device_attribute power_supply_attrs[] = { POWER_SUPPLY_ATTR(battery_info), POWER_SUPPLY_ATTR(battery_info_id), POWER_SUPPLY_ATTR(enable_jeita_detection), + POWER_SUPPLY_ATTR(allow_hvdcp3), + POWER_SUPPLY_ATTR(max_pulse_allowed), /* Local extensions of type int64_t */ POWER_SUPPLY_ATTR(charge_counter_ext), /* Properties of type `const char *' */ diff --git a/drivers/power/supply/qcom/qpnp-fg.c b/drivers/power/supply/qcom/qpnp-fg.c index a12b0adfa32d..1ace44f121b4 100644 --- a/drivers/power/supply/qcom/qpnp-fg.c +++ b/drivers/power/supply/qcom/qpnp-fg.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2017, 2019, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -33,6 +33,7 @@ #include <linux/ktime.h> #include <linux/power_supply.h> #include <linux/of_batterydata.h> +#include <linux/spinlock.h> #include <linux/string_helpers.h> #include <linux/alarmtimer.h> #include <linux/qpnp/qpnp-revid.h> @@ -72,6 +73,7 @@ #define QPNP_FG_DEV_NAME "qcom,qpnp-fg" #define MEM_IF_TIMEOUT_MS 5000 +#define FG_CYCLE_MS 1500 #define BUCKET_COUNT 8 #define BUCKET_SOC_PCT (256 / BUCKET_COUNT) @@ -108,6 +110,7 @@ enum pmic_subtype { PMI8950 = 17, PMI8996 = 19, PMI8937 = 55, + PMI8940 = 64, }; enum wa_flags { @@ -150,6 +153,8 @@ struct fg_learning_data { int min_temp; int max_temp; int vbat_est_thr_uv; + int max_cap_limit; + int min_cap_limit; }; struct fg_rslow_data { @@ -275,11 +280,45 @@ static struct fg_mem_data fg_data[FG_DATA_MAX] = { DATA(BATT_ID_INFO, 0x594, 3, 1, -EINVAL), }; +enum fg_mem_backup_index { + FG_BACKUP_SOC = 0, + FG_BACKUP_CYCLE_COUNT, + FG_BACKUP_CC_SOC_COEFF, + FG_BACKUP_IGAIN, + FG_BACKUP_VCOR, + FG_BACKUP_TEMP_COUNTER, + FG_BACKUP_AGING_STORAGE, + FG_BACKUP_MAH_TO_SOC, + FG_BACKUP_MAX, +}; + +#define BACKUP(_idx, _address, _offset, _length, _value) \ + [FG_BACKUP_##_idx] = { \ + .address = _address, \ + .offset = _offset, \ + .len = _length, \ + .value = _value, \ + } \ + +static struct fg_mem_data fg_backup_regs[FG_BACKUP_MAX] = { + /* ID Address, Offset, Length, Value*/ + BACKUP(SOC, 0x564, 0, 24, -EINVAL), + BACKUP(CYCLE_COUNT, 0x5E8, 0, 16, -EINVAL), + BACKUP(CC_SOC_COEFF, 0x5BC, 0, 8, -EINVAL), + BACKUP(IGAIN, 0x424, 0, 4, -EINVAL), + BACKUP(VCOR, 0x484, 0, 4, -EINVAL), + BACKUP(TEMP_COUNTER, 0x580, 0, 4, -EINVAL), + BACKUP(AGING_STORAGE, 0x5E4, 0, 4, 
-EINVAL), + BACKUP(MAH_TO_SOC, 0x4A0, 0, 4, -EINVAL), +}; + static int fg_debug_mask; module_param_named( debug_mask, fg_debug_mask, int, 00600 ); +static int fg_reset_on_lockup; + static int fg_sense_type = -EINVAL; static int fg_restart; @@ -298,9 +337,18 @@ module_param_named( sram_update_period_ms, fg_sram_update_period_ms, int, 00600 ); +static bool fg_batt_valid_ocv; +module_param_named(batt_valid_ocv, fg_batt_valid_ocv, bool, 0600 +); + +static int fg_batt_range_pct; +module_param_named(batt_range_pct, fg_batt_range_pct, int, 0600 +); + struct fg_irq { int irq; - unsigned long disabled; + bool disabled; + bool wakeup; }; enum fg_soc_irq { @@ -348,6 +396,16 @@ enum register_type { MAX_ADDRESS, }; +enum batt_info_params { + BATT_INFO_NOTIFY = 0, + BATT_INFO_SOC, + BATT_INFO_RES_ID, + BATT_INFO_VOLTAGE, + BATT_INFO_TEMP, + BATT_INFO_FCC, + BATT_INFO_MAX, +}; + struct register_offset { u16 address[MAX_ADDRESS]; }; @@ -395,6 +453,22 @@ static void fg_relax(struct fg_wakeup_source *source) } } +enum slope_limit_status { + LOW_TEMP_CHARGE, + HIGH_TEMP_CHARGE, + LOW_TEMP_DISCHARGE, + HIGH_TEMP_DISCHARGE, + SLOPE_LIMIT_MAX, +}; + +#define VOLT_GAIN_MAX 3 +struct dischg_gain_soc { + bool enable; + u32 soc[VOLT_GAIN_MAX]; + u32 medc_gain[VOLT_GAIN_MAX]; + u32 highc_gain[VOLT_GAIN_MAX]; +}; + #define THERMAL_COEFF_N_BYTES 6 struct fg_chip { struct device *dev; @@ -420,6 +494,7 @@ struct fg_chip { struct completion first_soc_done; struct power_supply *bms_psy; struct power_supply_desc bms_psy_d; + spinlock_t sec_access_lock; struct mutex rw_lock; struct mutex sysfs_restart_lock; struct delayed_work batt_profile_init; @@ -449,6 +524,7 @@ struct fg_chip { struct fg_wakeup_source update_sram_wakeup_source; bool fg_restarting; bool profile_loaded; + bool soc_reporting_ready; bool use_otp_profile; bool battery_missing; bool power_supply_registered; @@ -459,6 +535,7 @@ struct fg_chip { bool charge_done; bool resume_soc_lowered; bool vbat_low_irq_enabled; + bool full_soc_irq_enabled; bool charge_full; bool hold_soc_while_full; bool input_present; @@ -467,6 +544,10 @@ struct fg_chip { bool bad_batt_detection_en; bool bcl_lpm_disabled; bool charging_disabled; + bool use_vbat_low_empty_soc; + bool fg_shutdown; + bool use_soft_jeita_irq; + bool allow_false_negative_isense; struct delayed_work update_jeita_setting; struct delayed_work update_sram_data; struct delayed_work update_temp_work; @@ -491,6 +572,7 @@ struct fg_chip { int prev_status; int health; enum fg_batt_aging_mode batt_aging_mode; + struct alarm hard_jeita_alarm; /* capacity learning */ struct fg_learning_data learning_data; struct alarm fg_cap_learning_alarm; @@ -498,6 +580,7 @@ struct fg_chip { struct fg_cc_soc_data sw_cc_soc_data; /* rslow compensation */ struct fg_rslow_data rslow_comp; + int rconn_mohm; /* cycle counter */ struct fg_cyc_ctr_data cyc_ctr; /* iadc compensation */ @@ -510,6 +593,8 @@ struct fg_chip { bool jeita_hysteresis_support; bool batt_hot; bool batt_cold; + bool batt_warm; + bool batt_cool; int cold_hysteresis; int hot_hysteresis; /* ESR pulse tuning */ @@ -518,6 +603,47 @@ struct fg_chip { bool esr_extract_disabled; bool imptr_pulse_slow_en; bool esr_pulse_tune_en; + /* Slope limiter */ + struct work_struct slope_limiter_work; + struct fg_wakeup_source slope_limit_wakeup_source; + bool soc_slope_limiter_en; + enum slope_limit_status slope_limit_sts; + u32 slope_limit_temp; + u32 slope_limit_coeffs[SLOPE_LIMIT_MAX]; + /* Discharge soc gain */ + struct work_struct dischg_gain_work; + struct fg_wakeup_source 
dischg_gain_wakeup_source; + struct dischg_gain_soc dischg_gain; + /* IMA error recovery */ + struct completion fg_reset_done; + struct work_struct ima_error_recovery_work; + struct fg_wakeup_source fg_reset_wakeup_source; + struct mutex ima_recovery_lock; + bool ima_error_handling; + bool block_sram_access; + bool irqs_enabled; + bool use_last_soc; + int last_soc; + /* Validating temperature */ + int last_good_temp; + int batt_temp_low_limit; + int batt_temp_high_limit; + /* Validating CC_SOC */ + struct work_struct cc_soc_store_work; + struct fg_wakeup_source cc_soc_wakeup_source; + int cc_soc_limit_pct; + bool use_last_cc_soc; + int64_t last_cc_soc; + /* Sanity check */ + struct delayed_work check_sanity_work; + struct fg_wakeup_source sanity_wakeup_source; + u8 last_beat_count; + /* Batt_info restore */ + int batt_info[BATT_INFO_MAX]; + int batt_info_id; + bool batt_info_restore; + bool *batt_range_ocv; + int *batt_range_pct; }; /* FG_MEMIF DEBUGFS structures */ @@ -661,17 +787,56 @@ static int fg_read(struct fg_chip *chip, u8 *val, u16 addr, int len) return rc; } -static int fg_masked_write(struct fg_chip *chip, u16 addr, +static int fg_masked_write_raw(struct fg_chip *chip, u16 addr, u8 mask, u8 val, int len) { int rc; rc = regmap_update_bits(chip->regmap, addr, mask, val); - if (rc) { + if (rc) pr_err("spmi write failed: addr=%03X, rc=%d\n", addr, rc); - return rc; + + return rc; +} + +static int fg_masked_write(struct fg_chip *chip, u16 addr, + u8 mask, u8 val, int len) +{ + int rc; + unsigned long flags; + + spin_lock_irqsave(&chip->sec_access_lock, flags); + rc = fg_masked_write_raw(chip, addr, mask, val, len); + spin_unlock_irqrestore(&chip->sec_access_lock, flags); + + return rc; +} + +#define SEC_ACCESS_OFFSET 0xD0 +#define SEC_ACCESS_VALUE 0xA5 +#define PERIPHERAL_MASK 0xFF +static int fg_sec_masked_write(struct fg_chip *chip, u16 addr, u8 mask, u8 val, + int len) +{ + int rc; + unsigned long flags; + u8 temp; + u16 base = addr & (~PERIPHERAL_MASK); + + spin_lock_irqsave(&chip->sec_access_lock, flags); + temp = SEC_ACCESS_VALUE; + rc = fg_write(chip, &temp, base + SEC_ACCESS_OFFSET, 1); + if (rc) { + pr_err("Unable to unlock sec_access: %d\n", rc); + goto out; } + rc = fg_masked_write_raw(chip, addr, mask, val, len); + if (rc) + pr_err("Unable to write securely to address 0x%x: %d", addr, + rc); +out: + spin_unlock_irqrestore(&chip->sec_access_lock, flags); return rc; } @@ -952,6 +1117,7 @@ static int fg_conventional_mem_write(struct fg_chip *chip, u8 *val, u16 address, int rc = 0, user_cnt = 0, sublen; bool access_configured = false; u8 *wr_data = val, word[4]; + u16 orig_address = address; char str[DEBUG_PRINT_BUFFER_SIZE]; if (address < RAM_OFFSET) @@ -960,8 +1126,8 @@ static int fg_conventional_mem_write(struct fg_chip *chip, u8 *val, u16 address, if (offset > 3) return -EINVAL; - address = ((address + offset) / 4) * 4; - offset = (address + offset) % 4; + address = ((orig_address + offset) / 4) * 4; + offset = (orig_address + offset) % 4; user_cnt = atomic_add_return(1, &chip->memif_user_cnt); if (fg_debug_mask & FG_MEM_DEBUG_WRITES) @@ -1061,50 +1227,253 @@ out: #define MEM_INTF_IMA_EXP_STS 0x55 #define MEM_INTF_IMA_HW_STS 0x56 #define MEM_INTF_IMA_BYTE_EN 0x60 -#define IMA_ADDR_STBL_ERR BIT(7) -#define IMA_WR_ACS_ERR BIT(6) -#define IMA_RD_ACS_ERR BIT(5) #define IMA_IACS_CLR BIT(2) #define IMA_IACS_RDY BIT(1) -static int fg_check_ima_exception(struct fg_chip *chip) +static int fg_run_iacs_clear_sequence(struct fg_chip *chip) +{ + int rc = 0; + u8 temp; + + if 
(fg_debug_mask & FG_STATUS) + pr_info("Running IACS clear sequence\n"); + + /* clear the error */ + rc = fg_masked_write(chip, chip->mem_base + MEM_INTF_IMA_CFG, + IMA_IACS_CLR, IMA_IACS_CLR, 1); + if (rc) { + pr_err("Error writing to IMA_CFG, rc=%d\n", rc); + return rc; + } + + temp = 0x4; + rc = fg_write(chip, &temp, MEM_INTF_ADDR_LSB(chip) + 1, 1); + if (rc) { + pr_err("Error writing to MEM_INTF_ADDR_MSB, rc=%d\n", rc); + return rc; + } + + temp = 0x0; + rc = fg_write(chip, &temp, MEM_INTF_WR_DATA0(chip) + 3, 1); + if (rc) { + pr_err("Error writing to WR_DATA3, rc=%d\n", rc); + return rc; + } + + rc = fg_read(chip, &temp, MEM_INTF_RD_DATA0(chip) + 3, 1); + if (rc) { + pr_err("Error reading RD_DATA3, rc=%d\n", rc); + return rc; + } + + rc = fg_masked_write(chip, chip->mem_base + MEM_INTF_IMA_CFG, + IMA_IACS_CLR, 0, 1); + if (rc) { + pr_err("Error writing to IMA_CFG, rc=%d\n", rc); + return rc; + } + + if (fg_debug_mask & FG_STATUS) + pr_info("IACS clear sequence complete!\n"); + return rc; +} + +#define IACS_ERR_BIT BIT(0) +#define XCT_ERR_BIT BIT(1) +#define DATA_RD_ERR_BIT BIT(3) +#define DATA_WR_ERR_BIT BIT(4) +#define ADDR_BURST_WRAP_BIT BIT(5) +#define ADDR_RNG_ERR_BIT BIT(6) +#define ADDR_SRC_ERR_BIT BIT(7) +static int fg_check_ima_exception(struct fg_chip *chip, bool check_hw_sts) { int rc = 0, ret = 0; - u8 err_sts, exp_sts = 0, hw_sts = 0; + u8 err_sts = 0, exp_sts = 0, hw_sts = 0; + bool run_err_clr_seq = false; rc = fg_read(chip, &err_sts, chip->mem_base + MEM_INTF_IMA_ERR_STS, 1); if (rc) { - pr_err("failed to read beat count rc=%d\n", rc); + pr_err("failed to read IMA_ERR_STS, rc=%d\n", rc); return rc; } - if (err_sts & (IMA_ADDR_STBL_ERR | IMA_WR_ACS_ERR | IMA_RD_ACS_ERR)) { - u8 temp; - - fg_read(chip, &exp_sts, + rc = fg_read(chip, &exp_sts, chip->mem_base + MEM_INTF_IMA_EXP_STS, 1); - fg_read(chip, &hw_sts, + if (rc) { + pr_err("Error in reading IMA_EXP_STS, rc=%d\n", rc); + return rc; + } + + rc = fg_read(chip, &hw_sts, chip->mem_base + MEM_INTF_IMA_HW_STS, 1); - pr_err("IMA access failed ima_err_sts=%x ima_exp_sts=%x ima_hw_sts=%x\n", - err_sts, exp_sts, hw_sts); - rc = err_sts; - - /* clear the error */ - ret |= fg_masked_write(chip, chip->mem_base + MEM_INTF_IMA_CFG, - IMA_IACS_CLR, IMA_IACS_CLR, 1); - temp = 0x4; - ret |= fg_write(chip, &temp, MEM_INTF_ADDR_LSB(chip) + 1, 1); - temp = 0x0; - ret |= fg_write(chip, &temp, MEM_INTF_WR_DATA0(chip) + 3, 1); - ret |= fg_read(chip, &temp, MEM_INTF_RD_DATA0(chip) + 3, 1); - ret |= fg_masked_write(chip, chip->mem_base + MEM_INTF_IMA_CFG, - IMA_IACS_CLR, 0, 1); + if (rc) { + pr_err("Error in reading IMA_HW_STS, rc=%d\n", rc); + return rc; + } + + pr_info_once("Initial ima_err_sts=%x ima_exp_sts=%x ima_hw_sts=%x\n", + err_sts, exp_sts, hw_sts); + + if (fg_debug_mask & (FG_MEM_DEBUG_READS | FG_MEM_DEBUG_WRITES)) + pr_info("ima_err_sts=%x ima_exp_sts=%x ima_hw_sts=%x\n", + err_sts, exp_sts, hw_sts); + + if (check_hw_sts) { + /* + * Lower nibble should be equal to upper nibble before SRAM + * transactions begin from the SW side. If they are unequal, then + * the error clear sequence should be run irrespective of IMA + * exception errors. 
+ */ + if ((hw_sts & 0x0F) != hw_sts >> 4) { + pr_err("IMA HW not in correct state, hw_sts=%x\n", + hw_sts); + run_err_clr_seq = true; + } + } + + if (exp_sts & (IACS_ERR_BIT | XCT_ERR_BIT | DATA_RD_ERR_BIT | + DATA_WR_ERR_BIT | ADDR_BURST_WRAP_BIT | ADDR_RNG_ERR_BIT | + ADDR_SRC_ERR_BIT)) { + pr_err("IMA exception bit set, exp_sts=%x\n", exp_sts); + run_err_clr_seq = true; + } + + if (run_err_clr_seq) { + ret = fg_run_iacs_clear_sequence(chip); if (!ret) return -EAGAIN; + else + pr_err("Error clearing IMA exception ret=%d\n", ret); + } + + return rc; +} + +static void fg_enable_irqs(struct fg_chip *chip, bool enable) +{ + if (!(enable ^ chip->irqs_enabled)) + return; + + if (enable) { + enable_irq(chip->soc_irq[DELTA_SOC].irq); + enable_irq_wake(chip->soc_irq[DELTA_SOC].irq); + if (!chip->full_soc_irq_enabled) { + enable_irq(chip->soc_irq[FULL_SOC].irq); + enable_irq_wake(chip->soc_irq[FULL_SOC].irq); + chip->full_soc_irq_enabled = true; + } + enable_irq(chip->batt_irq[BATT_MISSING].irq); + if (!chip->vbat_low_irq_enabled) { + enable_irq(chip->batt_irq[VBATT_LOW].irq); + enable_irq_wake(chip->batt_irq[VBATT_LOW].irq); + chip->vbat_low_irq_enabled = true; + } + if (!chip->use_vbat_low_empty_soc) { + enable_irq(chip->soc_irq[EMPTY_SOC].irq); + enable_irq_wake(chip->soc_irq[EMPTY_SOC].irq); + } + chip->irqs_enabled = true; + } else { + disable_irq_wake(chip->soc_irq[DELTA_SOC].irq); + disable_irq_nosync(chip->soc_irq[DELTA_SOC].irq); + if (chip->full_soc_irq_enabled) { + disable_irq_wake(chip->soc_irq[FULL_SOC].irq); + disable_irq_nosync(chip->soc_irq[FULL_SOC].irq); + chip->full_soc_irq_enabled = false; + } + disable_irq(chip->batt_irq[BATT_MISSING].irq); + if (chip->vbat_low_irq_enabled) { + disable_irq_wake(chip->batt_irq[VBATT_LOW].irq); + disable_irq_nosync(chip->batt_irq[VBATT_LOW].irq); + chip->vbat_low_irq_enabled = false; + } + if (!chip->use_vbat_low_empty_soc) { + disable_irq_wake(chip->soc_irq[EMPTY_SOC].irq); + disable_irq_nosync(chip->soc_irq[EMPTY_SOC].irq); + } + chip->irqs_enabled = false; + } + + if (fg_debug_mask & FG_STATUS) + pr_info("FG interrupts are %sabled\n", enable ? 
"en" : "dis"); +} - pr_err("Error clearing IMA exception ret=%d\n", ret); +static void fg_check_ima_error_handling(struct fg_chip *chip) +{ + if (chip->ima_error_handling) { + if (fg_debug_mask & FG_STATUS) + pr_info("IMA error is handled already!\n"); + return; } + mutex_lock(&chip->ima_recovery_lock); + fg_enable_irqs(chip, false); + chip->use_last_cc_soc = true; + chip->ima_error_handling = true; + if (!work_pending(&chip->ima_error_recovery_work)) + schedule_work(&chip->ima_error_recovery_work); + mutex_unlock(&chip->ima_recovery_lock); +} + +#define SOC_ALG_ST 0xCF +#define FGXCT_PRD BIT(7) +#define ALG_ST_CHECK_COUNT 20 +static int fg_check_alg_status(struct fg_chip *chip) +{ + int rc = 0, timeout = ALG_ST_CHECK_COUNT, count = 0; + u8 ima_opr_sts, alg_sts = 0, temp = 0; + if (!fg_reset_on_lockup) { + pr_info("FG lockup detection cannot be run\n"); + return 0; + } + + rc = fg_read(chip, &alg_sts, chip->soc_base + SOC_ALG_ST, 1); + if (rc) { + pr_err("Error in reading SOC_ALG_ST, rc=%d\n", rc); + return rc; + } + + while (1) { + rc = fg_read(chip, &ima_opr_sts, + chip->mem_base + MEM_INTF_IMA_OPR_STS, 1); + if (!rc && !(ima_opr_sts & FGXCT_PRD)) + break; + + if (rc) { + pr_err("Error in reading IMA_OPR_STS, rc=%d\n", + rc); + break; + } + + rc = fg_read(chip, &temp, chip->soc_base + SOC_ALG_ST, + 1); + if (rc) { + pr_err("Error in reading SOC_ALG_ST, rc=%d\n", + rc); + break; + } + + if ((ima_opr_sts & FGXCT_PRD) && (temp == alg_sts)) + count++; + + /* Wait for ~10ms while polling ALG_ST & IMA_OPR_STS */ + usleep_range(9000, 11000); + + if (!(--timeout)) + break; + } + + if (fg_debug_mask & (FG_MEM_DEBUG_READS | FG_MEM_DEBUG_WRITES)) + pr_info("ima_opr_sts: %x alg_sts: %x count=%d\n", ima_opr_sts, + alg_sts, count); + + if (count == ALG_ST_CHECK_COUNT) { + /* If we are here, that means FG ALG is stuck */ + pr_err("ALG is stuck\n"); + fg_check_ima_error_handling(chip); + rc = -EBUSY; + } return rc; } @@ -1122,19 +1491,25 @@ static int fg_check_iacs_ready(struct fg_chip *chip) while (1) { rc = fg_read(chip, &ima_opr_sts, chip->mem_base + MEM_INTF_IMA_OPR_STS, 1); - if (!rc && (ima_opr_sts & IMA_IACS_RDY)) + if (!rc && (ima_opr_sts & IMA_IACS_RDY)) { break; + } else { + if (!(--timeout) || rc) + break; - if (!(--timeout) || rc) - break; - /* delay for iacs_ready to be asserted */ - usleep_range(5000, 7000); + /* delay for iacs_ready to be asserted */ + usleep_range(5000, 7000); + } } if (!timeout || rc) { - pr_err("IACS_RDY not set\n"); + pr_err("IACS_RDY not set, ima_opr_sts: %x\n", ima_opr_sts); + rc = fg_check_alg_status(chip); + if (rc && rc != -EBUSY) + pr_err("Couldn't check FG ALG status, rc=%d\n", + rc); /* perform IACS_CLR sequence */ - fg_check_ima_exception(chip); + fg_check_ima_exception(chip, false); return -EBUSY; } @@ -1154,15 +1529,16 @@ static int __fg_interleaved_mem_write(struct fg_chip *chip, u8 *val, while (len > 0) { num_bytes = (offset + len) > BUF_LEN ? 
- (BUF_LEN - offset) : len; + (BUF_LEN - offset) : len; /* write to byte_enable */ for (i = offset; i < (offset + num_bytes); i++) byte_enable |= BIT(i); rc = fg_write(chip, &byte_enable, - chip->mem_base + MEM_INTF_IMA_BYTE_EN, 1); + chip->mem_base + MEM_INTF_IMA_BYTE_EN, 1); if (rc) { - pr_err("Unable to write to byte_en_reg rc=%d\n", rc); + pr_err("Unable to write to byte_en_reg rc=%d\n", + rc); return rc; } /* write data */ @@ -1193,12 +1569,13 @@ static int __fg_interleaved_mem_write(struct fg_chip *chip, u8 *val, rc = fg_check_iacs_ready(chip); if (rc) { - pr_debug("IACS_RDY failed rc=%d\n", rc); + pr_err("IACS_RDY failed post write to address %x offset %d rc=%d\n", + address, offset, rc); return rc; } /* check for error condition */ - rc = fg_check_ima_exception(chip); + rc = fg_check_ima_exception(chip, false); if (rc) { pr_err("IMA transaction failed rc=%d", rc); return rc; @@ -1239,12 +1616,13 @@ static int __fg_interleaved_mem_read(struct fg_chip *chip, u8 *val, u16 address, rc = fg_check_iacs_ready(chip); if (rc) { - pr_debug("IACS_RDY failed rc=%d\n", rc); + pr_err("IACS_RDY failed post read for address %x offset %d rc=%d\n", + address, offset, rc); return rc; } /* check for error condition */ - rc = fg_check_ima_exception(chip); + rc = fg_check_ima_exception(chip, false); if (rc) { pr_err("IMA transaction failed rc=%d", rc); return rc; @@ -1296,7 +1674,7 @@ static int fg_interleaved_mem_config(struct fg_chip *chip, u8 *val, * clear, then return an error instead of waiting for it again. */ if (time_count > 4) { - pr_err("Waited for 1.5 seconds polling RIF_MEM_ACCESS_REQ\n"); + pr_err("Waited for ~16ms polling RIF_MEM_ACCESS_REQ\n"); return -ETIMEDOUT; } @@ -1322,7 +1700,8 @@ static int fg_interleaved_mem_config(struct fg_chip *chip, u8 *val, rc = fg_check_iacs_ready(chip); if (rc) { - pr_debug("IACS_RDY failed rc=%d\n", rc); + pr_err("IACS_RDY failed before setting address: %x offset: %d rc=%d\n", + address, offset, rc); return rc; } @@ -1335,7 +1714,8 @@ static int fg_interleaved_mem_config(struct fg_chip *chip, u8 *val, rc = fg_check_iacs_ready(chip); if (rc) - pr_debug("IACS_RDY failed rc=%d\n", rc); + pr_err("IACS_RDY failed after setting address: %x offset: %d rc=%d\n", + address, offset, rc); return rc; } @@ -1346,10 +1726,13 @@ static int fg_interleaved_mem_config(struct fg_chip *chip, u8 *val, static int fg_interleaved_mem_read(struct fg_chip *chip, u8 *val, u16 address, int len, int offset) { - int rc = 0, orig_address = address; + int rc = 0, ret, orig_address = address; u8 start_beat_count, end_beat_count, count = 0; bool retry = false; + if (chip->fg_shutdown) + return -EINVAL; + if (offset > 3) { pr_err("offset too large %d\n", offset); return -EINVAL; @@ -1372,11 +1755,22 @@ static int fg_interleaved_mem_read(struct fg_chip *chip, u8 *val, u16 address, } mutex_lock(&chip->rw_lock); + if (fg_debug_mask & FG_MEM_DEBUG_READS) + pr_info("Read for %d bytes is attempted @ 0x%x[%d]\n", + len, address, offset); retry: + if (count >= RETRY_COUNT) { + pr_err("Retried reading 3 times\n"); + retry = false; + goto out; + } + rc = fg_interleaved_mem_config(chip, val, address, offset, len, 0); if (rc) { pr_err("failed to configure SRAM for IMA rc = %d\n", rc); + retry = true; + count++; goto out; } @@ -1385,18 +1779,21 @@ retry: chip->mem_base + MEM_INTF_FG_BEAT_COUNT, 1); if (rc) { pr_err("failed to read beat count rc=%d\n", rc); + retry = true; + count++; goto out; } /* read data */ rc = __fg_interleaved_mem_read(chip, val, address, offset, len); if (rc) { + count++; if ((rc 
== -EAGAIN) && (count < RETRY_COUNT)) { - count++; pr_err("IMA access failed retry_count = %d\n", count); goto retry; } else { pr_err("failed to read SRAM address rc = %d\n", rc); + retry = true; goto out; } } @@ -1406,6 +1803,8 @@ retry: chip->mem_base + MEM_INTF_FG_BEAT_COUNT, 1); if (rc) { pr_err("failed to read beat count rc=%d\n", rc); + retry = true; + count++; goto out; } @@ -1418,12 +1817,13 @@ retry: if (fg_debug_mask & FG_MEM_DEBUG_READS) pr_info("Beat count do not match - retry transaction\n"); retry = true; + count++; } out: /* Release IMA access */ - rc = fg_masked_write(chip, MEM_INTF_CFG(chip), IMA_REQ_ACCESS, 0, 1); - if (rc) - pr_err("failed to reset IMA access bit rc = %d\n", rc); + ret = fg_masked_write(chip, MEM_INTF_CFG(chip), IMA_REQ_ACCESS, 0, 1); + if (ret) + pr_err("failed to reset IMA access bit ret = %d\n", ret); if (retry) { retry = false; @@ -1439,8 +1839,12 @@ exit: static int fg_interleaved_mem_write(struct fg_chip *chip, u8 *val, u16 address, int len, int offset) { - int rc = 0, orig_address = address; + int rc = 0, ret, orig_address = address; u8 count = 0; + bool retry = false; + + if (chip->fg_shutdown) + return -EINVAL; if (address < RAM_OFFSET) return -EINVAL; @@ -1455,32 +1859,49 @@ static int fg_interleaved_mem_write(struct fg_chip *chip, u8 *val, u16 address, offset = (orig_address + offset) % 4; mutex_lock(&chip->rw_lock); + if (fg_debug_mask & FG_MEM_DEBUG_WRITES) + pr_info("Write for %d bytes is attempted @ 0x%x[%d]\n", + len, address, offset); retry: + if (count >= RETRY_COUNT) { + pr_err("Retried writing 3 times\n"); + retry = false; + goto out; + } + rc = fg_interleaved_mem_config(chip, val, address, offset, len, 1); if (rc) { - pr_err("failed to xonfigure SRAM for IMA rc = %d\n", rc); + pr_err("failed to configure SRAM for IMA rc = %d\n", rc); + retry = true; + count++; goto out; } /* write data */ rc = __fg_interleaved_mem_write(chip, val, address, offset, len); if (rc) { + count++; if ((rc == -EAGAIN) && (count < RETRY_COUNT)) { - count++; pr_err("IMA access failed retry_count = %d\n", count); goto retry; } else { pr_err("failed to write SRAM address rc = %d\n", rc); + retry = true; goto out; } } out: /* Release IMA access */ - rc = fg_masked_write(chip, MEM_INTF_CFG(chip), IMA_REQ_ACCESS, 0, 1); - if (rc) - pr_err("failed to reset IMA access bit rc = %d\n", rc); + ret = fg_masked_write(chip, MEM_INTF_CFG(chip), IMA_REQ_ACCESS, 0, 1); + if (ret) + pr_err("failed to reset IMA access bit ret = %d\n", ret); + + if (retry) { + retry = false; + goto retry; + } mutex_unlock(&chip->rw_lock); fg_relax(&chip->memif_wakeup_source); @@ -1490,6 +1911,9 @@ out: static int fg_mem_read(struct fg_chip *chip, u8 *val, u16 address, int len, int offset, bool keep_access) { + if (chip->block_sram_access) + return -EBUSY; + if (chip->ima_supported) return fg_interleaved_mem_read(chip, val, address, len, offset); @@ -1501,6 +1925,9 @@ static int fg_mem_read(struct fg_chip *chip, u8 *val, u16 address, static int fg_mem_write(struct fg_chip *chip, u8 *val, u16 address, int len, int offset, bool keep_access) { + if (chip->block_sram_access) + return -EBUSY; + if (chip->ima_supported) return fg_interleaved_mem_write(chip, val, address, len, offset); @@ -1538,6 +1965,62 @@ static int fg_mem_masked_write(struct fg_chip *chip, u16 addr, return rc; } +static u8 sram_backup_buffer[100]; +static int fg_backup_sram_registers(struct fg_chip *chip, bool save) +{ + int rc, i, len, offset; + u16 address; + u8 *ptr; + + if (fg_debug_mask & FG_STATUS) + pr_info("%sing SRAM 
registers\n", save ? "Back" : "Restor"); + + ptr = sram_backup_buffer; + for (i = 0; i < FG_BACKUP_MAX; i++) { + address = fg_backup_regs[i].address; + offset = fg_backup_regs[i].offset; + len = fg_backup_regs[i].len; + if (save) + rc = fg_interleaved_mem_read(chip, ptr, address, + len, offset); + else + rc = fg_interleaved_mem_write(chip, ptr, address, + len, offset); + if (rc) { + pr_err("Error in reading %d bytes from %x[%d], rc=%d\n", + len, address, offset, rc); + break; + } + ptr += len; + } + + return rc; +} + +#define SOC_FG_RESET 0xF3 +#define RESET_MASK (BIT(7) | BIT(5)) +static int fg_reset(struct fg_chip *chip, bool reset) +{ + int rc; + + rc = fg_sec_masked_write(chip, chip->soc_base + SOC_FG_RESET, + 0xFF, reset ? RESET_MASK : 0, 1); + if (rc) + pr_err("Error in writing to 0x%x, rc=%d\n", SOC_FG_RESET, rc); + + return rc; +} + +static void fg_handle_battery_insertion(struct fg_chip *chip) +{ + reinit_completion(&chip->batt_id_avail); + reinit_completion(&chip->fg_reset_done); + schedule_delayed_work(&chip->batt_profile_init, 0); + cancel_delayed_work(&chip->update_sram_data); + schedule_delayed_work(&chip->update_sram_data, msecs_to_jiffies(0)); +} + + static int soc_to_setpoint(int soc) { return DIV_ROUND_CLOSEST(soc * 255, 100); @@ -1550,6 +2033,7 @@ static void batt_to_setpoint_adc(int vbatt_mv, u8 *data) val = DIV_ROUND_CLOSEST(vbatt_mv * 32768, 5000); data[0] = val & 0xFF; data[1] = val >> 8; + return; } static u8 batt_to_setpoint_8b(int vbatt_mv) @@ -1678,14 +2162,37 @@ out: return rc; } +#define VBATT_LOW_STS_BIT BIT(2) +static int fg_get_vbatt_status(struct fg_chip *chip, bool *vbatt_low_sts) +{ + int rc = 0; + u8 fg_batt_sts; + + rc = fg_read(chip, &fg_batt_sts, INT_RT_STS(chip->batt_base), 1); + if (rc) + pr_err("spmi read failed: addr=%03X, rc=%d\n", + INT_RT_STS(chip->batt_base), rc); + else + *vbatt_low_sts = !!(fg_batt_sts & VBATT_LOW_STS_BIT); + + return rc; +} + #define SOC_EMPTY BIT(3) static bool fg_is_batt_empty(struct fg_chip *chip) { u8 fg_soc_sts; int rc; + bool vbatt_low_sts; - rc = fg_read(chip, &fg_soc_sts, - INT_RT_STS(chip->soc_base), 1); + if (chip->use_vbat_low_empty_soc) { + if (fg_get_vbatt_status(chip, &vbatt_low_sts)) + return false; + + return vbatt_low_sts; + } + + rc = fg_read(chip, &fg_soc_sts, INT_RT_STS(chip->soc_base), 1); if (rc) { pr_err("spmi read failed: addr=%03X, rc=%d\n", INT_RT_STS(chip->soc_base), rc); @@ -1732,7 +2239,16 @@ static int get_monotonic_soc_raw(struct fg_chip *chip) #define FULL_SOC_RAW 0xFF static int get_prop_capacity(struct fg_chip *chip) { - int msoc; + int msoc, rc; + bool vbatt_low_sts; + + if (chip->use_last_soc && chip->last_soc) { + if (chip->last_soc == FULL_SOC_RAW) + return FULL_CAPACITY; + return DIV_ROUND_CLOSEST((chip->last_soc - 1) * + (FULL_CAPACITY - 2), + FULL_SOC_RAW - 2) + 1; + } if (chip->battery_missing) return MISSING_CAPACITY; @@ -1747,10 +2263,28 @@ static int get_prop_capacity(struct fg_chip *chip) return EMPTY_CAPACITY; } msoc = get_monotonic_soc_raw(chip); - if (msoc == 0) - return EMPTY_CAPACITY; - else if (msoc == FULL_SOC_RAW) + if (msoc == 0) { + if (fg_reset_on_lockup && chip->use_vbat_low_empty_soc) { + rc = fg_get_vbatt_status(chip, &vbatt_low_sts); + if (rc) { + pr_err("Error in reading vbatt_status, rc=%d\n", + rc); + return EMPTY_CAPACITY; + } + + if (!vbatt_low_sts) + return DIV_ROUND_CLOSEST((chip->last_soc - 1) * + (FULL_CAPACITY - 2), + FULL_SOC_RAW - 2) + 1; + else + return EMPTY_CAPACITY; + } else { + return EMPTY_CAPACITY; + } + } else if (msoc == FULL_SOC_RAW) { return 
FULL_CAPACITY; + } + return DIV_ROUND_CLOSEST((msoc - 1) * (FULL_CAPACITY - 2), FULL_SOC_RAW - 2) + 1; } @@ -1843,6 +2377,25 @@ static int set_prop_sense_type(struct fg_chip *chip, int ext_sense_type) return 0; } +#define IGNORE_FALSE_NEGATIVE_ISENSE_BIT BIT(3) +static int set_prop_ignore_false_negative_isense(struct fg_chip *chip, + bool ignore) +{ + int rc; + + rc = fg_mem_masked_write(chip, EXTERNAL_SENSE_SELECT, + IGNORE_FALSE_NEGATIVE_ISENSE_BIT, + ignore ? IGNORE_FALSE_NEGATIVE_ISENSE_BIT : 0, + EXTERNAL_SENSE_OFFSET); + if (rc) { + pr_err("failed to %s isense false negative ignore rc=%d\n", + ignore ? "enable" : "disable", rc); + return rc; + } + + return 0; +} + #define EXPONENT_MASK 0xF800 #define MANTISSA_MASK 0x3FF #define SIGN BIT(10) @@ -1953,8 +2506,7 @@ static int fg_is_batt_id_valid(struct fg_chip *chip) return rc; } - if (fg_debug_mask & FG_IRQS) - pr_info("fg batt sts 0x%x\n", fg_batt_sts); + pr_debug("fg batt sts 0x%x\n", fg_batt_sts); return (fg_batt_sts & BATT_IDED) ? 1 : 0; } @@ -1984,7 +2536,7 @@ static int64_t twos_compliment_extend(int64_t val, int nbytes) #define DECIKELVIN 2730 #define SRAM_PERIOD_NO_ID_UPDATE_MS 100 #define FULL_PERCENT_28BIT 0xFFFFFFF -static void update_sram_data(struct fg_chip *chip, int *resched_ms) +static int update_sram_data(struct fg_chip *chip, int *resched_ms) { int i, j, rc = 0; u8 reg[4]; @@ -2060,6 +2612,31 @@ static void update_sram_data(struct fg_chip *chip, int *resched_ms) } fg_mem_release(chip); + /* Backup the registers whenever no error happens during update */ + if (fg_reset_on_lockup && !chip->ima_error_handling) { + if (!rc) { + if (fg_debug_mask & FG_STATUS) + pr_info("backing up SRAM registers\n"); + rc = fg_backup_sram_registers(chip, true); + if (rc) { + pr_err("Couldn't save sram registers\n"); + goto out; + } + if (!chip->use_last_soc) { + chip->last_soc = get_monotonic_soc_raw(chip); + chip->last_cc_soc = div64_s64( + (int64_t)chip->last_soc * + FULL_PERCENT_28BIT, FULL_SOC_RAW); + } + if (fg_debug_mask & FG_STATUS) + pr_info("last_soc: %d last_cc_soc: %lld\n", + chip->last_soc, chip->last_cc_soc); + } else { + pr_err("update_sram failed\n"); + goto out; + } + } + if (!rc) get_current_time(&chip->last_sram_update_time); @@ -2070,7 +2647,55 @@ resched: } else { *resched_ms = SRAM_PERIOD_NO_ID_UPDATE_MS; } +out: fg_relax(&chip->update_sram_wakeup_source); + return rc; +} + +#define SANITY_CHECK_PERIOD_MS 5000 +static void check_sanity_work(struct work_struct *work) +{ + struct fg_chip *chip = container_of(work, + struct fg_chip, + check_sanity_work.work); + int rc = 0; + u8 beat_count; + bool tried_once = false; + + fg_stay_awake(&chip->sanity_wakeup_source); + +try_again: + rc = fg_read(chip, &beat_count, + chip->mem_base + MEM_INTF_FG_BEAT_COUNT, 1); + if (rc) { + pr_err("failed to read beat count rc=%d\n", rc); + goto resched; + } + + if (fg_debug_mask & FG_STATUS) + pr_info("current: %d, prev: %d\n", beat_count, + chip->last_beat_count); + + if (chip->last_beat_count == beat_count) { + if (!tried_once) { + /* Wait for 1 FG cycle and read it once again */ + msleep(1500); + tried_once = true; + goto try_again; + } else { + pr_err("Beat count not updating\n"); + fg_check_ima_error_handling(chip); + goto out; + } + } else { + chip->last_beat_count = beat_count; + } +resched: + schedule_delayed_work( + &chip->check_sanity_work, + msecs_to_jiffies(SANITY_CHECK_PERIOD_MS)); +out: + fg_relax(&chip->sanity_wakeup_source); } #define SRAM_TIMEOUT_MS 3000 @@ -2079,8 +2704,9 @@ static void update_sram_data_work(struct 
work_struct *work) { struct fg_chip *chip = container_of(work, struct fg_chip, update_sram_data.work); - int resched_ms = SRAM_PERIOD_NO_ID_UPDATE_MS, ret; + int resched_ms, ret; bool tried_again = false; + int rc = 0; wait: /* Wait for MEMIF access revoked */ @@ -2094,14 +2720,19 @@ wait: goto wait; } else if (ret <= 0) { pr_err("transaction timed out ret=%d\n", ret); + if (fg_is_batt_id_valid(chip)) + resched_ms = fg_sram_update_period_ms; + else + resched_ms = SRAM_PERIOD_NO_ID_UPDATE_MS; goto out; } - update_sram_data(chip, &resched_ms); + rc = update_sram_data(chip, &resched_ms); out: - schedule_delayed_work( - &chip->update_sram_data, - msecs_to_jiffies(resched_ms)); + if (!rc) + schedule_delayed_work( + &chip->update_sram_data, + msecs_to_jiffies(resched_ms)); } #define BATT_TEMP_OFFSET 3 @@ -2115,6 +2746,8 @@ out: TEMP_SENSE_CHARGE_BIT) #define TEMP_PERIOD_UPDATE_MS 10000 #define TEMP_PERIOD_TIMEOUT_MS 3000 +#define BATT_TEMP_LOW_LIMIT -600 +#define BATT_TEMP_HIGH_LIMIT 1500 static void update_temp_data(struct work_struct *work) { s16 temp; @@ -2166,14 +2799,44 @@ wait: } temp = reg[0] | (reg[1] << 8); - fg_data[0].value = (temp * TEMP_LSB_16B / 1000) - - DECIKELVIN; + temp = (temp * TEMP_LSB_16B / 1000) - DECIKELVIN; + + /* + * If temperature is within the specified range (e.g. between -60C and 150C), + * report it to userspace. Otherwise, use the last good + * temperature that was read. + */ + if (temp > chip->batt_temp_low_limit && + temp < chip->batt_temp_high_limit) { + chip->last_good_temp = temp; + fg_data[0].value = temp; + } else { + fg_data[0].value = chip->last_good_temp; + + /* + * If the temperature was read before and was in the valid + * range, then a bad temperature reading could be because of + * FG lockup. Trigger the FG reset sequence in such cases. 
+ */ + if (chip->last_temp_update_time && fg_reset_on_lockup && + (chip->last_good_temp > chip->batt_temp_low_limit && + chip->last_good_temp < chip->batt_temp_high_limit)) { + pr_err("Batt_temp is %d !, triggering FG reset\n", + temp); + fg_check_ima_error_handling(chip); + } + } if (fg_debug_mask & FG_MEM_DEBUG_READS) pr_info("BATT_TEMP %d %d\n", temp, fg_data[0].value); get_current_time(&chip->last_temp_update_time); + if (chip->soc_slope_limiter_en) { + fg_stay_awake(&chip->slope_limit_wakeup_source); + schedule_work(&chip->slope_limiter_work); + } + out: if (chip->sw_rbias_ctrl) { rc = fg_mem_masked_write(chip, EXTERNAL_SENSE_SELECT, @@ -2226,18 +2889,6 @@ static int fg_set_resume_soc(struct fg_chip *chip, u8 threshold) return rc; } -#define VBATT_LOW_STS_BIT BIT(2) -static int fg_get_vbatt_status(struct fg_chip *chip, bool *vbatt_low_sts) -{ - int rc = 0; - u8 fg_batt_sts; - - rc = fg_read(chip, &fg_batt_sts, INT_RT_STS(chip->batt_base), 1); - if (!rc) - *vbatt_low_sts = !!(fg_batt_sts & VBATT_LOW_STS_BIT); - return rc; -} - #define BATT_CYCLE_NUMBER_REG 0x5E8 #define BATT_CYCLE_OFFSET 0 static void restore_cycle_counter(struct fg_chip *chip) @@ -2301,6 +2952,9 @@ static int fg_inc_store_cycle_ctr(struct fg_chip *chip, int bucket) bucket, rc); else chip->cyc_ctr.count[bucket] = cyc_count; + + if (fg_debug_mask & FG_POWER_SUPPLY) + pr_info("Stored bucket %d cyc_count: %d\n", bucket, cyc_count); return rc; } @@ -2416,6 +3070,62 @@ static int bcap_uah_2b(u8 *buffer) return ((int)val) * 1000; } +#define SLOPE_LIMITER_COEFF_REG 0x430 +#define SLOPE_LIMITER_COEFF_OFFSET 3 +#define SLOPE_LIMIT_TEMP_THRESHOLD 100 +#define SLOPE_LIMIT_LOW_TEMP_CHG 45 +#define SLOPE_LIMIT_HIGH_TEMP_CHG 2 +#define SLOPE_LIMIT_LOW_TEMP_DISCHG 45 +#define SLOPE_LIMIT_HIGH_TEMP_DISCHG 2 +static void slope_limiter_work(struct work_struct *work) +{ + struct fg_chip *chip = container_of(work, struct fg_chip, + slope_limiter_work); + enum slope_limit_status status; + int batt_temp, rc; + u8 buf[2]; + int64_t val; + + batt_temp = get_sram_prop_now(chip, FG_DATA_BATT_TEMP); + + if (chip->status == POWER_SUPPLY_STATUS_CHARGING || + chip->status == POWER_SUPPLY_STATUS_FULL) { + if (batt_temp < chip->slope_limit_temp) + status = LOW_TEMP_CHARGE; + else + status = HIGH_TEMP_CHARGE; + } else if (chip->status == POWER_SUPPLY_STATUS_DISCHARGING) { + if (batt_temp < chip->slope_limit_temp) + status = LOW_TEMP_DISCHARGE; + else + status = HIGH_TEMP_DISCHARGE; + } else { + goto out; + } + + if (status == chip->slope_limit_sts) + goto out; + + val = chip->slope_limit_coeffs[status]; + val *= MICRO_UNIT; + half_float_to_buffer(val, buf); + rc = fg_mem_write(chip, buf, + SLOPE_LIMITER_COEFF_REG, 2, + SLOPE_LIMITER_COEFF_OFFSET, 0); + if (rc) { + pr_err("Couldn't write to slope_limiter_coeff_reg, rc=%d\n", + rc); + goto out; + } + + chip->slope_limit_sts = status; + if (fg_debug_mask & FG_STATUS) + pr_info("Slope limit sts: %d val: %lld buf[%x %x] written\n", + status, val, buf[0], buf[1]); +out: + fg_relax(&chip->slope_limit_wakeup_source); +} + static int lookup_ocv_for_soc(struct fg_chip *chip, int soc) { int64_t *coeffs; @@ -2481,6 +3191,7 @@ static int lookup_soc_for_ocv(struct fg_chip *chip, int ocv) #define ESR_ACTUAL_REG 0x554 #define BATTERY_ESR_REG 0x4F4 #define TEMP_RS_TO_RSLOW_REG 0x514 +#define ESR_OFFSET 2 static int estimate_battery_age(struct fg_chip *chip, int *actual_capacity) { int64_t ocv_cutoff_new, ocv_cutoff_aged, temp_rs_to_rslow; @@ -2519,7 +3230,7 @@ static int estimate_battery_age(struct fg_chip *chip, int 
*actual_capacity) rc = fg_mem_read(chip, buffer, ESR_ACTUAL_REG, 2, 2, 0); esr_actual = half_float(buffer); - rc |= fg_mem_read(chip, buffer, BATTERY_ESR_REG, 2, 2, 0); + rc |= fg_mem_read(chip, buffer, BATTERY_ESR_REG, 2, ESR_OFFSET, 0); battery_esr = half_float(buffer); if (rc) { @@ -2548,13 +3259,13 @@ static int estimate_battery_age(struct fg_chip *chip, int *actual_capacity) /* calculate soc_cutoff_new */ val = (1000000LL + temp_rs_to_rslow) * battery_esr; - do_div(val, 1000000); + val = div64_s64(val, 1000000); ocv_cutoff_new = div64_s64(chip->evaluation_current * val, 1000) + chip->cutoff_voltage; /* calculate soc_cutoff_aged */ val = (1000000LL + temp_rs_to_rslow) * esr_actual; - do_div(val, 1000000); + val = div64_s64(val, 1000000); ocv_cutoff_aged = div64_s64(chip->evaluation_current * val, 1000) + chip->cutoff_voltage; @@ -2594,124 +3305,6 @@ static void battery_age_work(struct work_struct *work) estimate_battery_age(chip, &chip->actual_cap_uah); } -static enum power_supply_property fg_power_props[] = { - POWER_SUPPLY_PROP_CAPACITY, - POWER_SUPPLY_PROP_CAPACITY_RAW, - POWER_SUPPLY_PROP_CURRENT_NOW, - POWER_SUPPLY_PROP_VOLTAGE_NOW, - POWER_SUPPLY_PROP_VOLTAGE_OCV, - POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN, - POWER_SUPPLY_PROP_CHARGE_NOW, - POWER_SUPPLY_PROP_CHARGE_NOW_RAW, - POWER_SUPPLY_PROP_CHARGE_NOW_ERROR, - POWER_SUPPLY_PROP_CHARGE_FULL, - POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, - POWER_SUPPLY_PROP_TEMP, - POWER_SUPPLY_PROP_COOL_TEMP, - POWER_SUPPLY_PROP_WARM_TEMP, - POWER_SUPPLY_PROP_RESISTANCE, - POWER_SUPPLY_PROP_RESISTANCE_ID, - POWER_SUPPLY_PROP_BATTERY_TYPE, - POWER_SUPPLY_PROP_UPDATE_NOW, - POWER_SUPPLY_PROP_ESR_COUNT, - POWER_SUPPLY_PROP_VOLTAGE_MIN, - POWER_SUPPLY_PROP_CYCLE_COUNT, - POWER_SUPPLY_PROP_CYCLE_COUNT_ID, - POWER_SUPPLY_PROP_HI_POWER, -}; - -static int fg_power_get_property(struct power_supply *psy, - enum power_supply_property psp, - union power_supply_propval *val) -{ - struct fg_chip *chip = power_supply_get_drvdata(psy); - bool vbatt_low_sts; - - switch (psp) { - case POWER_SUPPLY_PROP_BATTERY_TYPE: - if (chip->battery_missing) - val->strval = missing_batt_type; - else if (chip->fg_restarting) - val->strval = loading_batt_type; - else - val->strval = chip->batt_type; - break; - case POWER_SUPPLY_PROP_CAPACITY: - val->intval = get_prop_capacity(chip); - break; - case POWER_SUPPLY_PROP_CAPACITY_RAW: - val->intval = get_sram_prop_now(chip, FG_DATA_BATT_SOC); - break; - case POWER_SUPPLY_PROP_CHARGE_NOW_ERROR: - val->intval = get_sram_prop_now(chip, FG_DATA_VINT_ERR); - break; - case POWER_SUPPLY_PROP_CURRENT_NOW: - val->intval = get_sram_prop_now(chip, FG_DATA_CURRENT); - break; - case POWER_SUPPLY_PROP_VOLTAGE_NOW: - val->intval = get_sram_prop_now(chip, FG_DATA_VOLTAGE); - break; - case POWER_SUPPLY_PROP_VOLTAGE_OCV: - val->intval = get_sram_prop_now(chip, FG_DATA_OCV); - break; - case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN: - val->intval = chip->batt_max_voltage_uv; - break; - case POWER_SUPPLY_PROP_TEMP: - val->intval = get_sram_prop_now(chip, FG_DATA_BATT_TEMP); - break; - case POWER_SUPPLY_PROP_COOL_TEMP: - val->intval = get_prop_jeita_temp(chip, FG_MEM_SOFT_COLD); - break; - case POWER_SUPPLY_PROP_WARM_TEMP: - val->intval = get_prop_jeita_temp(chip, FG_MEM_SOFT_HOT); - break; - case POWER_SUPPLY_PROP_RESISTANCE: - val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ESR); - break; - case POWER_SUPPLY_PROP_ESR_COUNT: - val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ESR_COUNT); - break; - case POWER_SUPPLY_PROP_CYCLE_COUNT: - val->intval = 
fg_get_cycle_count(chip); - break; - case POWER_SUPPLY_PROP_CYCLE_COUNT_ID: - val->intval = chip->cyc_ctr.id; - break; - case POWER_SUPPLY_PROP_RESISTANCE_ID: - val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ID); - break; - case POWER_SUPPLY_PROP_UPDATE_NOW: - val->intval = 0; - break; - case POWER_SUPPLY_PROP_VOLTAGE_MIN: - if (!fg_get_vbatt_status(chip, &vbatt_low_sts)) - val->intval = (int)vbatt_low_sts; - else - val->intval = 1; - break; - case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: - val->intval = chip->nom_cap_uah; - break; - case POWER_SUPPLY_PROP_CHARGE_FULL: - val->intval = chip->learning_data.learned_cc_uah; - break; - case POWER_SUPPLY_PROP_CHARGE_NOW: - val->intval = chip->learning_data.cc_uah; - break; - case POWER_SUPPLY_PROP_CHARGE_NOW_RAW: - val->intval = get_sram_prop_now(chip, FG_DATA_CC_CHARGE); - break; - case POWER_SUPPLY_PROP_HI_POWER: - val->intval = !!chip->bcl_lpm_disabled; - break; - default: - return -EINVAL; - } - - return 0; -} - static int correction_times[] = { 1470, 2940, @@ -2853,11 +3446,8 @@ static void fg_cap_learning_work(struct work_struct *work) goto fail; } - if (chip->wa_flag & USE_CC_SOC_REG) { - mutex_unlock(&chip->learning_data.learning_lock); - fg_relax(&chip->capacity_learning_wakeup_source); - return; - } + if (chip->wa_flag & USE_CC_SOC_REG) + goto fail; fg_mem_lock(chip); @@ -2888,6 +3478,8 @@ static void fg_cap_learning_work(struct work_struct *work) pr_info("total_cc_uah = %lld\n", chip->learning_data.cc_uah); fail: + if (chip->wa_flag & USE_CC_SOC_REG) + fg_relax(&chip->capacity_learning_wakeup_source); mutex_unlock(&chip->learning_data.learning_lock); return; @@ -2901,7 +3493,7 @@ static int fg_get_cc_soc(struct fg_chip *chip, int *cc_soc) { int rc; u8 reg[4]; - unsigned int temp, magnitude; + int temp; rc = fg_mem_read(chip, reg, CC_SOC_BASE_REG, 4, CC_SOC_OFFSET, 0); if (rc) { @@ -2910,20 +3502,61 @@ static int fg_get_cc_soc(struct fg_chip *chip, int *cc_soc) } temp = reg[3] << 24 | reg[2] << 16 | reg[1] << 8 | reg[0]; - magnitude = temp & CC_SOC_MAGNITUDE_MASK; - if (temp & CC_SOC_NEGATIVE_BIT) - *cc_soc = -1 * (~magnitude + 1); - else - *cc_soc = magnitude; - + *cc_soc = sign_extend32(temp, 29); return 0; } +static int fg_get_current_cc(struct fg_chip *chip) +{ + int cc_soc, rc; + int64_t current_capacity; + + if (!(chip->wa_flag & USE_CC_SOC_REG)) + return chip->learning_data.cc_uah; + + if (!chip->learning_data.learned_cc_uah) + return -EINVAL; + + rc = fg_get_cc_soc(chip, &cc_soc); + if (rc < 0) { + pr_err("Failed to get cc_soc, rc=%d\n", rc); + return rc; + } + + current_capacity = cc_soc * chip->learning_data.learned_cc_uah; + current_capacity = div64_u64(current_capacity, FULL_PERCENT_28BIT); + return current_capacity; +} + +#define BATT_MISSING_STS BIT(6) +static bool is_battery_missing(struct fg_chip *chip) +{ + int rc; + u8 fg_batt_sts; + + rc = fg_read(chip, &fg_batt_sts, + INT_RT_STS(chip->batt_base), 1); + if (rc) { + pr_err("spmi read failed: addr=%03X, rc=%d\n", + INT_RT_STS(chip->batt_base), rc); + return false; + } + + return (fg_batt_sts & BATT_MISSING_STS) ? 
true : false; +} + static int fg_cap_learning_process_full_data(struct fg_chip *chip) { int cc_pc_val, rc = -EINVAL; unsigned int cc_soc_delta_pc; int64_t delta_cc_uah; + uint64_t temp; + bool batt_missing = is_battery_missing(chip); + + if (batt_missing) { + pr_err("Battery is missing!\n"); + goto fail; + } if (!chip->learning_data.active) goto fail; @@ -2940,9 +3573,8 @@ static int fg_cap_learning_process_full_data(struct fg_chip *chip) goto fail; } - cc_soc_delta_pc = DIV_ROUND_CLOSEST( - abs(cc_pc_val - chip->learning_data.init_cc_pc_val) - * 100, FULL_PERCENT_28BIT); + temp = abs(cc_pc_val - chip->learning_data.init_cc_pc_val); + cc_soc_delta_pc = DIV_ROUND_CLOSEST_ULL(temp * 100, FULL_PERCENT_28BIT); delta_cc_uah = div64_s64( chip->learning_data.learned_cc_uah * cc_soc_delta_pc, @@ -2950,8 +3582,11 @@ static int fg_cap_learning_process_full_data(struct fg_chip *chip) chip->learning_data.cc_uah = delta_cc_uah + chip->learning_data.cc_uah; if (fg_debug_mask & FG_AGING) - pr_info("current cc_soc=%d cc_soc_pc=%d total_cc_uah = %lld\n", + pr_info("current cc_soc=%d cc_soc_pc=%d init_cc_pc_val=%d delta_cc_uah=%lld learned_cc_uah=%lld total_cc_uah = %lld\n", cc_pc_val, cc_soc_delta_pc, + chip->learning_data.init_cc_pc_val, + delta_cc_uah, + chip->learning_data.learned_cc_uah, chip->learning_data.cc_uah); return 0; @@ -3044,6 +3679,12 @@ static void fg_cap_learning_save_data(struct fg_chip *chip) { int16_t cc_mah; int rc; + bool batt_missing = is_battery_missing(chip); + + if (batt_missing) { + pr_err("Battery is missing!\n"); + return; + } cc_mah = div64_s64(chip->learning_data.learned_cc_uah, 1000); @@ -3065,14 +3706,20 @@ static void fg_cap_learning_save_data(struct fg_chip *chip) static void fg_cap_learning_post_process(struct fg_chip *chip) { int64_t max_inc_val, min_dec_val, old_cap; + bool batt_missing = is_battery_missing(chip); + + if (batt_missing) { + pr_err("Battery is missing!\n"); + return; + } max_inc_val = chip->learning_data.learned_cc_uah * (1000 + chip->learning_data.max_increment); - do_div(max_inc_val, 1000); + max_inc_val = div_s64(max_inc_val, 1000); min_dec_val = chip->learning_data.learned_cc_uah * (1000 - chip->learning_data.max_decrement); - do_div(min_dec_val, 1000); + min_dec_val = div_s64(min_dec_val, 1000); old_cap = chip->learning_data.learned_cc_uah; if (chip->learning_data.cc_uah > max_inc_val) @@ -3083,6 +3730,32 @@ static void fg_cap_learning_post_process(struct fg_chip *chip) chip->learning_data.learned_cc_uah = chip->learning_data.cc_uah; + if (chip->learning_data.max_cap_limit) { + max_inc_val = (int64_t)chip->nom_cap_uah * (1000 + + chip->learning_data.max_cap_limit); + max_inc_val = div64_u64(max_inc_val, 1000); + if (chip->learning_data.cc_uah > max_inc_val) { + if (fg_debug_mask & FG_AGING) + pr_info("learning capacity %lld goes above max limit %lld\n", + chip->learning_data.cc_uah, + max_inc_val); + chip->learning_data.learned_cc_uah = max_inc_val; + } + } + + if (chip->learning_data.min_cap_limit) { + min_dec_val = (int64_t)chip->nom_cap_uah * (1000 - + chip->learning_data.min_cap_limit); + min_dec_val = div64_u64(min_dec_val, 1000); + if (chip->learning_data.cc_uah < min_dec_val) { + if (fg_debug_mask & FG_AGING) + pr_info("learning capacity %lld goes below min limit %lld\n", + chip->learning_data.cc_uah, + min_dec_val); + chip->learning_data.learned_cc_uah = min_dec_val; + } + } + fg_cap_learning_save_data(chip); if (fg_debug_mask & FG_AGING) pr_info("final cc_uah = %lld, learned capacity %lld -> %lld uah\n", @@ -3142,7 +3815,7 @@ static int 
fg_cap_learning_check(struct fg_chip *chip) if (battery_soc * 100 / FULL_PERCENT_3B > chip->learning_data.max_start_soc) { if (fg_debug_mask & FG_AGING) - pr_info("battery soc too low (%d < %d), aborting\n", + pr_info("battery soc too high (%d > %d), aborting\n", battery_soc * 100 / FULL_PERCENT_3B, chip->learning_data.max_start_soc); fg_mem_release(chip); @@ -3226,6 +3899,17 @@ static int fg_cap_learning_check(struct fg_chip *chip) } fg_cap_learning_stop(chip); + } else if (chip->status == POWER_SUPPLY_STATUS_FULL) { + if (chip->wa_flag & USE_CC_SOC_REG) { + /* reset SW_CC_SOC register to 100% upon charge_full */ + rc = fg_mem_write(chip, (u8 *)&cc_pc_100, + CC_SOC_BASE_REG, 4, CC_SOC_OFFSET, 0); + if (rc) + pr_err("Failed to reset CC_SOC_REG rc=%d\n", + rc); + else if (fg_debug_mask & FG_STATUS) + pr_info("Reset SW_CC_SOC to full value\n"); + } } fail: @@ -3323,7 +4007,14 @@ static void status_change_work(struct work_struct *work) struct fg_chip, status_change_work); unsigned long current_time = 0; - int cc_soc, rc, capacity = get_prop_capacity(chip); + int cc_soc, batt_soc, rc, capacity = get_prop_capacity(chip); + bool batt_missing = is_battery_missing(chip); + + if (batt_missing) { + if (fg_debug_mask & FG_STATUS) + pr_info("Battery is missing\n"); + return; + } if (chip->esr_pulse_tune_en) { fg_stay_awake(&chip->esr_extract_wakeup_source); @@ -3343,19 +4034,34 @@ static void status_change_work(struct work_struct *work) } if (chip->status == POWER_SUPPLY_STATUS_FULL || chip->status == POWER_SUPPLY_STATUS_CHARGING) { - if (!chip->vbat_low_irq_enabled) { + if (!chip->vbat_low_irq_enabled && + !chip->use_vbat_low_empty_soc) { enable_irq(chip->batt_irq[VBATT_LOW].irq); enable_irq_wake(chip->batt_irq[VBATT_LOW].irq); chip->vbat_low_irq_enabled = true; } + + if (!chip->full_soc_irq_enabled) { + enable_irq(chip->soc_irq[FULL_SOC].irq); + enable_irq_wake(chip->soc_irq[FULL_SOC].irq); + chip->full_soc_irq_enabled = true; + } + if (!!(chip->wa_flag & PULSE_REQUEST_WA) && capacity == 100) fg_configure_soc(chip); } else if (chip->status == POWER_SUPPLY_STATUS_DISCHARGING) { - if (chip->vbat_low_irq_enabled) { + if (chip->vbat_low_irq_enabled && + !chip->use_vbat_low_empty_soc) { disable_irq_wake(chip->batt_irq[VBATT_LOW].irq); disable_irq_nosync(chip->batt_irq[VBATT_LOW].irq); chip->vbat_low_irq_enabled = false; } + + if (chip->full_soc_irq_enabled) { + disable_irq_wake(chip->soc_irq[FULL_SOC].irq); + disable_irq_nosync(chip->soc_irq[FULL_SOC].irq); + chip->full_soc_irq_enabled = false; + } } fg_cap_learning_check(chip); schedule_work(&chip->update_esr_work); @@ -3368,6 +4074,42 @@ static void status_change_work(struct work_struct *work) } if (chip->prev_status != chip->status && chip->last_sram_update_time) { + /* + * Reset SW_CC_SOC to a value based off battery SOC when + * the device is discharging. + */ + if (chip->status == POWER_SUPPLY_STATUS_DISCHARGING) { + batt_soc = get_battery_soc_raw(chip); + if (!batt_soc) + return; + + batt_soc = div64_s64((int64_t)batt_soc * + FULL_PERCENT_28BIT, FULL_PERCENT_3B); + rc = fg_mem_write(chip, (u8 *)&batt_soc, + CC_SOC_BASE_REG, 4, CC_SOC_OFFSET, 0); + if (rc) + pr_err("Failed to reset CC_SOC_REG rc=%d\n", + rc); + else if (fg_debug_mask & FG_STATUS) + pr_info("Reset SW_CC_SOC to %x\n", batt_soc); + } + + /* + * Schedule the update_temp_work whenever there is a status + * change. This is essential for applying the slope limiter + * coefficients when that feature is enabled. 
+	 */
+	if (chip->last_temp_update_time && chip->soc_slope_limiter_en) {
+		cancel_delayed_work_sync(&chip->update_temp_work);
+		schedule_delayed_work(&chip->update_temp_work,
+			msecs_to_jiffies(0));
+	}
+
+	if (chip->dischg_gain.enable) {
+		fg_stay_awake(&chip->dischg_gain_wakeup_source);
+		schedule_work(&chip->dischg_gain_work);
+	}
+
 	get_current_time(&current_time);
 	/*
 	 * When charging status changes, update SRAM parameters if it
@@ -3393,10 +4135,10 @@ static void status_change_work(struct work_struct *work)
 	}
 	if ((chip->wa_flag & USE_CC_SOC_REG) && chip->bad_batt_detection_en
 			&& chip->safety_timer_expired) {
-		chip->sw_cc_soc_data.delta_soc =
-			DIV_ROUND_CLOSEST(abs(cc_soc -
-			chip->sw_cc_soc_data.init_cc_soc)
-			* 100, FULL_PERCENT_28BIT);
+		uint64_t delta_cc_soc = abs(cc_soc -
+				chip->sw_cc_soc_data.init_cc_soc);
+		chip->sw_cc_soc_data.delta_soc = DIV_ROUND_CLOSEST_ULL(
+				delta_cc_soc * 100, FULL_PERCENT_28BIT);
 		chip->sw_cc_soc_data.full_capacity =
 			chip->sw_cc_soc_data.delta_soc +
 			chip->sw_cc_soc_data.init_sys_soc;
@@ -3539,6 +4281,395 @@ static int fg_init_batt_temp_state(struct fg_chip *chip)
 	return rc;
 }
+static int fg_restore_cc_soc(struct fg_chip *chip)
+{
+	int rc;
+
+	if (!chip->use_last_cc_soc || !chip->last_cc_soc)
+		return 0;
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("Restoring cc_soc: %lld\n", chip->last_cc_soc);
+
+	rc = fg_mem_write(chip, (u8 *)&chip->last_cc_soc,
+			fg_data[FG_DATA_CC_CHARGE].address, 4,
+			fg_data[FG_DATA_CC_CHARGE].offset, 0);
+	if (rc)
+		pr_err("failed to update CC_SOC rc=%d\n", rc);
+	else
+		chip->use_last_cc_soc = false;
+
+	return rc;
+}
+
+#define SRAM_MONOTONIC_SOC_REG		0x574
+#define SRAM_MONOTONIC_SOC_OFFSET	2
+static int fg_restore_soc(struct fg_chip *chip)
+{
+	int rc;
+	u16 msoc;
+
+	if (chip->use_last_soc && chip->last_soc)
+		msoc = DIV_ROUND_CLOSEST(chip->last_soc * 0xFFFF,
+				FULL_SOC_RAW);
+	else
+		return 0;
+
+	if (fg_debug_mask & FG_STATUS)
+		pr_info("Restored soc: %d\n", msoc);
+
+	rc = fg_mem_write(chip, (u8 *)&msoc, SRAM_MONOTONIC_SOC_REG, 2,
+			SRAM_MONOTONIC_SOC_OFFSET, 0);
+	if (rc)
+		pr_err("failed to write M_SOC_REG rc=%d\n", rc);
+
+	return rc;
+}
+
+#define NOM_CAP_REG			0x4F4
+#define CAPACITY_DELTA_DECIPCT		500
+static int load_battery_aging_data(struct fg_chip *chip)
+{
+	int rc = 0;
+	u8 buffer[2];
+	int16_t cc_mah;
+	int64_t delta_cc_uah, pct_nom_cap_uah;
+
+	rc = fg_mem_read(chip, buffer, NOM_CAP_REG, 2, 0, 0);
+	if (rc) {
+		pr_err("Failed to read nominal capacitance: %d\n", rc);
+		goto out;
+	}
+
+	chip->nom_cap_uah = bcap_uah_2b(buffer);
+	chip->actual_cap_uah = chip->nom_cap_uah;
+
+	if (chip->learning_data.learned_cc_uah == 0) {
+		chip->learning_data.learned_cc_uah = chip->nom_cap_uah;
+		fg_cap_learning_save_data(chip);
+	} else if (chip->learning_data.feedback_on) {
+		delta_cc_uah = abs(chip->learning_data.learned_cc_uah -
+				chip->nom_cap_uah);
+		pct_nom_cap_uah = div64_s64((int64_t)chip->nom_cap_uah *
+				CAPACITY_DELTA_DECIPCT, 1000);
+		/*
+		 * If the learned capacity is out of range, say by 50%
+		 * from the nominal capacity, then overwrite the learned
+		 * capacity with the nominal capacity.
+ */ + if (chip->nom_cap_uah && delta_cc_uah > pct_nom_cap_uah) { + if (fg_debug_mask & FG_AGING) { + pr_info("learned_cc_uah: %lld is higher than expected\n", + chip->learning_data.learned_cc_uah); + pr_info("Capping it to nominal:%d\n", + chip->nom_cap_uah); + } + chip->learning_data.learned_cc_uah = chip->nom_cap_uah; + fg_cap_learning_save_data(chip); + } else { + cc_mah = div64_s64(chip->learning_data.learned_cc_uah, + 1000); + rc = fg_calc_and_store_cc_soc_coeff(chip, cc_mah); + if (rc) + pr_err("Error in restoring cc_soc_coeff, rc:%d\n", + rc); + } + } +out: + return rc; +} + +static void fg_restore_battery_info(struct fg_chip *chip) +{ + int rc; + char buf[4] = {0, 0, 0, 0}; + + chip->last_soc = DIV_ROUND_CLOSEST(chip->batt_info[BATT_INFO_SOC] * + FULL_SOC_RAW, FULL_CAPACITY); + chip->last_cc_soc = div64_s64((int64_t)chip->last_soc * + FULL_PERCENT_28BIT, FULL_SOC_RAW); + chip->use_last_soc = true; + chip->use_last_cc_soc = true; + rc = fg_restore_soc(chip); + if (rc) { + pr_err("Error in restoring soc, rc=%d\n", rc); + goto out; + } + + rc = fg_restore_cc_soc(chip); + if (rc) { + pr_err("Error in restoring cc_soc, rc=%d\n", rc); + goto out; + } + + rc = fg_mem_write(chip, buf, + fg_data[FG_DATA_VINT_ERR].address, + fg_data[FG_DATA_VINT_ERR].len, + fg_data[FG_DATA_VINT_ERR].offset, 0); + if (rc) { + pr_err("Failed to write to VINT_ERR, rc=%d\n", rc); + goto out; + } + + chip->learning_data.learned_cc_uah = chip->batt_info[BATT_INFO_FCC]; + rc = load_battery_aging_data(chip); + if (rc) { + pr_err("Failed to load battery aging data, rc:%d\n", rc); + goto out; + } + + if (chip->power_supply_registered) + power_supply_changed(chip->bms_psy); + + if (fg_debug_mask & FG_STATUS) + pr_info("Restored battery info!\n"); + +out: + return; +} + +#define DELTA_BATT_TEMP 30 +static bool fg_validate_battery_info(struct fg_chip *chip) +{ + int i, delta_pct, batt_id_kohm, batt_temp, batt_volt_mv, batt_soc; + + for (i = 1; i < BATT_INFO_MAX; i++) { + if (fg_debug_mask & FG_STATUS) + pr_info("batt_info[%d]: %d\n", i, chip->batt_info[i]); + + if ((chip->batt_info[i] == 0 && i != BATT_INFO_TEMP) || + chip->batt_info[i] == INT_MAX) { + if (fg_debug_mask & FG_STATUS) + pr_info("batt_info[%d]:%d is invalid\n", i, + chip->batt_info[i]); + return false; + } + } + + batt_id_kohm = get_sram_prop_now(chip, FG_DATA_BATT_ID) / 1000; + if (batt_id_kohm != chip->batt_info[BATT_INFO_RES_ID]) { + if (fg_debug_mask & FG_STATUS) + pr_info("batt_id(%dK) does not match the stored batt_id(%dK)\n", + batt_id_kohm, + chip->batt_info[BATT_INFO_RES_ID]); + return false; + } + + batt_temp = get_sram_prop_now(chip, FG_DATA_BATT_TEMP); + if (abs(chip->batt_info[BATT_INFO_TEMP] - batt_temp) > + DELTA_BATT_TEMP) { + if (fg_debug_mask & FG_STATUS) + pr_info("batt_temp(%d) is higher/lower than stored batt_temp(%d)\n", + batt_temp, chip->batt_info[BATT_INFO_TEMP]); + return false; + } + + if (chip->batt_info[BATT_INFO_FCC] < 0) { + if (fg_debug_mask & FG_STATUS) + pr_info("batt_fcc cannot be %d\n", + chip->batt_info[BATT_INFO_FCC]); + return false; + } + + batt_volt_mv = get_sram_prop_now(chip, FG_DATA_VOLTAGE) / 1000; + batt_soc = get_monotonic_soc_raw(chip); + if (batt_soc != 0 && batt_soc != FULL_SOC_RAW) + batt_soc = DIV_ROUND_CLOSEST((batt_soc - 1) * + (FULL_CAPACITY - 2), FULL_SOC_RAW - 2) + 1; + + if (*chip->batt_range_ocv && chip->batt_max_voltage_uv > 1000) + delta_pct = DIV_ROUND_CLOSEST(abs(batt_volt_mv - + chip->batt_info[BATT_INFO_VOLTAGE]) * 100, + chip->batt_max_voltage_uv / 1000); + else + delta_pct = abs(batt_soc - 
chip->batt_info[BATT_INFO_SOC]); + + if (fg_debug_mask & FG_STATUS) + pr_info("Validating by %s batt_voltage:%d capacity:%d delta_pct:%d\n", + *chip->batt_range_ocv ? "OCV" : "SOC", batt_volt_mv, + batt_soc, delta_pct); + + if (*chip->batt_range_pct && delta_pct > *chip->batt_range_pct) { + if (fg_debug_mask & FG_STATUS) + pr_info("delta_pct(%d) is higher than batt_range_pct(%d)\n", + delta_pct, *chip->batt_range_pct); + return false; + } + + return true; +} + +static int fg_set_battery_info(struct fg_chip *chip, int val) +{ + if (chip->batt_info_id < 0 || + chip->batt_info_id >= BATT_INFO_MAX) { + pr_err("Invalid batt_info_id %d\n", chip->batt_info_id); + chip->batt_info_id = 0; + return -EINVAL; + } + + if (chip->batt_info_id == BATT_INFO_NOTIFY && val == INT_MAX - 1) { + if (fg_debug_mask & FG_STATUS) + pr_info("Notified from userspace\n"); + if (chip->batt_info_restore && !chip->ima_error_handling) { + if (!fg_validate_battery_info(chip)) { + if (fg_debug_mask & FG_STATUS) + pr_info("Validating battery info failed\n"); + } else { + fg_restore_battery_info(chip); + } + } + } + + chip->batt_info[chip->batt_info_id] = val; + return 0; +} + +static enum power_supply_property fg_power_props[] = { + POWER_SUPPLY_PROP_CAPACITY, + POWER_SUPPLY_PROP_CAPACITY_RAW, + POWER_SUPPLY_PROP_CURRENT_NOW, + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_VOLTAGE_OCV, + POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN, + POWER_SUPPLY_PROP_CHARGE_COUNTER, + POWER_SUPPLY_PROP_CHARGE_NOW, + POWER_SUPPLY_PROP_CHARGE_NOW_RAW, + POWER_SUPPLY_PROP_CHARGE_NOW_ERROR, + POWER_SUPPLY_PROP_CHARGE_FULL, + POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, + POWER_SUPPLY_PROP_TEMP, + POWER_SUPPLY_PROP_COOL_TEMP, + POWER_SUPPLY_PROP_WARM_TEMP, + POWER_SUPPLY_PROP_RESISTANCE, + POWER_SUPPLY_PROP_RESISTANCE_ID, + POWER_SUPPLY_PROP_BATTERY_TYPE, + POWER_SUPPLY_PROP_UPDATE_NOW, + POWER_SUPPLY_PROP_ESR_COUNT, + POWER_SUPPLY_PROP_VOLTAGE_MIN, + POWER_SUPPLY_PROP_CYCLE_COUNT, + POWER_SUPPLY_PROP_CYCLE_COUNT_ID, + POWER_SUPPLY_PROP_HI_POWER, + POWER_SUPPLY_PROP_SOC_REPORTING_READY, + POWER_SUPPLY_PROP_IGNORE_FALSE_NEGATIVE_ISENSE, + POWER_SUPPLY_PROP_ENABLE_JEITA_DETECTION, + POWER_SUPPLY_PROP_BATTERY_INFO, + POWER_SUPPLY_PROP_BATTERY_INFO_ID, +}; + +static int fg_power_get_property(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + struct fg_chip *chip = power_supply_get_drvdata(psy); + bool vbatt_low_sts; + + switch (psp) { + case POWER_SUPPLY_PROP_BATTERY_TYPE: + if (chip->battery_missing) + val->strval = missing_batt_type; + else if (chip->fg_restarting) + val->strval = loading_batt_type; + else + val->strval = chip->batt_type; + break; + case POWER_SUPPLY_PROP_CAPACITY: + val->intval = get_prop_capacity(chip); + break; + case POWER_SUPPLY_PROP_CAPACITY_RAW: + val->intval = get_sram_prop_now(chip, FG_DATA_BATT_SOC); + break; + case POWER_SUPPLY_PROP_CHARGE_NOW_ERROR: + val->intval = get_sram_prop_now(chip, FG_DATA_VINT_ERR); + break; + case POWER_SUPPLY_PROP_CURRENT_NOW: + val->intval = get_sram_prop_now(chip, FG_DATA_CURRENT); + break; + case POWER_SUPPLY_PROP_VOLTAGE_NOW: + val->intval = get_sram_prop_now(chip, FG_DATA_VOLTAGE); + break; + case POWER_SUPPLY_PROP_VOLTAGE_OCV: + val->intval = get_sram_prop_now(chip, FG_DATA_OCV); + break; + case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN: + val->intval = chip->batt_max_voltage_uv; + break; + case POWER_SUPPLY_PROP_TEMP: + val->intval = get_sram_prop_now(chip, FG_DATA_BATT_TEMP); + break; + case POWER_SUPPLY_PROP_COOL_TEMP: + val->intval = 
get_prop_jeita_temp(chip, FG_MEM_SOFT_COLD); + break; + case POWER_SUPPLY_PROP_WARM_TEMP: + val->intval = get_prop_jeita_temp(chip, FG_MEM_SOFT_HOT); + break; + case POWER_SUPPLY_PROP_RESISTANCE: + val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ESR); + break; + case POWER_SUPPLY_PROP_ESR_COUNT: + val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ESR_COUNT); + break; + case POWER_SUPPLY_PROP_CYCLE_COUNT: + val->intval = fg_get_cycle_count(chip); + break; + case POWER_SUPPLY_PROP_CYCLE_COUNT_ID: + val->intval = chip->cyc_ctr.id; + break; + case POWER_SUPPLY_PROP_RESISTANCE_ID: + val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ID); + break; + case POWER_SUPPLY_PROP_UPDATE_NOW: + val->intval = 0; + break; + case POWER_SUPPLY_PROP_VOLTAGE_MIN: + if (!fg_get_vbatt_status(chip, &vbatt_low_sts)) + val->intval = (int)vbatt_low_sts; + else + val->intval = 1; + break; + case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: + val->intval = chip->nom_cap_uah; + break; + case POWER_SUPPLY_PROP_CHARGE_FULL: + val->intval = chip->learning_data.learned_cc_uah; + break; + case POWER_SUPPLY_PROP_CHARGE_NOW: + val->intval = chip->learning_data.cc_uah; + break; + case POWER_SUPPLY_PROP_CHARGE_NOW_RAW: + val->intval = get_sram_prop_now(chip, FG_DATA_CC_CHARGE); + break; + case POWER_SUPPLY_PROP_CHARGE_COUNTER: + val->intval = fg_get_current_cc(chip); + break; + case POWER_SUPPLY_PROP_HI_POWER: + val->intval = !!chip->bcl_lpm_disabled; + break; + case POWER_SUPPLY_PROP_SOC_REPORTING_READY: + val->intval = !!chip->soc_reporting_ready; + break; + case POWER_SUPPLY_PROP_IGNORE_FALSE_NEGATIVE_ISENSE: + val->intval = !chip->allow_false_negative_isense; + break; + case POWER_SUPPLY_PROP_ENABLE_JEITA_DETECTION: + val->intval = chip->use_soft_jeita_irq; + break; + case POWER_SUPPLY_PROP_BATTERY_INFO: + if (chip->batt_info_id < 0 || + chip->batt_info_id >= BATT_INFO_MAX) + return -EINVAL; + val->intval = chip->batt_info[chip->batt_info_id]; + break; + case POWER_SUPPLY_PROP_BATTERY_INFO_ID: + val->intval = chip->batt_info_id; + break; + default: + return -EINVAL; + } + + return 0; +} + static int fg_power_set_property(struct power_supply *psy, enum power_supply_property psp, const union power_supply_propval *val) @@ -3557,6 +4688,67 @@ static int fg_power_set_property(struct power_supply *psy, if (val->intval) update_sram_data(chip, &unused); break; + case POWER_SUPPLY_PROP_IGNORE_FALSE_NEGATIVE_ISENSE: + rc = set_prop_ignore_false_negative_isense(chip, !!val->intval); + if (rc) + pr_err("set_prop_ignore_false_negative_isense failed, rc=%d\n", + rc); + else + chip->allow_false_negative_isense = !val->intval; + break; + case POWER_SUPPLY_PROP_ENABLE_JEITA_DETECTION: + if (chip->use_soft_jeita_irq == !!val->intval) { + pr_debug("JEITA irq %s, ignore!\n", + chip->use_soft_jeita_irq ? 
+ "enabled" : "disabled"); + break; + } + chip->use_soft_jeita_irq = !!val->intval; + if (chip->use_soft_jeita_irq) { + if (chip->batt_irq[JEITA_SOFT_COLD].disabled) { + enable_irq( + chip->batt_irq[JEITA_SOFT_COLD].irq); + chip->batt_irq[JEITA_SOFT_COLD].disabled = + false; + } + if (!chip->batt_irq[JEITA_SOFT_COLD].wakeup) { + enable_irq_wake( + chip->batt_irq[JEITA_SOFT_COLD].irq); + chip->batt_irq[JEITA_SOFT_COLD].wakeup = true; + } + if (chip->batt_irq[JEITA_SOFT_HOT].disabled) { + enable_irq( + chip->batt_irq[JEITA_SOFT_HOT].irq); + chip->batt_irq[JEITA_SOFT_HOT].disabled = false; + } + if (!chip->batt_irq[JEITA_SOFT_HOT].wakeup) { + enable_irq_wake( + chip->batt_irq[JEITA_SOFT_HOT].irq); + chip->batt_irq[JEITA_SOFT_HOT].wakeup = true; + } + } else { + if (chip->batt_irq[JEITA_SOFT_COLD].wakeup) { + disable_irq_wake( + chip->batt_irq[JEITA_SOFT_COLD].irq); + chip->batt_irq[JEITA_SOFT_COLD].wakeup = false; + } + if (!chip->batt_irq[JEITA_SOFT_COLD].disabled) { + disable_irq_nosync( + chip->batt_irq[JEITA_SOFT_COLD].irq); + chip->batt_irq[JEITA_SOFT_COLD].disabled = true; + } + if (chip->batt_irq[JEITA_SOFT_HOT].wakeup) { + disable_irq_wake( + chip->batt_irq[JEITA_SOFT_HOT].irq); + chip->batt_irq[JEITA_SOFT_HOT].wakeup = false; + } + if (!chip->batt_irq[JEITA_SOFT_HOT].disabled) { + disable_irq_nosync( + chip->batt_irq[JEITA_SOFT_HOT].irq); + chip->batt_irq[JEITA_SOFT_HOT].disabled = true; + } + } + break; case POWER_SUPPLY_PROP_STATUS: chip->prev_status = chip->status; chip->status = val->intval; @@ -3599,6 +4791,12 @@ static int fg_power_set_property(struct power_supply *psy, schedule_work(&chip->bcl_hi_power_work); } break; + case POWER_SUPPLY_PROP_BATTERY_INFO: + rc = fg_set_battery_info(chip, val->intval); + break; + case POWER_SUPPLY_PROP_BATTERY_INFO_ID: + chip->batt_info_id = val->intval; + break; default: return -EINVAL; }; @@ -3613,6 +4811,8 @@ static int fg_property_is_writeable(struct power_supply *psy, case POWER_SUPPLY_PROP_COOL_TEMP: case POWER_SUPPLY_PROP_WARM_TEMP: case POWER_SUPPLY_PROP_CYCLE_COUNT_ID: + case POWER_SUPPLY_PROP_BATTERY_INFO: + case POWER_SUPPLY_PROP_BATTERY_INFO_ID: return 1; default: break; @@ -3807,21 +5007,197 @@ done: fg_relax(&chip->gain_comp_wakeup_source); } -#define BATT_MISSING_STS BIT(6) -static bool is_battery_missing(struct fg_chip *chip) +static void cc_soc_store_work(struct work_struct *work) +{ + struct fg_chip *chip = container_of(work, struct fg_chip, + cc_soc_store_work); + int cc_soc_pct; + + if (!chip->nom_cap_uah) { + pr_err("nom_cap_uah zero!\n"); + fg_relax(&chip->cc_soc_wakeup_source); + return; + } + + cc_soc_pct = get_sram_prop_now(chip, FG_DATA_CC_CHARGE); + cc_soc_pct = div64_s64(cc_soc_pct * 100, + chip->nom_cap_uah); + chip->last_cc_soc = div64_s64((int64_t)chip->last_soc * + FULL_PERCENT_28BIT, FULL_SOC_RAW); + + if (fg_debug_mask & FG_STATUS) + pr_info("cc_soc_pct: %d last_cc_soc: %lld\n", cc_soc_pct, + chip->last_cc_soc); + + if (fg_reset_on_lockup && (chip->cc_soc_limit_pct > 0 && + cc_soc_pct >= chip->cc_soc_limit_pct)) { + pr_err("CC_SOC out of range\n"); + fg_check_ima_error_handling(chip); + } + + fg_relax(&chip->cc_soc_wakeup_source); +} + +#define HARD_JEITA_ALARM_CHECK_NS 10000000000ULL +static enum alarmtimer_restart fg_hard_jeita_alarm_cb(struct alarm *alarm, + ktime_t now) +{ + struct fg_chip *chip = container_of(alarm, + struct fg_chip, hard_jeita_alarm); + int rc, health = POWER_SUPPLY_HEALTH_UNKNOWN; + u8 regval; + bool batt_hot, batt_cold; + union power_supply_propval val = {0, }; + + if 
(!is_usb_present(chip)) {
+		pr_debug("USB plugged out, stop the timer!\n");
+		return ALARMTIMER_NORESTART;
+	}
+
+	rc = fg_read(chip, &regval, BATT_INFO_STS(chip->batt_base), 1);
+	if (rc) {
+		pr_err("read batt_sts failed, rc=%d\n", rc);
+		goto recheck;
+	}
+
+	batt_hot = !!(regval & JEITA_HARD_HOT_RT_STS);
+	batt_cold = !!(regval & JEITA_HARD_COLD_RT_STS);
+	if (batt_hot && batt_cold) {
+		pr_debug("Hot && cold can't co-exist\n");
+		goto recheck;
+	}
+
+	if ((batt_hot == chip->batt_hot) && (batt_cold == chip->batt_cold)) {
+		pr_debug("battery JEITA state not changed, ignore\n");
+		goto recheck;
+	}
+
+	if (batt_cold != chip->batt_cold) {
+		/* cool --> cold */
+		if (chip->batt_cool) {
+			chip->batt_cool = false;
+			chip->batt_cold = true;
+			health = POWER_SUPPLY_HEALTH_COLD;
+		} else if (chip->batt_cold) { /* cold --> cool */
+			chip->batt_cool = true;
+			chip->batt_cold = false;
+			health = POWER_SUPPLY_HEALTH_COOL;
+		}
+	}
+
+	if (batt_hot != chip->batt_hot) {
+		/* warm --> hot */
+		if (chip->batt_warm) {
+			chip->batt_warm = false;
+			chip->batt_hot = true;
+			health = POWER_SUPPLY_HEALTH_OVERHEAT;
+		} else if (chip->batt_hot) { /* hot --> warm */
+			chip->batt_hot = false;
+			chip->batt_warm = true;
+			health = POWER_SUPPLY_HEALTH_WARM;
+		}
+	}
+
+	if (health != POWER_SUPPLY_HEALTH_UNKNOWN) {
+		pr_debug("FG report battery health: %d\n", health);
+		val.intval = health;
+		rc = power_supply_set_property(chip->batt_psy,
+				POWER_SUPPLY_PROP_HEALTH, &val);
+		if (rc)
+			pr_err("Set batt_psy health: %d failed\n", health);
+	}
+
+recheck:
+	alarm_forward_now(alarm, ns_to_ktime(HARD_JEITA_ALARM_CHECK_NS));
+	return ALARMTIMER_RESTART;
+}
+
+#define BATT_SOFT_COLD_STS	BIT(0)
+#define BATT_SOFT_HOT_STS	BIT(1)
+static irqreturn_t fg_jeita_soft_hot_irq_handler(int irq, void *_chip)
 {
 	int rc;
-	u8 fg_batt_sts;
+	struct fg_chip *chip = _chip;
+	u8 regval;
+	bool batt_warm;
+	union power_supply_propval val = {0, };

-	rc = fg_read(chip, &fg_batt_sts,
-				INT_RT_STS(chip->batt_base), 1);
+	if (!is_charger_available(chip))
+		return IRQ_HANDLED;
+
+	rc = fg_read(chip, &regval, INT_RT_STS(chip->batt_base), 1);
 	if (rc) {
 		pr_err("spmi read failed: addr=%03X, rc=%d\n",
 				INT_RT_STS(chip->batt_base), rc);
-		return false;
+		return IRQ_HANDLED;
 	}

-	return (fg_batt_sts & BATT_MISSING_STS) ? true : false;
+	batt_warm = !!(regval & BATT_SOFT_HOT_STS);
+	if (chip->batt_warm == batt_warm) {
+		pr_debug("warm state not change, ignore!\n");
+		return IRQ_HANDLED;
+	}
+
+	chip->batt_warm = batt_warm;
+	if (batt_warm) {
+		val.intval = POWER_SUPPLY_HEALTH_WARM;
+		power_supply_set_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_HEALTH, &val);
+		/* kick the alarm timer for hard hot polling */
+		alarm_start_relative(&chip->hard_jeita_alarm,
+			ns_to_ktime(HARD_JEITA_ALARM_CHECK_NS));
+	} else {
+		val.intval = POWER_SUPPLY_HEALTH_GOOD;
+		power_supply_set_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_HEALTH, &val);
+		/* cancel the alarm timer */
+		alarm_try_to_cancel(&chip->hard_jeita_alarm);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_jeita_soft_cold_irq_handler(int irq, void *_chip)
+{
+	int rc;
+	struct fg_chip *chip = _chip;
+	u8 regval;
+	bool batt_cool;
+	union power_supply_propval val = {0, };
+
+	if (!is_charger_available(chip))
+		return IRQ_HANDLED;
+
+	rc = fg_read(chip, &regval, INT_RT_STS(chip->batt_base), 1);
+	if (rc) {
+		pr_err("spmi read failed: addr=%03X, rc=%d\n",
+				INT_RT_STS(chip->batt_base), rc);
+		return IRQ_HANDLED;
+	}
+
+	batt_cool = !!(regval & BATT_SOFT_COLD_STS);
+	if (chip->batt_cool == batt_cool) {
+		pr_debug("cool state not change, ignore\n");
+		return IRQ_HANDLED;
+	}
+
+	chip->batt_cool = batt_cool;
+	if (batt_cool) {
+		val.intval = POWER_SUPPLY_HEALTH_COOL;
+		power_supply_set_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_HEALTH, &val);
+		/* kick the alarm timer for hard cold polling */
+		alarm_start_relative(&chip->hard_jeita_alarm,
+			ns_to_ktime(HARD_JEITA_ALARM_CHECK_NS));
+	} else {
+		val.intval = POWER_SUPPLY_HEALTH_GOOD;
+		power_supply_set_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_HEALTH, &val);
+		/* cancel the alarm timer */
+		alarm_try_to_cancel(&chip->hard_jeita_alarm);
+	}
+
+	return IRQ_HANDLED;
 }

 #define SOC_FIRST_EST_DONE	BIT(5)
@@ -3841,21 +5217,40 @@ static bool is_first_est_done(struct fg_chip *chip)
 	return (fg_soc_sts & SOC_FIRST_EST_DONE) ?
true : false; } +#define FG_EMPTY_DEBOUNCE_MS 1500 static irqreturn_t fg_vbatt_low_handler(int irq, void *_chip) { struct fg_chip *chip = _chip; - int rc; bool vbatt_low_sts; if (fg_debug_mask & FG_IRQS) pr_info("vbatt-low triggered\n"); - if (chip->status == POWER_SUPPLY_STATUS_CHARGING) { - rc = fg_get_vbatt_status(chip, &vbatt_low_sts); - if (rc) { - pr_err("error in reading vbatt_status, rc:%d\n", rc); + /* handle empty soc based on vbatt-low interrupt */ + if (chip->use_vbat_low_empty_soc) { + if (fg_get_vbatt_status(chip, &vbatt_low_sts)) goto out; + + if (vbatt_low_sts) { + if (fg_debug_mask & FG_IRQS) + pr_info("Vbatt is low\n"); + disable_irq_wake(chip->batt_irq[VBATT_LOW].irq); + disable_irq_nosync(chip->batt_irq[VBATT_LOW].irq); + chip->vbat_low_irq_enabled = false; + fg_stay_awake(&chip->empty_check_wakeup_source); + schedule_delayed_work(&chip->check_empty_work, + msecs_to_jiffies(FG_EMPTY_DEBOUNCE_MS)); + } else { + if (fg_debug_mask & FG_IRQS) + pr_info("Vbatt is high\n"); + chip->soc_empty = false; } + goto out; + } + + if (chip->status == POWER_SUPPLY_STATUS_CHARGING) { + if (fg_get_vbatt_status(chip, &vbatt_low_sts)) + goto out; if (!vbatt_low_sts && chip->vbat_low_irq_enabled) { if (fg_debug_mask & FG_IRQS) pr_info("disabling vbatt_low irq\n"); @@ -3876,8 +5271,10 @@ static irqreturn_t fg_batt_missing_irq_handler(int irq, void *_chip) bool batt_missing = is_battery_missing(chip); if (batt_missing) { + fg_cap_learning_stop(chip); chip->battery_missing = true; chip->profile_loaded = false; + chip->soc_reporting_ready = false; chip->batt_type = default_batt_type; mutex_lock(&chip->cyc_ctr.lock); if (fg_debug_mask & FG_IRQS) @@ -3885,17 +5282,10 @@ static irqreturn_t fg_batt_missing_irq_handler(int irq, void *_chip) clear_cycle_counter(chip); mutex_unlock(&chip->cyc_ctr.lock); } else { - if (!chip->use_otp_profile) { - reinit_completion(&chip->batt_id_avail); - reinit_completion(&chip->first_soc_done); - schedule_delayed_work(&chip->batt_profile_init, 0); - cancel_delayed_work(&chip->update_sram_data); - schedule_delayed_work( - &chip->update_sram_data, - msecs_to_jiffies(0)); - } else { + if (!chip->use_otp_profile) + fg_handle_battery_insertion(chip); + else chip->battery_missing = false; - } } if (fg_debug_mask & FG_IRQS) @@ -3943,7 +5333,7 @@ static irqreturn_t fg_soc_irq_handler(int irq, void *_chip) { struct fg_chip *chip = _chip; u8 soc_rt_sts; - int rc; + int rc, msoc; rc = fg_read(chip, &soc_rt_sts, INT_RT_STS(chip->soc_base), 1); if (rc) { @@ -3954,6 +5344,37 @@ static irqreturn_t fg_soc_irq_handler(int irq, void *_chip) if (fg_debug_mask & FG_IRQS) pr_info("triggered 0x%x\n", soc_rt_sts); + if (chip->dischg_gain.enable) { + fg_stay_awake(&chip->dischg_gain_wakeup_source); + schedule_work(&chip->dischg_gain_work); + } + + if (chip->soc_slope_limiter_en) { + fg_stay_awake(&chip->slope_limit_wakeup_source); + schedule_work(&chip->slope_limiter_work); + } + + /* Backup last soc every delta soc interrupt */ + chip->use_last_soc = false; + if (fg_reset_on_lockup) { + if (!chip->ima_error_handling) + chip->last_soc = get_monotonic_soc_raw(chip); + if (fg_debug_mask & FG_STATUS) + pr_info("last_soc: %d\n", chip->last_soc); + + fg_stay_awake(&chip->cc_soc_wakeup_source); + schedule_work(&chip->cc_soc_store_work); + } + + if (chip->use_vbat_low_empty_soc) { + msoc = get_monotonic_soc_raw(chip); + if (msoc == 0 || chip->soc_empty) { + fg_stay_awake(&chip->empty_check_wakeup_source); + schedule_delayed_work(&chip->check_empty_work, + msecs_to_jiffies(FG_EMPTY_DEBOUNCE_MS)); + 
} + } + schedule_work(&chip->battery_age_work); if (chip->power_supply_registered) @@ -3988,7 +5409,6 @@ static irqreturn_t fg_soc_irq_handler(int irq, void *_chip) return IRQ_HANDLED; } -#define FG_EMPTY_DEBOUNCE_MS 1500 static irqreturn_t fg_empty_soc_irq_handler(int irq, void *_chip) { struct fg_chip *chip = _chip; @@ -4100,16 +5520,15 @@ done: fg_relax(&chip->resume_soc_wakeup_source); } - #define OCV_COEFFS_START_REG 0x4C0 #define OCV_JUNCTION_REG 0x4D8 -#define NOM_CAP_REG 0x4F4 #define CUTOFF_VOLTAGE_REG 0x40C #define RSLOW_CFG_REG 0x538 #define RSLOW_CFG_OFFSET 2 #define RSLOW_THRESH_REG 0x52C #define RSLOW_THRESH_OFFSET 0 -#define TEMP_RS_TO_RSLOW_OFFSET 2 +#define RS_TO_RSLOW_CHG_OFFSET 2 +#define RS_TO_RSLOW_DISCHG_OFFSET 0 #define RSLOW_COMP_REG 0x528 #define RSLOW_COMP_C1_OFFSET 0 #define RSLOW_COMP_C2_OFFSET 2 @@ -4117,7 +5536,6 @@ static int populate_system_data(struct fg_chip *chip) { u8 buffer[24]; int rc, i; - int16_t cc_mah; fg_mem_lock(chip); rc = fg_mem_read(chip, buffer, OCV_COEFFS_START_REG, 24, 0, 0); @@ -4138,30 +5556,21 @@ static int populate_system_data(struct fg_chip *chip) chip->ocv_coeffs[8], chip->ocv_coeffs[9], chip->ocv_coeffs[10], chip->ocv_coeffs[11]); } - rc = fg_mem_read(chip, buffer, OCV_JUNCTION_REG, 1, 0, 0); - chip->ocv_junction_p1p2 = buffer[0] * 100 / 255; - rc |= fg_mem_read(chip, buffer, OCV_JUNCTION_REG, 1, 1, 0); - chip->ocv_junction_p2p3 = buffer[0] * 100 / 255; + rc = fg_mem_read(chip, buffer, OCV_JUNCTION_REG, 2, 0, 0); if (rc) { pr_err("Failed to read ocv junctions: %d\n", rc); goto done; } - rc = fg_mem_read(chip, buffer, NOM_CAP_REG, 2, 0, 0); + + chip->ocv_junction_p1p2 = buffer[0] * 100 / 255; + chip->ocv_junction_p2p3 = buffer[1] * 100 / 255; + + rc = load_battery_aging_data(chip); if (rc) { - pr_err("Failed to read nominal capacitance: %d\n", rc); + pr_err("Failed to load battery aging data, rc:%d\n", rc); goto done; } - chip->nom_cap_uah = bcap_uah_2b(buffer); - chip->actual_cap_uah = chip->nom_cap_uah; - if (chip->learning_data.learned_cc_uah == 0) { - chip->learning_data.learned_cc_uah = chip->nom_cap_uah; - fg_cap_learning_save_data(chip); - } else if (chip->learning_data.feedback_on) { - cc_mah = div64_s64(chip->learning_data.learned_cc_uah, 1000); - rc = fg_calc_and_store_cc_soc_coeff(chip, cc_mah); - if (rc) - pr_err("Error in restoring cc_soc_coeff, rc:%d\n", rc); - } + rc = fg_mem_read(chip, buffer, CUTOFF_VOLTAGE_REG, 2, 0, 0); if (rc) { pr_err("Failed to read cutoff voltage: %d\n", rc); @@ -4188,9 +5597,9 @@ static int populate_system_data(struct fg_chip *chip) } chip->rslow_comp.rslow_thr = buffer[0]; rc = fg_mem_read(chip, buffer, TEMP_RS_TO_RSLOW_REG, 2, - RSLOW_THRESH_OFFSET, 0); + RS_TO_RSLOW_CHG_OFFSET, 0); if (rc) { - pr_err("unable to read rs to rslow: %d\n", rc); + pr_err("unable to read rs to rslow_chg: %d\n", rc); goto done; } memcpy(chip->rslow_comp.rs_to_rslow, buffer, 2); @@ -4207,6 +5616,68 @@ done: return rc; } +static int fg_update_batt_rslow_settings(struct fg_chip *chip) +{ + int64_t rs_to_rslow_chg, rs_to_rslow_dischg, batt_esr, rconn_uohm; + u8 buffer[2]; + int rc; + + rc = fg_mem_read(chip, buffer, BATTERY_ESR_REG, 2, ESR_OFFSET, 0); + if (rc) { + pr_err("unable to read battery_esr: %d\n", rc); + goto done; + } + batt_esr = half_float(buffer); + + rc = fg_mem_read(chip, buffer, TEMP_RS_TO_RSLOW_REG, 2, + RS_TO_RSLOW_DISCHG_OFFSET, 0); + if (rc) { + pr_err("unable to read rs to rslow dischg: %d\n", rc); + goto done; + } + rs_to_rslow_dischg = half_float(buffer); + + rc = fg_mem_read(chip, buffer, 
TEMP_RS_TO_RSLOW_REG, 2, + RS_TO_RSLOW_CHG_OFFSET, 0); + if (rc) { + pr_err("unable to read rs to rslow chg: %d\n", rc); + goto done; + } + rs_to_rslow_chg = half_float(buffer); + + if (fg_debug_mask & FG_STATUS) + pr_info("rs_rslow_chg: %lld, rs_rslow_dischg: %lld, esr: %lld\n", + rs_to_rslow_chg, rs_to_rslow_dischg, batt_esr); + + rconn_uohm = chip->rconn_mohm * 1000; + rs_to_rslow_dischg = div64_s64(rs_to_rslow_dischg * batt_esr, + batt_esr + rconn_uohm); + rs_to_rslow_chg = div64_s64(rs_to_rslow_chg * batt_esr, + batt_esr + rconn_uohm); + + half_float_to_buffer(rs_to_rslow_chg, buffer); + rc = fg_mem_write(chip, buffer, TEMP_RS_TO_RSLOW_REG, 2, + RS_TO_RSLOW_CHG_OFFSET, 0); + if (rc) { + pr_err("unable to write rs_to_rslow_chg: %d\n", rc); + goto done; + } + + half_float_to_buffer(rs_to_rslow_dischg, buffer); + rc = fg_mem_write(chip, buffer, TEMP_RS_TO_RSLOW_REG, 2, + RS_TO_RSLOW_DISCHG_OFFSET, 0); + if (rc) { + pr_err("unable to write rs_to_rslow_dischg: %d\n", rc); + goto done; + } + + if (fg_debug_mask & FG_STATUS) + pr_info("Modified rs_rslow_chg: %lld, rs_rslow_dischg: %lld\n", + rs_to_rslow_chg, rs_to_rslow_dischg); +done: + return rc; +} + #define RSLOW_CFG_MASK (BIT(2) | BIT(3) | BIT(4) | BIT(5)) #define RSLOW_CFG_ON_VAL (BIT(2) | BIT(3)) #define RSLOW_THRESH_FULL_VAL 0xFF @@ -4233,7 +5704,7 @@ static int fg_rslow_charge_comp_set(struct fg_chip *chip) half_float_to_buffer(chip->rslow_comp.chg_rs_to_rslow, buffer); rc = fg_mem_write(chip, buffer, - TEMP_RS_TO_RSLOW_REG, 2, TEMP_RS_TO_RSLOW_OFFSET, 0); + TEMP_RS_TO_RSLOW_REG, 2, RS_TO_RSLOW_CHG_OFFSET, 0); if (rc) { pr_err("unable to write rs to rslow: %d\n", rc); goto done; @@ -4286,7 +5757,7 @@ static int fg_rslow_charge_comp_clear(struct fg_chip *chip) } rc = fg_mem_write(chip, chip->rslow_comp.rs_to_rslow, - TEMP_RS_TO_RSLOW_REG, 2, TEMP_RS_TO_RSLOW_OFFSET, 0); + TEMP_RS_TO_RSLOW_REG, 2, RS_TO_RSLOW_CHG_OFFSET, 0); if (rc) { pr_err("unable to write rs to rslow: %d\n", rc); goto done; @@ -4510,6 +5981,58 @@ static void esr_extract_config_work(struct work_struct *work) fg_relax(&chip->esr_extract_wakeup_source); } +#define KI_COEFF_MEDC_REG 0x400 +#define KI_COEFF_MEDC_OFFSET 0 +#define KI_COEFF_HIGHC_REG 0x404 +#define KI_COEFF_HIGHC_OFFSET 0 +#define DEFAULT_MEDC_VOLTAGE_GAIN 3 +#define DEFAULT_HIGHC_VOLTAGE_GAIN 2 +static void discharge_gain_work(struct work_struct *work) +{ + struct fg_chip *chip = container_of(work, struct fg_chip, + dischg_gain_work); + u8 buf[2]; + int capacity, rc, i; + int64_t medc_val = DEFAULT_MEDC_VOLTAGE_GAIN; + int64_t highc_val = DEFAULT_HIGHC_VOLTAGE_GAIN; + + capacity = get_prop_capacity(chip); + if (chip->status == POWER_SUPPLY_STATUS_DISCHARGING) { + for (i = VOLT_GAIN_MAX - 1; i >= 0; i--) { + if (capacity <= chip->dischg_gain.soc[i]) { + medc_val = chip->dischg_gain.medc_gain[i]; + highc_val = chip->dischg_gain.highc_gain[i]; + } + } + } + + if (fg_debug_mask & FG_STATUS) + pr_info("Capacity: %d, medc_gain: %lld highc_gain: %lld\n", + capacity, medc_val, highc_val); + + medc_val *= MICRO_UNIT; + half_float_to_buffer(medc_val, buf); + rc = fg_mem_write(chip, buf, KI_COEFF_MEDC_REG, 2, + KI_COEFF_MEDC_OFFSET, 0); + if (rc) + pr_err("Couldn't write to ki_coeff_medc_reg, rc=%d\n", rc); + else if (fg_debug_mask & FG_STATUS) + pr_info("Value [%x %x] written to ki_coeff_medc\n", buf[0], + buf[1]); + + highc_val *= MICRO_UNIT; + half_float_to_buffer(highc_val, buf); + rc = fg_mem_write(chip, buf, KI_COEFF_HIGHC_REG, 2, + KI_COEFF_HIGHC_OFFSET, 0); + if (rc) + pr_err("Couldn't write to 
ki_coeff_highc_reg, rc=%d\n", rc); + else if (fg_debug_mask & FG_STATUS) + pr_info("Value [%x %x] written to ki_coeff_highc\n", buf[0], + buf[1]); + + fg_relax(&chip->dischg_gain_wakeup_source); +} + #define LOW_LATENCY BIT(6) #define BATT_PROFILE_OFFSET 0x4C0 #define PROFILE_INTEGRITY_REG 0x53C @@ -4529,7 +6052,7 @@ static int fg_do_restart(struct fg_chip *chip, bool write_profile) pr_info("restarting fuel gauge...\n"); try_again: - if (write_profile) { + if (write_profile && !chip->ima_error_handling) { if (!chip->charging_disabled) { pr_err("Charging not yet disabled!\n"); return -EINVAL; @@ -4770,7 +6293,8 @@ fail: #define BATTERY_PSY_WAIT_MS 2000 static int fg_batt_profile_init(struct fg_chip *chip) { - int rc = 0, ret, len, batt_id; + int rc = 0, ret; + int len, batt_id; struct device_node *node = chip->pdev->dev.of_node; struct device_node *batt_node, *profile_node; const char *data, *batt_type_str; @@ -4792,6 +6316,19 @@ wait: goto no_profile; } + /* Check whether the charger is ready */ + if (!is_charger_available(chip)) + goto reschedule; + + /* Disable charging for a FG cycle before calculating vbat_in_range */ + if (!chip->charging_disabled) { + rc = set_prop_enable_charging(chip, false); + if (rc) + pr_err("Failed to disable charging, rc=%d\n", rc); + + goto update; + } + batt_node = of_find_node_by_name(node, "qcom,battery-data"); if (!batt_node) { pr_warn("No available batterydata, using OTP defaults\n"); @@ -4808,8 +6345,12 @@ wait: fg_batt_type); if (IS_ERR_OR_NULL(profile_node)) { rc = PTR_ERR(profile_node); - pr_err("couldn't find profile handle %d\n", rc); - goto no_profile; + if (rc == -EPROBE_DEFER) { + goto reschedule; + } else { + pr_err("couldn't find profile handle rc=%d\n", rc); + goto no_profile; + } } /* read rslow compensation values if they're available */ @@ -4903,18 +6444,6 @@ wait: goto no_profile; } - /* Check whether the charger is ready */ - if (!is_charger_available(chip)) - goto reschedule; - - /* Disable charging for a FG cycle before calculating vbat_in_range */ - if (!chip->charging_disabled) { - rc = set_prop_enable_charging(chip, false); - if (rc) - pr_err("Failed to disable charging, rc=%d\n", rc); - - goto reschedule; - } vbat_in_range = get_vbat_est_diff(chip) < settings[FG_MEM_VBAT_EST_DIFF].value * 1000; @@ -4956,11 +6485,7 @@ wait: chip->batt_profile, len, false); } - if (chip->power_supply_registered) - power_supply_changed(chip->bms_psy); - memcpy(chip->batt_profile, data, len); - chip->batt_profile_len = len; if (fg_debug_mask & FG_STATUS) @@ -4995,6 +6520,11 @@ wait: } } + if (chip->rconn_mohm > 0) { + rc = fg_update_batt_rslow_settings(chip); + if (rc) + pr_err("Error in updating ESR, rc=%d\n", rc); + } done: if (chip->charging_disabled) { rc = set_prop_enable_charging(chip, true); @@ -5008,8 +6538,22 @@ done: chip->batt_type = fg_batt_type; else chip->batt_type = batt_type_str; + + if (chip->first_profile_loaded && fg_reset_on_lockup) { + if (fg_debug_mask & FG_STATUS) + pr_info("restoring SRAM registers\n"); + rc = fg_backup_sram_registers(chip, false); + if (rc) + pr_err("Couldn't restore sram registers\n"); + + /* Read the cycle counter back from FG SRAM */ + if (chip->cyc_ctr.en) + restore_cycle_counter(chip); + } + chip->first_profile_loaded = true; chip->profile_loaded = true; + chip->soc_reporting_ready = true; chip->battery_missing = is_battery_missing(chip); update_chg_iterm(chip); update_cc_cv_setpoint(chip); @@ -5025,8 +6569,10 @@ done: fg_relax(&chip->profile_wakeup_source); pr_info("Battery SOC: %d, V: %duV\n", 
get_prop_capacity(chip), fg_data[FG_DATA_VOLTAGE].value); + complete_all(&chip->fg_reset_done); return rc; no_profile: + chip->soc_reporting_ready = true; if (chip->charging_disabled) { rc = set_prop_enable_charging(chip, true); if (rc) @@ -5039,14 +6585,15 @@ no_profile: power_supply_changed(chip->bms_psy); fg_relax(&chip->profile_wakeup_source); return rc; -reschedule: - schedule_delayed_work( - &chip->batt_profile_init, - msecs_to_jiffies(BATTERY_PSY_WAIT_MS)); +update: cancel_delayed_work(&chip->update_sram_data); schedule_delayed_work( &chip->update_sram_data, msecs_to_jiffies(0)); +reschedule: + schedule_delayed_work( + &chip->batt_profile_init, + msecs_to_jiffies(BATTERY_PSY_WAIT_MS)); fg_relax(&chip->profile_wakeup_source); return 0; } @@ -5056,14 +6603,41 @@ static void check_empty_work(struct work_struct *work) struct fg_chip *chip = container_of(work, struct fg_chip, check_empty_work.work); + bool vbatt_low_sts; + int msoc; + + /* handle empty soc based on vbatt-low interrupt */ + if (chip->use_vbat_low_empty_soc) { + if (fg_get_vbatt_status(chip, &vbatt_low_sts)) + goto out; + + msoc = get_monotonic_soc_raw(chip); - if (fg_is_batt_empty(chip)) { + if (fg_debug_mask & FG_STATUS) + pr_info("Vbatt_low: %d, msoc: %d\n", vbatt_low_sts, + msoc); + if (vbatt_low_sts || (msoc == 0)) + chip->soc_empty = true; + else + chip->soc_empty = false; + + if (chip->power_supply_registered) + power_supply_changed(chip->bms_psy); + + if (!chip->vbat_low_irq_enabled) { + enable_irq(chip->batt_irq[VBATT_LOW].irq); + enable_irq_wake(chip->batt_irq[VBATT_LOW].irq); + chip->vbat_low_irq_enabled = true; + } + } else if (fg_is_batt_empty(chip)) { if (fg_debug_mask & FG_STATUS) pr_info("EMPTY SOC high\n"); chip->soc_empty = true; if (chip->power_supply_registered) power_supply_changed(chip->bms_psy); } + +out: fg_relax(&chip->empty_check_wakeup_source); } @@ -5103,7 +6677,7 @@ static void charge_full_work(struct work_struct *work) int rc; u8 buffer[3]; int bsoc; - int resume_soc_raw = FULL_SOC_RAW - settings[FG_MEM_RESUME_SOC].value; + int resume_soc_raw = settings[FG_MEM_RESUME_SOC].value; bool disable = false; u8 reg; @@ -5318,6 +6892,98 @@ do { \ } \ } while (0) +static int fg_dischg_gain_dt_init(struct fg_chip *chip) +{ + struct device_node *node = chip->pdev->dev.of_node; + struct property *prop; + int i, rc = 0; + size_t size; + + prop = of_find_property(node, "qcom,fg-dischg-voltage-gain-soc", + NULL); + if (!prop) { + pr_err("qcom-fg-dischg-voltage-gain-soc not specified\n"); + goto out; + } + + size = prop->length / sizeof(u32); + if (size != VOLT_GAIN_MAX) { + pr_err("Voltage gain SOC specified is of incorrect size\n"); + goto out; + } + + rc = of_property_read_u32_array(node, + "qcom,fg-dischg-voltage-gain-soc", chip->dischg_gain.soc, size); + if (rc < 0) { + pr_err("Reading qcom-fg-dischg-voltage-gain-soc failed, rc=%d\n", + rc); + goto out; + } + + for (i = 0; i < VOLT_GAIN_MAX; i++) { + if (chip->dischg_gain.soc[i] > 100) { + pr_err("Incorrect dischg-voltage-gain-soc\n"); + goto out; + } + } + + prop = of_find_property(node, "qcom,fg-dischg-med-voltage-gain", + NULL); + if (!prop) { + pr_err("qcom-fg-dischg-med-voltage-gain not specified\n"); + goto out; + } + + size = prop->length / sizeof(u32); + if (size != VOLT_GAIN_MAX) { + pr_err("med-voltage-gain specified is of incorrect size\n"); + goto out; + } + + rc = of_property_read_u32_array(node, + "qcom,fg-dischg-med-voltage-gain", chip->dischg_gain.medc_gain, + size); + if (rc < 0) { + pr_err("Reading qcom-fg-dischg-med-voltage-gain failed, 
rc=%d\n", + rc); + goto out; + } + + prop = of_find_property(node, "qcom,fg-dischg-high-voltage-gain", + NULL); + if (!prop) { + pr_err("qcom-fg-dischg-high-voltage-gain not specified\n"); + goto out; + } + + size = prop->length / sizeof(u32); + if (size != VOLT_GAIN_MAX) { + pr_err("high-voltage-gain specified is of incorrect size\n"); + goto out; + } + + rc = of_property_read_u32_array(node, + "qcom,fg-dischg-high-voltage-gain", + chip->dischg_gain.highc_gain, size); + if (rc < 0) { + pr_err("Reading qcom-fg-dischg-high-voltage-gain failed, rc=%d\n", + rc); + goto out; + } + + if (fg_debug_mask & FG_STATUS) { + for (i = 0; i < VOLT_GAIN_MAX; i++) + pr_info("SOC:%d MedC_Gain:%d HighC_Gain: %d\n", + chip->dischg_gain.soc[i], + chip->dischg_gain.medc_gain[i], + chip->dischg_gain.highc_gain[i]); + } + return 0; +out: + chip->dischg_gain.enable = false; + return rc; +} + #define DEFAULT_EVALUATION_CURRENT_MA 1000 static int fg_of_init(struct fg_chip *chip) { @@ -5395,6 +7061,10 @@ static int fg_of_init(struct fg_chip *chip) "cl-max-start-capacity", rc, 15); OF_READ_PROPERTY(chip->learning_data.vbat_est_thr_uv, "cl-vbat-est-thr-uv", rc, 40000); + OF_READ_PROPERTY(chip->learning_data.max_cap_limit, + "cl-max-limit-deciperc", rc, 0); + OF_READ_PROPERTY(chip->learning_data.min_cap_limit, + "cl-min-limit-deciperc", rc, 0); OF_READ_PROPERTY(chip->evaluation_current, "aging-eval-current-ma", rc, DEFAULT_EVALUATION_CURRENT_MA); @@ -5455,6 +7125,77 @@ static int fg_of_init(struct fg_chip *chip) chip->esr_pulse_tune_en = of_property_read_bool(node, "qcom,esr-pulse-tuning-en"); + chip->soc_slope_limiter_en = of_property_read_bool(node, + "qcom,fg-control-slope-limiter"); + if (chip->soc_slope_limiter_en) { + OF_READ_PROPERTY(chip->slope_limit_temp, + "fg-slope-limit-temp-threshold", rc, + SLOPE_LIMIT_TEMP_THRESHOLD); + + OF_READ_PROPERTY(chip->slope_limit_coeffs[LOW_TEMP_CHARGE], + "fg-slope-limit-low-temp-chg", rc, + SLOPE_LIMIT_LOW_TEMP_CHG); + + OF_READ_PROPERTY(chip->slope_limit_coeffs[HIGH_TEMP_CHARGE], + "fg-slope-limit-high-temp-chg", rc, + SLOPE_LIMIT_HIGH_TEMP_CHG); + + OF_READ_PROPERTY(chip->slope_limit_coeffs[LOW_TEMP_DISCHARGE], + "fg-slope-limit-low-temp-dischg", rc, + SLOPE_LIMIT_LOW_TEMP_DISCHG); + + OF_READ_PROPERTY(chip->slope_limit_coeffs[HIGH_TEMP_DISCHARGE], + "fg-slope-limit-high-temp-dischg", rc, + SLOPE_LIMIT_HIGH_TEMP_DISCHG); + + if (fg_debug_mask & FG_STATUS) + pr_info("slope-limiter, temp: %d coeffs: [%d %d %d %d]\n", + chip->slope_limit_temp, + chip->slope_limit_coeffs[LOW_TEMP_CHARGE], + chip->slope_limit_coeffs[HIGH_TEMP_CHARGE], + chip->slope_limit_coeffs[LOW_TEMP_DISCHARGE], + chip->slope_limit_coeffs[HIGH_TEMP_DISCHARGE]); + } + + OF_READ_PROPERTY(chip->rconn_mohm, "fg-rconn-mohm", rc, 0); + + chip->dischg_gain.enable = of_property_read_bool(node, + "qcom,fg-dischg-voltage-gain-ctrl"); + if (chip->dischg_gain.enable) { + rc = fg_dischg_gain_dt_init(chip); + if (rc) { + pr_err("Error in reading dischg_gain parameters, rc=%d\n", + rc); + rc = 0; + } + } + + chip->use_vbat_low_empty_soc = of_property_read_bool(node, + "qcom,fg-use-vbat-low-empty-soc"); + + OF_READ_PROPERTY(chip->batt_temp_low_limit, + "fg-batt-temp-low-limit", rc, BATT_TEMP_LOW_LIMIT); + + OF_READ_PROPERTY(chip->batt_temp_high_limit, + "fg-batt-temp-high-limit", rc, BATT_TEMP_HIGH_LIMIT); + + if (fg_debug_mask & FG_STATUS) + pr_info("batt-temp-low_limit: %d batt-temp-high_limit: %d\n", + chip->batt_temp_low_limit, chip->batt_temp_high_limit); + + OF_READ_PROPERTY(chip->cc_soc_limit_pct, 
"fg-cc-soc-limit-pct", rc, 0); + + if (fg_debug_mask & FG_STATUS) + pr_info("cc-soc-limit-pct: %d\n", chip->cc_soc_limit_pct); + + chip->batt_info_restore = of_property_read_bool(node, + "qcom,fg-restore-batt-info"); + + if (fg_debug_mask & FG_STATUS) + pr_info("restore: %d validate_by_ocv: %d range_pct: %d\n", + chip->batt_info_restore, fg_batt_valid_ocv, + fg_batt_range_pct); + return rc; } @@ -5528,15 +7269,22 @@ static int fg_init_irqs(struct fg_chip *chip) chip->soc_irq[FULL_SOC].irq, rc); return rc; } - rc = devm_request_irq(chip->dev, - chip->soc_irq[EMPTY_SOC].irq, - fg_empty_soc_irq_handler, - IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, - "empty-soc", chip); - if (rc < 0) { - pr_err("Can't request %d empty-soc: %d\n", - chip->soc_irq[EMPTY_SOC].irq, rc); - return rc; + enable_irq_wake(chip->soc_irq[FULL_SOC].irq); + chip->full_soc_irq_enabled = true; + + if (!chip->use_vbat_low_empty_soc) { + rc = devm_request_irq(chip->dev, + chip->soc_irq[EMPTY_SOC].irq, + fg_empty_soc_irq_handler, + IRQF_TRIGGER_RISING | + IRQF_TRIGGER_FALLING, + "empty-soc", chip); + if (rc < 0) { + pr_err("Can't request %d empty-soc: %d\n", + chip->soc_irq[EMPTY_SOC].irq, + rc); + return rc; + } } rc = devm_request_irq(chip->dev, chip->soc_irq[DELTA_SOC].irq, @@ -5558,8 +7306,8 @@ static int fg_init_irqs(struct fg_chip *chip) } enable_irq_wake(chip->soc_irq[DELTA_SOC].irq); - enable_irq_wake(chip->soc_irq[FULL_SOC].irq); - enable_irq_wake(chip->soc_irq[EMPTY_SOC].irq); + if (!chip->use_vbat_low_empty_soc) + enable_irq_wake(chip->soc_irq[EMPTY_SOC].irq); break; case FG_MEMIF: chip->mem_irq[FG_MEM_AVAIL].irq @@ -5581,8 +7329,53 @@ static int fg_init_irqs(struct fg_chip *chip) } break; case FG_BATT: - chip->batt_irq[BATT_MISSING].irq - = of_irq_get_byname(child, "batt-missing"); + chip->batt_irq[JEITA_SOFT_COLD].irq = + of_irq_get_byname(child, "soft-cold"); + if (chip->batt_irq[JEITA_SOFT_COLD].irq < 0) { + pr_err("Unable to get soft-cold irq\n"); + rc = -EINVAL; + return rc; + } + rc = devm_request_threaded_irq(chip->dev, + chip->batt_irq[JEITA_SOFT_COLD].irq, + NULL, + fg_jeita_soft_cold_irq_handler, + IRQF_TRIGGER_RISING | + IRQF_TRIGGER_FALLING | + IRQF_ONESHOT, + "soft-cold", chip); + if (rc < 0) { + pr_err("Can't request %d soft-cold: %d\n", + chip->batt_irq[JEITA_SOFT_COLD].irq, + rc); + return rc; + } + disable_irq(chip->batt_irq[JEITA_SOFT_COLD].irq); + chip->batt_irq[JEITA_SOFT_COLD].disabled = true; + chip->batt_irq[JEITA_SOFT_HOT].irq = + of_irq_get_byname(child, "soft-hot"); + if (chip->batt_irq[JEITA_SOFT_HOT].irq < 0) { + pr_err("Unable to get soft-hot irq\n"); + rc = -EINVAL; + return rc; + } + rc = devm_request_threaded_irq(chip->dev, + chip->batt_irq[JEITA_SOFT_HOT].irq, + NULL, + fg_jeita_soft_hot_irq_handler, + IRQF_TRIGGER_RISING | + IRQF_TRIGGER_FALLING | + IRQF_ONESHOT, + "soft-hot", chip); + if (rc < 0) { + pr_err("Can't request %d soft-hot: %d\n", + chip->batt_irq[JEITA_SOFT_HOT].irq, rc); + return rc; + } + disable_irq(chip->batt_irq[JEITA_SOFT_HOT].irq); + chip->batt_irq[JEITA_SOFT_HOT].disabled = true; + chip->batt_irq[BATT_MISSING].irq = + of_irq_get_byname(child, "batt-missing"); if (chip->batt_irq[BATT_MISSING].irq < 0) { pr_err("Unable to get batt-missing irq\n"); rc = -EINVAL; @@ -5619,8 +7412,14 @@ static int fg_init_irqs(struct fg_chip *chip) chip->batt_irq[VBATT_LOW].irq, rc); return rc; } - disable_irq_nosync(chip->batt_irq[VBATT_LOW].irq); - chip->vbat_low_irq_enabled = false; + if (chip->use_vbat_low_empty_soc) { + enable_irq_wake(chip->batt_irq[VBATT_LOW].irq); + 
chip->vbat_low_irq_enabled = true; + } else { + disable_irq_nosync( + chip->batt_irq[VBATT_LOW].irq); + chip->vbat_low_irq_enabled = false; + } break; case FG_ADC: break; @@ -5630,17 +7429,22 @@ static int fg_init_irqs(struct fg_chip *chip) } } + chip->irqs_enabled = true; return rc; } -static void fg_cleanup(struct fg_chip *chip) +static void fg_cancel_all_works(struct fg_chip *chip) { + cancel_delayed_work_sync(&chip->check_sanity_work); cancel_delayed_work_sync(&chip->update_sram_data); cancel_delayed_work_sync(&chip->update_temp_work); cancel_delayed_work_sync(&chip->update_jeita_setting); cancel_delayed_work_sync(&chip->check_empty_work); cancel_delayed_work_sync(&chip->batt_profile_init); alarm_try_to_cancel(&chip->fg_cap_learning_alarm); + alarm_try_to_cancel(&chip->hard_jeita_alarm); + if (!chip->ima_error_handling) + cancel_work_sync(&chip->ima_error_recovery_work); cancel_work_sync(&chip->rslow_comp_work); cancel_work_sync(&chip->set_resume_soc_work); cancel_work_sync(&chip->fg_cap_learning_work); @@ -5652,12 +7456,23 @@ static void fg_cleanup(struct fg_chip *chip) cancel_work_sync(&chip->gain_comp_work); cancel_work_sync(&chip->init_work); cancel_work_sync(&chip->charge_full_work); + cancel_work_sync(&chip->bcl_hi_power_work); cancel_work_sync(&chip->esr_extract_config_work); + cancel_work_sync(&chip->slope_limiter_work); + cancel_work_sync(&chip->dischg_gain_work); + cancel_work_sync(&chip->cc_soc_store_work); +} + +static void fg_cleanup(struct fg_chip *chip) +{ + fg_cancel_all_works(chip); + power_supply_unregister(chip->bms_psy); mutex_destroy(&chip->rslow_comp.lock); mutex_destroy(&chip->rw_lock); mutex_destroy(&chip->cyc_ctr.lock); mutex_destroy(&chip->learning_data.learning_lock); mutex_destroy(&chip->sysfs_restart_lock); + mutex_destroy(&chip->ima_recovery_lock); wakeup_source_trash(&chip->resume_soc_wakeup_source.source); wakeup_source_trash(&chip->empty_check_wakeup_source.source); wakeup_source_trash(&chip->memif_wakeup_source.source); @@ -5667,6 +7482,11 @@ static void fg_cleanup(struct fg_chip *chip) wakeup_source_trash(&chip->gain_comp_wakeup_source.source); wakeup_source_trash(&chip->capacity_learning_wakeup_source.source); wakeup_source_trash(&chip->esr_extract_wakeup_source.source); + wakeup_source_trash(&chip->slope_limit_wakeup_source.source); + wakeup_source_trash(&chip->dischg_gain_wakeup_source.source); + wakeup_source_trash(&chip->fg_reset_wakeup_source.source); + wakeup_source_trash(&chip->cc_soc_wakeup_source.source); + wakeup_source_trash(&chip->sanity_wakeup_source.source); } static int fg_remove(struct platform_device *pdev) @@ -6155,12 +7975,13 @@ static int bcl_trim_workaround(struct fg_chip *chip) return 0; } -#define FG_ALG_SYSCTL_1 0x4B0 -#define SOC_CNFG 0x450 -#define SOC_DELTA_OFFSET 3 -#define DELTA_SOC_PERCENT 1 -#define I_TERM_QUAL_BIT BIT(1) -#define PATCH_NEG_CURRENT_BIT BIT(3) +#define FG_ALG_SYSCTL_1 0x4B0 +#define SOC_CNFG 0x450 +#define SOC_DELTA_OFFSET 3 +#define DELTA_SOC_PERCENT 1 +#define ALERT_CFG_OFFSET 3 +#define I_TERM_QUAL_BIT BIT(1) +#define PATCH_NEG_CURRENT_BIT BIT(3) #define KI_COEFF_PRED_FULL_ADDR 0x408 #define KI_COEFF_PRED_FULL_4_0_MSB 0x88 #define KI_COEFF_PRED_FULL_4_0_LSB 0x00 @@ -6168,6 +7989,12 @@ static int bcl_trim_workaround(struct fg_chip *chip) #define FG_ADC_CONFIG_REG 0x4B8 #define FG_BCL_CONFIG_OFFSET 0x3 #define BCL_FORCED_HPM_IN_CHARGE BIT(2) +#define IRQ_USE_VOLTAGE_HYST_BIT BIT(0) +#define EMPTY_FROM_VOLTAGE_BIT BIT(1) +#define EMPTY_FROM_SOC_BIT BIT(2) +#define EMPTY_SOC_IRQ_MASK 
(IRQ_USE_VOLTAGE_HYST_BIT | \ + EMPTY_FROM_SOC_BIT | \ + EMPTY_FROM_VOLTAGE_BIT) static int fg_common_hw_init(struct fg_chip *chip) { int rc; @@ -6176,8 +8003,9 @@ static int fg_common_hw_init(struct fg_chip *chip) update_iterm(chip); update_cutoff_voltage(chip); - update_irq_volt_empty(chip); update_bcl_thresholds(chip); + if (!chip->use_vbat_low_empty_soc) + update_irq_volt_empty(chip); resume_soc_raw = settings[FG_MEM_RESUME_SOC].value; if (resume_soc_raw > 0) { @@ -6207,6 +8035,11 @@ static int fg_common_hw_init(struct fg_chip *chip) return rc; } + /* Override the voltage threshold for vbatt_low with empty_volt */ + if (chip->use_vbat_low_empty_soc) + settings[FG_MEM_BATT_LOW].value = + settings[FG_MEM_IRQ_VOLT_EMPTY].value; + rc = fg_mem_masked_write(chip, settings[FG_MEM_BATT_LOW].address, 0xFF, batt_to_setpoint_8b(settings[FG_MEM_BATT_LOW].value), settings[FG_MEM_BATT_LOW].offset); @@ -6274,20 +8107,41 @@ static int fg_common_hw_init(struct fg_chip *chip) if (fg_debug_mask & FG_STATUS) pr_info("imptr_pulse_slow is %sabled\n", chip->imptr_pulse_slow_en ? "en" : "dis"); + } - rc = fg_mem_read(chip, &val, RSLOW_CFG_REG, 1, RSLOW_CFG_OFFSET, - 0); - if (rc) { - pr_err("unable to read rslow cfg: %d\n", rc); - return rc; - } + rc = fg_mem_read(chip, &val, RSLOW_CFG_REG, 1, RSLOW_CFG_OFFSET, + 0); + if (rc) { + pr_err("unable to read rslow cfg: %d\n", rc); + return rc; + } - if (val & RSLOW_CFG_ON_VAL) - chip->rslow_comp.active = true; + if (val & RSLOW_CFG_ON_VAL) + chip->rslow_comp.active = true; - if (fg_debug_mask & FG_STATUS) - pr_info("rslow_comp active is %sabled\n", - chip->rslow_comp.active ? "en" : "dis"); + if (fg_debug_mask & FG_STATUS) + pr_info("rslow_comp active is %sabled\n", + chip->rslow_comp.active ? "en" : "dis"); + + /* + * Clear bits 0-2 in 0x4B3 and set them again to make empty_soc irq + * trigger again. + */ + rc = fg_mem_masked_write(chip, FG_ALG_SYSCTL_1, EMPTY_SOC_IRQ_MASK, + 0, ALERT_CFG_OFFSET); + if (rc) { + pr_err("failed to write to 0x4B3 rc=%d\n", rc); + return rc; + } + + /* Wait for a FG cycle before enabling empty soc irq configuration */ + msleep(FG_CYCLE_MS); + + rc = fg_mem_masked_write(chip, FG_ALG_SYSCTL_1, EMPTY_SOC_IRQ_MASK, + EMPTY_SOC_IRQ_MASK, ALERT_CFG_OFFSET); + if (rc) { + pr_err("failed to write to 0x4B3 rc=%d\n", rc); + return rc; } return 0; @@ -6414,12 +8268,13 @@ static int fg_hw_init(struct fg_chip *chip) /* Setup workaround flag based on PMIC type */ if (fg_sense_type == INTERNAL_CURRENT_SENSE) chip->wa_flag |= IADC_GAIN_COMP_WA; - if (chip->pmic_revision[REVID_DIG_MAJOR] > 1) + if (chip->pmic_revision[REVID_DIG_MAJOR] >= 1) chip->wa_flag |= USE_CC_SOC_REG; break; case PMI8950: case PMI8937: + case PMI8940: rc = fg_8950_hw_init(chip); /* Setup workaround flag based on PMIC type */ chip->wa_flag |= BCL_HI_POWER_FOR_CHGLED_WA; @@ -6438,12 +8293,223 @@ static int fg_hw_init(struct fg_chip *chip) return rc; } +static int fg_init_iadc_config(struct fg_chip *chip) +{ + u8 reg[2]; + int rc; + + /* read default gain config */ + rc = fg_mem_read(chip, reg, K_VCOR_REG, 2, DEF_GAIN_OFFSET, 0); + if (rc) { + pr_err("Failed to read default gain rc=%d\n", rc); + return rc; + } + + if (reg[1] || reg[0]) { + /* + * Default gain register has valid value: + * - write to gain register. 
+ */ + rc = fg_mem_write(chip, reg, GAIN_REG, 2, + GAIN_OFFSET, 0); + if (rc) { + pr_err("Failed to write gain rc=%d\n", rc); + return rc; + } + } else { + /* + * Default gain register is invalid: + * - read gain register for default gain value + * - write to default gain register. + */ + rc = fg_mem_read(chip, reg, GAIN_REG, 2, + GAIN_OFFSET, 0); + if (rc) { + pr_err("Failed to read gain rc=%d\n", rc); + return rc; + } + rc = fg_mem_write(chip, reg, K_VCOR_REG, 2, + DEF_GAIN_OFFSET, 0); + if (rc) { + pr_err("Failed to write default gain rc=%d\n", + rc); + return rc; + } + } + + chip->iadc_comp_data.dfl_gain_reg[0] = reg[0]; + chip->iadc_comp_data.dfl_gain_reg[1] = reg[1]; + chip->iadc_comp_data.dfl_gain = half_float(reg); + + pr_debug("IADC gain initial config reg_val 0x%x%x gain %lld\n", + reg[1], reg[0], chip->iadc_comp_data.dfl_gain); + return 0; +} + +#define EN_WR_FGXCT_PRD BIT(6) +#define EN_RD_FGXCT_PRD BIT(5) +#define FG_RESTART_TIMEOUT_MS 12000 +static void ima_error_recovery_work(struct work_struct *work) +{ + struct fg_chip *chip = container_of(work, + struct fg_chip, + ima_error_recovery_work); + bool tried_again = false; + int rc; + u8 buf[4] = {0, 0, 0, 0}; + + fg_stay_awake(&chip->fg_reset_wakeup_source); + mutex_lock(&chip->ima_recovery_lock); + if (!chip->ima_error_handling) { + pr_err("Scheduled by mistake?\n"); + mutex_unlock(&chip->ima_recovery_lock); + fg_relax(&chip->fg_reset_wakeup_source); + return; + } + + /* + * SOC should be read and used until the error recovery completes. + * Without this, there could be a fluctuation in SOC values notified + * to the userspace. + */ + chip->use_last_soc = true; + + /* Block SRAM access till FG reset is complete */ + chip->block_sram_access = true; + + /* Release the mutex to avoid deadlock while cancelling the works */ + mutex_unlock(&chip->ima_recovery_lock); + + /* Cancel all the works */ + fg_cancel_all_works(chip); + + if (fg_debug_mask & FG_STATUS) + pr_info("last_soc: %d\n", chip->last_soc); + + mutex_lock(&chip->ima_recovery_lock); + /* Acquire IMA access forcibly from FG ALG */ + rc = fg_masked_write(chip, chip->mem_base + MEM_INTF_IMA_CFG, + EN_WR_FGXCT_PRD | EN_RD_FGXCT_PRD, + EN_WR_FGXCT_PRD | EN_RD_FGXCT_PRD, 1); + if (rc) { + pr_err("Error in writing to IMA_CFG, rc=%d\n", rc); + goto out; + } + + /* Release the IMA access now so that FG reset can go through */ + rc = fg_masked_write(chip, chip->mem_base + MEM_INTF_IMA_CFG, + EN_WR_FGXCT_PRD | EN_RD_FGXCT_PRD, 0, 1); + if (rc) { + pr_err("Error in writing to IMA_CFG, rc=%d\n", rc); + goto out; + } + + if (fg_debug_mask & FG_STATUS) + pr_info("resetting FG\n"); + + /* Assert FG reset */ + rc = fg_reset(chip, true); + if (rc) { + pr_err("Couldn't reset FG\n"); + goto out; + } + + /* Wait for a small time before deasserting FG reset */ + msleep(100); + + if (fg_debug_mask & FG_STATUS) + pr_info("clearing FG from reset\n"); + + /* Deassert FG reset */ + rc = fg_reset(chip, false); + if (rc) { + pr_err("Couldn't clear FG reset\n"); + goto out; + } + + /* Wait for at least a FG cycle before doing SRAM access */ + msleep(2000); + + chip->block_sram_access = false; + + if (!chip->init_done) { + schedule_work(&chip->init_work); + goto wait; + } + + if (fg_debug_mask & FG_STATUS) + pr_info("Calling hw_init\n"); + + /* + * Once FG is reset, everything in SRAM will be wiped out. Redo + * hw_init, update jeita settings etc., again to make sure all + * the settings got restored again. 
+ */ + rc = fg_hw_init(chip); + if (rc) { + pr_err("Error in hw_init, rc=%d\n", rc); + goto out; + } + + update_jeita_setting(&chip->update_jeita_setting.work); + + if (chip->wa_flag & IADC_GAIN_COMP_WA) { + rc = fg_init_iadc_config(chip); + if (rc) + goto out; + } + + if (fg_debug_mask & FG_STATUS) + pr_info("loading battery profile\n"); + if (!chip->use_otp_profile) { + chip->battery_missing = true; + chip->profile_loaded = false; + chip->soc_reporting_ready = false; + chip->batt_type = default_batt_type; + fg_handle_battery_insertion(chip); + } + +wait: + rc = wait_for_completion_interruptible_timeout(&chip->fg_reset_done, + msecs_to_jiffies(FG_RESTART_TIMEOUT_MS)); + + /* If we were interrupted wait again one more time. */ + if (rc == -ERESTARTSYS && !tried_again) { + tried_again = true; + pr_debug("interrupted, waiting again\n"); + goto wait; + } else if (rc <= 0) { + pr_err("fg_restart taking long time rc=%d\n", rc); + goto out; + } + + rc = fg_mem_write(chip, buf, fg_data[FG_DATA_VINT_ERR].address, + fg_data[FG_DATA_VINT_ERR].len, + fg_data[FG_DATA_VINT_ERR].offset, 0); + if (rc < 0) + pr_err("Error in clearing VACT_INT_ERR, rc=%d\n", rc); + + if (fg_debug_mask & FG_STATUS) + pr_info("IMA error recovery done...\n"); +out: + fg_restore_soc(chip); + fg_restore_cc_soc(chip); + fg_enable_irqs(chip, true); + update_sram_data_work(&chip->update_sram_data.work); + update_temp_data(&chip->update_temp_work.work); + schedule_delayed_work(&chip->check_sanity_work, + msecs_to_jiffies(1000)); + chip->ima_error_handling = false; + mutex_unlock(&chip->ima_recovery_lock); + fg_relax(&chip->fg_reset_wakeup_source); +} + #define DIG_MINOR 0x0 #define DIG_MAJOR 0x1 #define ANA_MINOR 0x2 #define ANA_MAJOR 0x3 #define IACS_INTR_SRC_SLCT BIT(3) -static int fg_setup_memif_offset(struct fg_chip *chip) +static int fg_memif_init(struct fg_chip *chip) { int rc; @@ -6464,7 +8530,7 @@ static int fg_setup_memif_offset(struct fg_chip *chip) break; default: pr_err("Digital Major rev=%d not supported\n", - chip->revision[DIG_MAJOR]); + chip->revision[DIG_MAJOR]); return -EINVAL; } @@ -6481,6 +8547,13 @@ static int fg_setup_memif_offset(struct fg_chip *chip) pr_err("failed to configure interrupt source %d\n", rc); return rc; } + + /* check for error condition */ + rc = fg_check_ima_exception(chip, true); + if (rc) { + pr_err("Error in clearing IMA exception rc=%d", rc); + return rc; + } } return 0; @@ -6515,6 +8588,7 @@ static int fg_detect_pmic_type(struct fg_chip *chip) case PMI8950: case PMI8937: case PMI8996: + case PMI8940: chip->pmic_subtype = pmic_rev_id->pmic_subtype; chip->pmic_revision[REVID_RESERVED] = pmic_rev_id->rev1; chip->pmic_revision[REVID_VARIANT] = pmic_rev_id->rev2; @@ -6531,10 +8605,8 @@ static int fg_detect_pmic_type(struct fg_chip *chip) } #define INIT_JEITA_DELAY_MS 1000 - static void delayed_init_work(struct work_struct *work) { - u8 reg[2]; int rc; struct fg_chip *chip = container_of(work, struct fg_chip, @@ -6546,6 +8618,14 @@ static void delayed_init_work(struct work_struct *work) rc = fg_hw_init(chip); if (rc) { pr_err("failed to hw init rc = %d\n", rc); + if (!chip->init_done && chip->ima_supported) { + rc = fg_check_alg_status(chip); + if (rc && rc != -EBUSY) + pr_err("Couldn't check FG ALG status, rc=%d\n", + rc); + fg_mem_release(chip); + return; + } fg_mem_release(chip); fg_cleanup(chip); return; @@ -6566,57 +8646,19 @@ static void delayed_init_work(struct work_struct *work) if (!chip->use_otp_profile) schedule_delayed_work(&chip->batt_profile_init, 0); + if (chip->ima_supported && 
fg_reset_on_lockup) + schedule_delayed_work(&chip->check_sanity_work, + msecs_to_jiffies(1000)); + if (chip->wa_flag & IADC_GAIN_COMP_WA) { - /* read default gain config */ - rc = fg_mem_read(chip, reg, K_VCOR_REG, 2, DEF_GAIN_OFFSET, 0); - if (rc) { - pr_err("Failed to read default gain rc=%d\n", rc); + rc = fg_init_iadc_config(chip); + if (rc) goto done; - } - - if (reg[1] || reg[0]) { - /* - * Default gain register has valid value: - * - write to gain register. - */ - rc = fg_mem_write(chip, reg, GAIN_REG, 2, - GAIN_OFFSET, 0); - if (rc) { - pr_err("Failed to write gain rc=%d\n", rc); - goto done; - } - } else { - /* - * Default gain register is invalid: - * - read gain register for default gain value - * - write to default gain register. - */ - rc = fg_mem_read(chip, reg, GAIN_REG, 2, - GAIN_OFFSET, 0); - if (rc) { - pr_err("Failed to read gain rc=%d\n", rc); - goto done; - } - rc = fg_mem_write(chip, reg, K_VCOR_REG, 2, - DEF_GAIN_OFFSET, 0); - if (rc) { - pr_err("Failed to write default gain rc=%d\n", - rc); - goto done; - } - } - - chip->iadc_comp_data.dfl_gain_reg[0] = reg[0]; - chip->iadc_comp_data.dfl_gain_reg[1] = reg[1]; - chip->iadc_comp_data.dfl_gain = half_float(reg); - chip->input_present = is_input_present(chip); - chip->otg_present = is_otg_present(chip); - chip->init_done = true; - - pr_debug("IADC gain initial config reg_val 0x%x%x gain %lld\n", - reg[1], reg[0], chip->iadc_comp_data.dfl_gain); } + chip->input_present = is_input_present(chip); + chip->otg_present = is_otg_present(chip); + chip->init_done = true; pr_debug("FG: HW_init success\n"); return; @@ -6675,16 +8717,30 @@ static int fg_probe(struct platform_device *pdev) "qpnp_fg_cap_learning"); wakeup_source_init(&chip->esr_extract_wakeup_source.source, "qpnp_fg_esr_extract"); + wakeup_source_init(&chip->slope_limit_wakeup_source.source, + "qpnp_fg_slope_limit"); + wakeup_source_init(&chip->dischg_gain_wakeup_source.source, + "qpnp_fg_dischg_gain"); + wakeup_source_init(&chip->fg_reset_wakeup_source.source, + "qpnp_fg_reset"); + wakeup_source_init(&chip->cc_soc_wakeup_source.source, + "qpnp_fg_cc_soc"); + wakeup_source_init(&chip->sanity_wakeup_source.source, + "qpnp_fg_sanity_check"); + spin_lock_init(&chip->sec_access_lock); mutex_init(&chip->rw_lock); mutex_init(&chip->cyc_ctr.lock); mutex_init(&chip->learning_data.learning_lock); mutex_init(&chip->rslow_comp.lock); mutex_init(&chip->sysfs_restart_lock); + mutex_init(&chip->ima_recovery_lock); INIT_DELAYED_WORK(&chip->update_jeita_setting, update_jeita_setting); INIT_DELAYED_WORK(&chip->update_sram_data, update_sram_data_work); INIT_DELAYED_WORK(&chip->update_temp_work, update_temp_data); INIT_DELAYED_WORK(&chip->check_empty_work, check_empty_work); INIT_DELAYED_WORK(&chip->batt_profile_init, batt_profile_init); + INIT_DELAYED_WORK(&chip->check_sanity_work, check_sanity_work); + INIT_WORK(&chip->ima_error_recovery_work, ima_error_recovery_work); INIT_WORK(&chip->rslow_comp_work, rslow_comp_work); INIT_WORK(&chip->fg_cap_learning_work, fg_cap_learning_work); INIT_WORK(&chip->dump_sram, dump_sram); @@ -6699,13 +8755,19 @@ static int fg_probe(struct platform_device *pdev) INIT_WORK(&chip->gain_comp_work, iadc_gain_comp_work); INIT_WORK(&chip->bcl_hi_power_work, bcl_hi_power_work); INIT_WORK(&chip->esr_extract_config_work, esr_extract_config_work); + INIT_WORK(&chip->slope_limiter_work, slope_limiter_work); + INIT_WORK(&chip->dischg_gain_work, discharge_gain_work); + INIT_WORK(&chip->cc_soc_store_work, cc_soc_store_work); alarm_init(&chip->fg_cap_learning_alarm, 
ALARM_BOOTTIME, fg_cap_learning_alarm_cb); + alarm_init(&chip->hard_jeita_alarm, ALARM_BOOTTIME, + fg_hard_jeita_alarm_cb); init_completion(&chip->sram_access_granted); init_completion(&chip->sram_access_revoked); complete_all(&chip->sram_access_revoked); init_completion(&chip->batt_id_avail); init_completion(&chip->first_soc_done); + init_completion(&chip->fg_reset_done); dev_set_drvdata(&pdev->dev, chip); if (of_get_available_child_count(pdev->dev.of_node) == 0) { @@ -6763,7 +8825,7 @@ static int fg_probe(struct platform_device *pdev) return rc; } - rc = fg_setup_memif_offset(chip); + rc = fg_memif_init(chip); if (rc) { pr_err("Unable to setup mem_if offsets rc=%d\n", rc); goto of_init_fail; @@ -6834,10 +8896,18 @@ static int fg_probe(struct platform_device *pdev) rc = fg_dfs_create(chip); if (rc < 0) { pr_err("failed to create debugfs rc = %d\n", rc); - goto cancel_work; + goto power_supply_unregister; } } + /* Fake temperature till the actual temperature is read */ + chip->last_good_temp = 250; + + /* Initialize batt_info variables */ + chip->batt_range_ocv = &fg_batt_valid_ocv; + chip->batt_range_pct = &fg_batt_range_pct; + memset(chip->batt_info, INT_MAX, sizeof(chip->batt_info)); + schedule_work(&chip->init_work); pr_info("FG Probe success - FG Revision DIG:%d.%d ANA:%d.%d PMIC subtype=%d\n", @@ -6847,32 +8917,17 @@ static int fg_probe(struct platform_device *pdev) return rc; +power_supply_unregister: + power_supply_unregister(chip->bms_psy); cancel_work: - cancel_delayed_work_sync(&chip->update_jeita_setting); - cancel_delayed_work_sync(&chip->update_sram_data); - cancel_delayed_work_sync(&chip->update_temp_work); - cancel_delayed_work_sync(&chip->check_empty_work); - cancel_delayed_work_sync(&chip->batt_profile_init); - alarm_try_to_cancel(&chip->fg_cap_learning_alarm); - cancel_work_sync(&chip->set_resume_soc_work); - cancel_work_sync(&chip->fg_cap_learning_work); - cancel_work_sync(&chip->dump_sram); - cancel_work_sync(&chip->status_change_work); - cancel_work_sync(&chip->cycle_count_work); - cancel_work_sync(&chip->update_esr_work); - cancel_work_sync(&chip->rslow_comp_work); - cancel_work_sync(&chip->sysfs_restart_work); - cancel_work_sync(&chip->gain_comp_work); - cancel_work_sync(&chip->init_work); - cancel_work_sync(&chip->charge_full_work); - cancel_work_sync(&chip->bcl_hi_power_work); - cancel_work_sync(&chip->esr_extract_config_work); + fg_cancel_all_works(chip); of_init_fail: mutex_destroy(&chip->rslow_comp.lock); mutex_destroy(&chip->rw_lock); mutex_destroy(&chip->cyc_ctr.lock); mutex_destroy(&chip->learning_data.learning_lock); mutex_destroy(&chip->sysfs_restart_lock); + mutex_destroy(&chip->ima_recovery_lock); wakeup_source_trash(&chip->resume_soc_wakeup_source.source); wakeup_source_trash(&chip->empty_check_wakeup_source.source); wakeup_source_trash(&chip->memif_wakeup_source.source); @@ -6882,6 +8937,11 @@ of_init_fail: wakeup_source_trash(&chip->gain_comp_wakeup_source.source); wakeup_source_trash(&chip->capacity_learning_wakeup_source.source); wakeup_source_trash(&chip->esr_extract_wakeup_source.source); + wakeup_source_trash(&chip->slope_limit_wakeup_source.source); + wakeup_source_trash(&chip->dischg_gain_wakeup_source.source); + wakeup_source_trash(&chip->fg_reset_wakeup_source.source); + wakeup_source_trash(&chip->cc_soc_wakeup_source.source); + wakeup_source_trash(&chip->sanity_wakeup_source.source); return rc; } @@ -6938,11 +8998,103 @@ static int fg_resume(struct device *dev) return 0; } +static void fg_check_ima_idle(struct fg_chip *chip) +{ + bool 
rif_mem_sts = true; + int rc, time_count = 0; + + mutex_lock(&chip->rw_lock); + /* Make sure IMA is idle */ + while (1) { + rc = fg_check_rif_mem_access(chip, &rif_mem_sts); + if (rc) + break; + + if (!rif_mem_sts) + break; + + if (time_count > 4) { + pr_err("Waited for ~16ms polling RIF_MEM_ACCESS_REQ\n"); + fg_run_iacs_clear_sequence(chip); + break; + } + + /* Wait for 4ms before reading RIF_MEM_ACCESS_REQ again */ + usleep_range(4000, 4100); + time_count++; + } + mutex_unlock(&chip->rw_lock); +} + +static void fg_shutdown(struct platform_device *pdev) +{ + struct fg_chip *chip = dev_get_drvdata(&pdev->dev); + + if (fg_debug_mask & FG_STATUS) + pr_emerg("FG shutdown started\n"); + fg_cancel_all_works(chip); + fg_check_ima_idle(chip); + chip->fg_shutdown = true; + if (fg_debug_mask & FG_STATUS) + pr_emerg("FG shutdown complete\n"); +} + static const struct dev_pm_ops qpnp_fg_pm_ops = { .suspend = fg_suspend, .resume = fg_resume, }; +static int fg_reset_lockup_set(const char *val, const struct kernel_param *kp) +{ + int rc; + struct power_supply *bms_psy; + struct fg_chip *chip; + int old_val = fg_reset_on_lockup; + + rc = param_set_int(val, kp); + if (rc) { + pr_err("Unable to set fg_reset_on_lockup: %d\n", rc); + return rc; + } + + if (fg_reset_on_lockup != 0 && fg_reset_on_lockup != 1) { + pr_err("Bad value %d\n", fg_reset_on_lockup); + fg_reset_on_lockup = old_val; + return -EINVAL; + } + + bms_psy = power_supply_get_by_name("bms"); + if (!bms_psy) { + pr_err("bms psy not found\n"); + return 0; + } + + chip = power_supply_get_drvdata(bms_psy); + if (!chip->ima_supported) { + pr_err("Cannot set this for non-IMA supported FG\n"); + fg_reset_on_lockup = old_val; + return -EINVAL; + } + + if (fg_debug_mask & FG_STATUS) + pr_info("fg_reset_on_lockup set to %d\n", fg_reset_on_lockup); + + if (fg_reset_on_lockup) + schedule_delayed_work(&chip->check_sanity_work, + msecs_to_jiffies(1000)); + else + cancel_delayed_work_sync(&chip->check_sanity_work); + + return rc; +} + +static struct kernel_param_ops fg_reset_ops = { + .set = fg_reset_lockup_set, + .get = param_get_int, +}; + +module_param_cb(reset_on_lockup, &fg_reset_ops, &fg_reset_on_lockup, 0644); + static int fg_sense_type_set(const char *val, const struct kernel_param *kp) { int rc; @@ -7025,6 +9177,7 @@ static struct platform_driver fg_driver = { }, .probe = fg_probe, .remove = fg_remove, + .shutdown = fg_shutdown, }; static int __init fg_init(void) diff --git a/drivers/power/supply/qcom/qpnp-smbcharger.c b/drivers/power/supply/qcom/qpnp-smbcharger.c index a2863dcf7389..a31d4d0cb198 100644 --- a/drivers/power/supply/qcom/qpnp-smbcharger.c +++ b/drivers/power/supply/qcom/qpnp-smbcharger.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2016 The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2016, 2019 The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -144,6 +144,7 @@ struct smbchg_chip { bool vbat_above_headroom; bool force_aicl_rerun; bool hvdcp3_supported; + bool allow_hvdcp3_detection; bool restricted_charging; bool skip_usb_suspend_for_fake_battery; bool hvdcp_not_supported; @@ -175,6 +176,7 @@ struct smbchg_chip { int n_vbat_samples; /* status variables */ + int max_pulse_allowed; int wake_reasons; int previous_soc; int usb_online; @@ -286,7 +288,7 @@ struct smbchg_chip { struct votable *hw_aicl_rerun_disable_votable; struct votable *hw_aicl_rerun_enable_indirect_votable; struct votable *aicl_deglitch_short_votable; - + struct votable *hvdcp_enable_votable; /* extcon for VBUS / ID notification to USB */ struct extcon_dev *extcon; }; @@ -351,6 +353,7 @@ enum wake_reason { #define WEAK_CHARGER_ICL_VOTER "WEAK_CHARGER_ICL_VOTER" #define SW_AICL_ICL_VOTER "SW_AICL_ICL_VOTER" #define CHG_SUSPEND_WORKAROUND_ICL_VOTER "CHG_SUSPEND_WORKAROUND_ICL_VOTER" +#define SHUTDOWN_WORKAROUND_ICL_VOTER "SHUTDOWN_WORKAROUND_ICL_VOTER" /* USB SUSPEND VOTERS */ /* userspace has suspended charging altogether */ @@ -411,6 +414,10 @@ enum wake_reason { "VARB_WRKARND_SHORT_DEGLITCH_VOTER" /* QC 2.0 */ #define HVDCP_SHORT_DEGLITCH_VOTER "HVDCP_SHORT_DEGLITCH_VOTER" +/* Hvdcp enable voters*/ +#define HVDCP_PMIC_VOTER "HVDCP_PMIC_VOTER" +#define HVDCP_OTG_VOTER "HVDCP_OTG_VOTER" +#define HVDCP_PULSING_VOTER "HVDCP_PULSING_VOTER" static const unsigned int smbchg_extcon_cable[] = { EXTCON_USB, @@ -420,61 +427,61 @@ static const unsigned int smbchg_extcon_cable[] = { static int smbchg_debug_mask; module_param_named( - debug_mask, smbchg_debug_mask, int, S_IRUSR | S_IWUSR + debug_mask, smbchg_debug_mask, int, 00600 ); static int smbchg_parallel_en = 1; module_param_named( - parallel_en, smbchg_parallel_en, int, S_IRUSR | S_IWUSR + parallel_en, smbchg_parallel_en, int, 00600 ); static int smbchg_main_chg_fcc_percent = 50; module_param_named( main_chg_fcc_percent, smbchg_main_chg_fcc_percent, - int, S_IRUSR | S_IWUSR + int, 00600 ); static int smbchg_main_chg_icl_percent = 60; module_param_named( main_chg_icl_percent, smbchg_main_chg_icl_percent, - int, S_IRUSR | S_IWUSR + int, 00600 ); static int smbchg_default_hvdcp_icl_ma = 1800; module_param_named( default_hvdcp_icl_ma, smbchg_default_hvdcp_icl_ma, - int, S_IRUSR | S_IWUSR + int, 00600 ); static int smbchg_default_hvdcp3_icl_ma = 3000; module_param_named( default_hvdcp3_icl_ma, smbchg_default_hvdcp3_icl_ma, - int, S_IRUSR | S_IWUSR + int, 00600 ); static int smbchg_default_dcp_icl_ma = 1800; module_param_named( default_dcp_icl_ma, smbchg_default_dcp_icl_ma, - int, S_IRUSR | S_IWUSR + int, 00600 ); static int wipower_dyn_icl_en; module_param_named( dynamic_icl_wipower_en, wipower_dyn_icl_en, - int, S_IRUSR | S_IWUSR + int, 00600 ); static int wipower_dcin_interval = ADC_MEAS1_INTERVAL_2P0MS; module_param_named( wipower_dcin_interval, wipower_dcin_interval, - int, S_IRUSR | S_IWUSR + int, 00600 ); #define WIPOWER_DEFAULT_HYSTERISIS_UV 250000 static int wipower_dcin_hyst_uv = WIPOWER_DEFAULT_HYSTERISIS_UV; module_param_named( wipower_dcin_hyst_uv, wipower_dcin_hyst_uv, - int, S_IRUSR | S_IWUSR + int, 00600 ); #define pr_smb(reason, fmt, ...) 
\ @@ -625,6 +632,18 @@ static void smbchg_relax(struct smbchg_chip *chip, int reason) mutex_unlock(&chip->pm_lock); }; +static bool is_bms_psy_present(struct smbchg_chip *chip) +{ + if (chip->bms_psy) + return true; + + if (chip->bms_psy_name) + chip->bms_psy = power_supply_get_by_name( + (char *)chip->bms_psy_name); + + return chip->bms_psy ? true : false; +} + enum pwr_path_type { UNKNOWN = 0, PWR_PATH_BATTERY = 1, @@ -804,6 +823,7 @@ static char *usb_type_str[] = { static int get_type(u8 type_reg) { unsigned long type = type_reg; + type >>= TYPE_BITS_OFFSET; return find_first_bit(&type, N_TYPE_BITS); } @@ -1059,6 +1079,33 @@ static int get_prop_batt_current_now(struct smbchg_chip *chip) return ua; } +#define DEFAULT_BATT_RESISTANCE_ID 0 +static int get_prop_batt_resistance_id(struct smbchg_chip *chip) +{ + int rbatt, rc; + + rc = get_property_from_fg(chip, POWER_SUPPLY_PROP_RESISTANCE_ID, + &rbatt); + if (rc) { + pr_smb(PR_STATUS, "Couldn't get resistance id rc = %d\n", rc); + rbatt = DEFAULT_BATT_RESISTANCE_ID; + } + return rbatt; +} + +#define DEFAULT_BATT_FULL_CHG_CAPACITY 0 +static int get_prop_batt_full_charge(struct smbchg_chip *chip) +{ + int bfc, rc; + + rc = get_property_from_fg(chip, POWER_SUPPLY_PROP_CHARGE_FULL, &bfc); + if (rc) { + pr_smb(PR_STATUS, "Couldn't get charge_full rc = %d\n", rc); + bfc = DEFAULT_BATT_FULL_CHG_CAPACITY; + } + return bfc; +} + #define DEFAULT_BATT_VOLTAGE_NOW 0 static int get_prop_batt_voltage_now(struct smbchg_chip *chip) { @@ -1485,6 +1532,47 @@ static struct power_supply *get_parallel_psy(struct smbchg_chip *chip) return chip->parallel.psy; } +static int smbchg_request_dpdm(struct smbchg_chip *chip, bool enable) +{ + int rc = 0; + + /* fetch the DPDM regulator */ + if (!chip->dpdm_reg && of_get_property(chip->dev->of_node, + "dpdm-supply", NULL)) { + chip->dpdm_reg = devm_regulator_get(chip->dev, "dpdm"); + if (IS_ERR(chip->dpdm_reg)) { + rc = PTR_ERR(chip->dpdm_reg); + dev_err(chip->dev, "Couldn't get dpdm regulator rc=%d\n", + rc); + chip->dpdm_reg = NULL; + return rc; + } + } + + if (!chip->dpdm_reg) + return -ENODEV; + + if (enable) { + if (!regulator_is_enabled(chip->dpdm_reg)) { + pr_smb(PR_STATUS, "enabling DPDM regulator\n"); + rc = regulator_enable(chip->dpdm_reg); + if (rc < 0) + dev_err(chip->dev, "Couldn't enable dpdm regulator rc=%d\n", + rc); + } + } else { + if (regulator_is_enabled(chip->dpdm_reg)) { + pr_smb(PR_STATUS, "disabling DPDM regulator\n"); + rc = regulator_disable(chip->dpdm_reg); + if (rc < 0) + dev_err(chip->dev, "Couldn't disable dpdm regulator rc=%d\n", + rc); + } + } + + return rc; +} + static void smbchg_usb_update_online_work(struct work_struct *work) { struct smbchg_chip *chip = container_of(work, @@ -1849,6 +1937,22 @@ static bool smbchg_is_usbin_active_pwr_src(struct smbchg_chip *chip) && (reg & USBIN_ACTIVE_PWR_SRC_BIT); } +static void smbchg_detect_parallel_charger(struct smbchg_chip *chip) +{ + int rc; + struct power_supply *parallel_psy = get_parallel_psy(chip); + union power_supply_propval pval = {0, }; + + if (parallel_psy) { + pval.intval = true; + rc = power_supply_set_property(parallel_psy, + POWER_SUPPLY_PROP_PRESENT, &pval); + chip->parallel_charger_detected = rc ? 
false : true; + if (rc) + pr_debug("parallel-charger absent rc=%d\n", rc); + } +} + static int smbchg_parallel_usb_charging_en(struct smbchg_chip *chip, bool en) { struct power_supply *parallel_psy = get_parallel_psy(chip); @@ -1874,6 +1978,7 @@ static int smbchg_sw_esr_pulse_en(struct smbchg_chip *chip, bool en) return 0; } + fg_current_now = abs(fg_current_now) / 1000; icl_ma = max(chip->iterm_ma + ESR_PULSE_CURRENT_DELTA_MA, fg_current_now - ESR_PULSE_CURRENT_DELTA_MA); rc = vote(chip->fcc_votable, ESR_PULSE_FCC_VOTER, en, icl_ma); @@ -1985,7 +2090,8 @@ static void smbchg_parallel_usb_taper(struct smbchg_chip *chip) int parallel_fcc_ma, tries = 0; u8 reg = 0; - if (!parallel_psy || !chip->parallel_charger_detected) + smbchg_detect_parallel_charger(chip); + if (!chip->parallel_charger_detected) return; smbchg_stay_awake(chip, PM_PARALLEL_TAPER); @@ -2121,8 +2227,6 @@ static void smbchg_parallel_usb_enable(struct smbchg_chip *chip, supplied_parallel_fcc_ma); chip->parallel.enabled_once = true; - - return; } static bool smbchg_is_parallel_usb_ok(struct smbchg_chip *chip, @@ -2406,6 +2510,27 @@ static int dc_suspend_vote_cb(struct votable *votable, return rc; } +#define HVDCP_EN_BIT BIT(3) +static int smbchg_hvdcp_enable_cb(struct votable *votable, + void *data, + int enable, + const char *client) +{ + int rc = 0; + struct smbchg_chip *chip = data; + + pr_err("smbchg_hvdcp_enable_cb HVDCP %s\n", + enable ? "enabled" : "disabled"); + rc = smbchg_sec_masked_write(chip, + chip->usb_chgpth_base + CHGPTH_CFG, + HVDCP_EN_BIT, enable ? HVDCP_EN_BIT : 0); + if (rc < 0) + dev_err(chip->dev, "Couldn't %s HVDCP rc=%d\n", + enable ? "enable" : "disable", rc); + + return rc; +} + static int set_fastchg_current_vote_cb(struct votable *votable, void *data, int fcc_ma, @@ -3635,17 +3760,11 @@ static void check_battery_type(struct smbchg_chip *chip) static void smbchg_external_power_changed(struct power_supply *psy) { struct smbchg_chip *chip = power_supply_get_drvdata(psy); - union power_supply_propval prop = {0,}; - int rc, current_limit = 0, soc; - enum power_supply_type usb_supply_type; - char *usb_type_name = "null"; - - if (chip->bms_psy_name) - chip->bms_psy = - power_supply_get_by_name((char *)chip->bms_psy_name); + int rc, soc; smbchg_aicl_deglitch_wa_check(chip); - if (chip->bms_psy) { + + if (is_bms_psy_present(chip)) { check_battery_type(chip); soc = get_prop_batt_capacity(chip); if (chip->previous_soc != soc) { @@ -3660,37 +3779,8 @@ static void smbchg_external_power_changed(struct power_supply *psy) rc); } - rc = power_supply_get_property(chip->usb_psy, - POWER_SUPPLY_PROP_CHARGING_ENABLED, &prop); - if (rc == 0) - vote(chip->usb_suspend_votable, POWER_SUPPLY_EN_VOTER, - !prop.intval, 0); - - current_limit = chip->usb_current_max / 1000; - - /* Override if type-c charger used */ - if (chip->typec_current_ma > 500 && - current_limit < chip->typec_current_ma) - current_limit = chip->typec_current_ma; - - read_usb_type(chip, &usb_type_name, &usb_supply_type); - - if (usb_supply_type != POWER_SUPPLY_TYPE_USB) - goto skip_current_for_non_sdp; - - pr_smb(PR_MISC, "usb type = %s current_limit = %d\n", - usb_type_name, current_limit); - - rc = vote(chip->usb_icl_votable, PSY_ICL_VOTER, true, - current_limit); - if (rc < 0) - pr_err("Couldn't update USB PSY ICL vote rc=%d\n", rc); - -skip_current_for_non_sdp: + /* adjust vfloat */ smbchg_vfloat_adjust_check(chip); - - if (chip->batt_psy) - power_supply_changed(chip->batt_psy); } static int smbchg_otg_regulator_enable(struct regulator_dev *rdev) @@ 
-3766,7 +3856,6 @@ struct regulator_ops smbchg_otg_reg_ops = { #define USBIN_ADAPTER_9V 0x3 #define USBIN_ADAPTER_5V_9V_CONT 0x2 #define USBIN_ADAPTER_5V_UNREGULATED_9V 0x5 -#define HVDCP_EN_BIT BIT(3) static int smbchg_external_otg_regulator_enable(struct regulator_dev *rdev) { int rc = 0; @@ -3790,9 +3879,7 @@ static int smbchg_external_otg_regulator_enable(struct regulator_dev *rdev) * allowance to 9V, so that the audio boost operating in reverse never * gets detected as a valid input */ - rc = smbchg_sec_masked_write(chip, - chip->usb_chgpth_base + CHGPTH_CFG, - HVDCP_EN_BIT, 0); + rc = vote(chip->hvdcp_enable_votable, HVDCP_OTG_VOTER, true, 0); if (rc < 0) { dev_err(chip->dev, "Couldn't disable HVDCP rc=%d\n", rc); return rc; @@ -3826,9 +3913,7 @@ static int smbchg_external_otg_regulator_disable(struct regulator_dev *rdev) * value in order to allow normal USBs to be recognized as a valid * input. */ - rc = smbchg_sec_masked_write(chip, - chip->usb_chgpth_base + CHGPTH_CFG, - HVDCP_EN_BIT, HVDCP_EN_BIT); + rc = vote(chip->hvdcp_enable_votable, HVDCP_OTG_VOTER, false, 1); if (rc < 0) { dev_err(chip->dev, "Couldn't enable HVDCP rc=%d\n", rc); return rc; @@ -3958,6 +4043,11 @@ static void smbchg_chg_led_brightness_set(struct led_classdev *cdev, u8 reg; int rc; + if (!is_bms_psy_present(chip)) { + dev_err(chip->dev, "Couldn't access bms psy\n"); + return; + } + reg = (value > LED_OFF) ? CHG_LED_ON << CHG_LED_SHIFT : CHG_LED_OFF << CHG_LED_SHIFT; pval.intval = value > LED_OFF ? 1 : 0; @@ -4005,6 +4095,11 @@ static void smbchg_chg_led_blink_set(struct smbchg_chip *chip, u8 reg; int rc; + if (!is_bms_psy_present(chip)) { + dev_err(chip->dev, "Couldn't access bms psy\n"); + return; + } + pval.intval = (blinking == 0) ? 0 : 1; power_supply_set_property(chip->bms_psy, POWER_SUPPLY_PROP_HI_POWER, &pval); @@ -4013,11 +4108,11 @@ static void smbchg_chg_led_blink_set(struct smbchg_chip *chip, reg = CHG_LED_OFF << CHG_LED_SHIFT; } else { if (blinking == 1) - reg = LED_BLINKING_PATTERN1 << CHG_LED_SHIFT; - else if (blinking == 2) reg = LED_BLINKING_PATTERN2 << CHG_LED_SHIFT; - else + else if (blinking == 2) reg = LED_BLINKING_PATTERN1 << CHG_LED_SHIFT; + else + reg = LED_BLINKING_PATTERN2 << CHG_LED_SHIFT; } rc = smbchg_sec_masked_write(chip, @@ -4127,7 +4222,7 @@ static int smbchg_trim_add_steps(int prev_trim, int delta_steps) else if (scale_code > CENTER_TRIM_CODE) linear_scale = scale_code - (CENTER_TRIM_CODE + 1); - /* check if we can accomodate delta steps with just the offset */ + /* check if we can accommodate delta steps with just the offset */ if (linear_offset + delta_steps >= 0 && linear_offset + delta_steps <= MAX_LIN_CODE) { linear_offset += delta_steps; @@ -4317,7 +4412,6 @@ stop: reschedule: schedule_delayed_work(&chip->vfloat_adjust_work, msecs_to_jiffies(VFLOAT_RESAMPLE_DELAY_MS)); - return; } static int smbchg_charging_status_change(struct smbchg_chip *chip) @@ -4407,9 +4501,26 @@ static int smbchg_change_usb_supply_type(struct smbchg_chip *chip, goto out; } - /* otherwise if it is unknown, set type after the vote */ - if (type == POWER_SUPPLY_TYPE_UNKNOWN) + /* otherwise if it is unknown, set type after removing the vote */ + if (type == POWER_SUPPLY_TYPE_UNKNOWN) { + rc = vote(chip->usb_icl_votable, PSY_ICL_VOTER, true, 0); + if (rc < 0) + pr_err("Couldn't vote for new USB ICL rc=%d\n", rc); chip->usb_supply_type = type; + } + /* + * Update TYPE property to DCP for HVDCP/HVDCP3 charger types + * so that they can be recongized as AC chargers by healthd. 
+ * Don't report UNKNOWN charger type to prevent healthd missing + * detecting this power_supply status change. + */ + if (chip->usb_supply_type == POWER_SUPPLY_TYPE_USB_HVDCP_3 + || chip->usb_supply_type == POWER_SUPPLY_TYPE_USB_HVDCP) + chip->usb_psy_d.type = POWER_SUPPLY_TYPE_USB_DCP; + else if (chip->usb_supply_type == POWER_SUPPLY_TYPE_UNKNOWN) + chip->usb_psy_d.type = POWER_SUPPLY_TYPE_USB; + else + chip->usb_psy_d.type = chip->usb_supply_type; if (!chip->skip_usb_notification) power_supply_changed(chip->usb_psy); @@ -4507,8 +4618,11 @@ static int set_usb_psy_dp_dm(struct smbchg_chip *chip, int state) if (!rc && !(reg & USBIN_UV_BIT) && !(reg & USBIN_SRC_DET_BIT)) { pr_smb(PR_MISC, "overwriting state = %d with %d\n", state, POWER_SUPPLY_DP_DM_DPF_DMF); - if (chip->dpdm_reg && !regulator_is_enabled(chip->dpdm_reg)) - return regulator_enable(chip->dpdm_reg); + rc = smbchg_request_dpdm(chip, true); + if (rc < 0) { + pr_err("Couldn't enable DP/DM for pulsing rc=%d\n", rc); + return rc; + } } pr_smb(PR_MISC, "setting usb psy dp dm = %d\n", state); pval.intval = state; @@ -4523,11 +4637,6 @@ static void restore_from_hvdcp_detection(struct smbchg_chip *chip) { int rc; - pr_smb(PR_MISC, "Retracting HVDCP vote for ICL\n"); - rc = vote(chip->usb_icl_votable, HVDCP_ICL_VOTER, false, 0); - if (rc < 0) - pr_err("Couldn't retract HVDCP ICL vote rc=%d\n", rc); - /* switch to 9V HVDCP */ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG, HVDCP_ADAPTER_SEL_MASK, HVDCP_9V); @@ -4535,9 +4644,7 @@ static void restore_from_hvdcp_detection(struct smbchg_chip *chip) pr_err("Couldn't configure HVDCP 9V rc=%d\n", rc); /* enable HVDCP */ - rc = smbchg_sec_masked_write(chip, - chip->usb_chgpth_base + CHGPTH_CFG, - HVDCP_EN_BIT, HVDCP_EN_BIT); + rc = vote(chip->hvdcp_enable_votable, HVDCP_PULSING_VOTER, false, 1); if (rc < 0) pr_err("Couldn't enable HVDCP rc=%d\n", rc); @@ -4562,6 +4669,19 @@ static void restore_from_hvdcp_detection(struct smbchg_chip *chip) chip->hvdcp_3_det_ignore_uv = false; chip->pulse_cnt = 0; + + if ((chip->schg_version == QPNP_SCHG_LITE) + && is_hvdcp_present(chip)) { + pr_smb(PR_MISC, "Forcing 9V HVDCP 2.0\n"); + rc = force_9v_hvdcp(chip); + if (rc) + pr_err("Failed to force 9V HVDCP=%d\n", rc); + } + + pr_smb(PR_MISC, "Retracting HVDCP vote for ICL\n"); + rc = vote(chip->usb_icl_votable, HVDCP_ICL_VOTER, false, 0); + if (rc < 0) + pr_err("Couldn't retract HVDCP ICL vote rc=%d\n", rc); } #define RESTRICTED_CHG_FCC_PERCENT 50 @@ -4603,10 +4723,12 @@ static void handle_usb_removal(struct smbchg_chip *chip) /* Clear typec current status */ if (chip->typec_psy) chip->typec_current_ma = 0; + /* cancel/wait for hvdcp pending work if any */ + cancel_delayed_work_sync(&chip->hvdcp_det_work); + smbchg_relax(chip, PM_DETECT_HVDCP); smbchg_change_usb_supply_type(chip, POWER_SUPPLY_TYPE_UNKNOWN); extcon_set_cable_state_(chip->extcon, EXTCON_USB, chip->usb_present); - if (chip->dpdm_reg) - regulator_disable(chip->dpdm_reg); + smbchg_request_dpdm(chip, false); schedule_work(&chip->usb_set_online_work); pr_smb(PR_MISC, "setting usb psy health UNKNOWN\n"); @@ -4655,8 +4777,6 @@ static bool is_usbin_uv_high(struct smbchg_chip *chip) #define HVDCP_NOTIFY_MS 2500 static void handle_usb_insertion(struct smbchg_chip *chip) { - struct power_supply *parallel_psy = get_parallel_psy(chip); - union power_supply_propval pval = {0, }; enum power_supply_type usb_supply_type; int rc; char *usb_type_name = "null"; @@ -4703,14 +4823,7 @@ static void handle_usb_insertion(struct smbchg_chip *chip) 
msecs_to_jiffies(HVDCP_NOTIFY_MS)); } - if (parallel_psy) { - pval.intval = true; - rc = power_supply_set_property(parallel_psy, - POWER_SUPPLY_PROP_PRESENT, &pval); - chip->parallel_charger_detected = rc ? false : true; - if (rc) - pr_debug("parallel-charger absent rc=%d\n", rc); - } + smbchg_detect_parallel_charger(chip); if (chip->parallel.avail && chip->aicl_done_irq && !chip->enable_aicl_wake) { @@ -4957,7 +5070,7 @@ static int wait_for_src_detect(struct smbchg_chip *chip, bool high) if (high == src_detect) return 0; - pr_err("src detect didnt go to a %s state, still at %s, tries = %d, rc = %d\n", + pr_err("src detect didn't go to a %s state, still at %s, tries = %d, rc = %d\n", high ? "risen" : "lowered", src_detect ? "high" : "low", tries, rc); @@ -5023,6 +5136,30 @@ static int fake_insertion_removal(struct smbchg_chip *chip, bool insertion) return 0; } +static void smbchg_handle_hvdcp3_disable(struct smbchg_chip *chip) +{ + enum power_supply_type usb_supply_type; + char *usb_type_name = "NULL"; + + if (chip->allow_hvdcp3_detection) + return; + + chip->pulse_cnt = 0; + + if (is_hvdcp_present(chip)) { + smbchg_change_usb_supply_type(chip, + POWER_SUPPLY_TYPE_USB_HVDCP); + } else if (is_usb_present(chip)) { + read_usb_type(chip, &usb_type_name, &usb_supply_type); + smbchg_change_usb_supply_type(chip, usb_supply_type); + if (usb_supply_type == POWER_SUPPLY_TYPE_USB_DCP) + schedule_delayed_work(&chip->hvdcp_det_work, + msecs_to_jiffies(HVDCP_NOTIFY_MS)); + } else { + smbchg_change_usb_supply_type(chip, POWER_SUPPLY_TYPE_UNKNOWN); + } +} + static int smbchg_prepare_for_pulsing(struct smbchg_chip *chip) { int rc = 0; @@ -5050,8 +5187,7 @@ static int smbchg_prepare_for_pulsing(struct smbchg_chip *chip) /* disable HVDCP */ pr_smb(PR_MISC, "Disable HVDCP\n"); - rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG, - HVDCP_EN_BIT, 0); + rc = vote(chip->hvdcp_enable_votable, HVDCP_PULSING_VOTER, true, 0); if (rc < 0) { pr_err("Couldn't disable HVDCP rc=%d\n", rc); goto out; @@ -5142,8 +5278,7 @@ static int smbchg_unprepare_for_pulsing(struct smbchg_chip *chip) { int rc = 0; - if (chip->dpdm_reg && !regulator_is_enabled(chip->dpdm_reg)) - rc = regulator_enable(chip->dpdm_reg); + rc = smbchg_request_dpdm(chip, true); if (rc < 0) { pr_err("Couldn't enable DP/DM for pulsing rc=%d\n", rc); return rc; @@ -5160,9 +5295,7 @@ static int smbchg_unprepare_for_pulsing(struct smbchg_chip *chip) /* enable HVDCP */ pr_smb(PR_MISC, "Enable HVDCP\n"); - rc = smbchg_sec_masked_write(chip, - chip->usb_chgpth_base + CHGPTH_CFG, - HVDCP_EN_BIT, HVDCP_EN_BIT); + rc = vote(chip->hvdcp_enable_votable, HVDCP_PULSING_VOTER, false, 1); if (rc < 0) { pr_err("Couldn't enable HVDCP rc=%d\n", rc); return rc; @@ -5203,6 +5336,15 @@ static int smbchg_unprepare_for_pulsing(struct smbchg_chip *chip) */ chip->parallel.enabled_once = false; + /* Enable AICL */ + pr_smb(PR_MISC, "Enable AICL\n"); + rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG, + AICL_EN_BIT, AICL_EN_BIT); + if (rc < 0) { + pr_err("Couldn't enable AICL rc=%d\n", rc); + goto out; + } + /* fake an insertion */ pr_smb(PR_MISC, "Faking Insertion\n"); rc = fake_insertion_removal(chip, true); @@ -5212,15 +5354,6 @@ static int smbchg_unprepare_for_pulsing(struct smbchg_chip *chip) } chip->hvdcp_3_det_ignore_uv = false; - /* Enable AICL */ - pr_smb(PR_MISC, "Enable AICL\n"); - rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG, - AICL_EN_BIT, 0); - if (rc < 0) { - pr_err("Couldn't enable AICL rc=%d\n", rc); - 
return rc; - } - out: /* * There are many QC 2.0 chargers that collapse before the aicl deglitch @@ -5243,6 +5376,9 @@ out: pr_smb(PR_MISC, "HVDCP removed\n"); update_usb_status(chip, 0, 0); } + + smbchg_handle_hvdcp3_disable(chip); + return rc; } @@ -5250,49 +5386,63 @@ out: #define APSD_RERUN BIT(0) static int rerun_apsd(struct smbchg_chip *chip) { - int rc; + int rc = 0; - reinit_completion(&chip->src_det_raised); - reinit_completion(&chip->usbin_uv_lowered); - reinit_completion(&chip->src_det_lowered); - reinit_completion(&chip->usbin_uv_raised); + chip->hvdcp_3_det_ignore_uv = true; - /* re-run APSD */ - rc = smbchg_masked_write(chip, chip->usb_chgpth_base + USB_CMD_APSD, - APSD_RERUN, APSD_RERUN); - if (rc) { - pr_err("Couldn't re-run APSD rc=%d\n", rc); - return rc; - } + if (chip->schg_version == QPNP_SCHG_LITE) { + pr_smb(PR_STATUS, "Re-running APSD\n"); + reinit_completion(&chip->src_det_raised); + reinit_completion(&chip->usbin_uv_lowered); + reinit_completion(&chip->src_det_lowered); + reinit_completion(&chip->usbin_uv_raised); - pr_smb(PR_MISC, "Waiting on rising usbin uv\n"); - rc = wait_for_usbin_uv(chip, true); - if (rc < 0) { - pr_err("wait for usbin uv failed rc = %d\n", rc); - return rc; - } + /* re-run APSD */ + rc = smbchg_masked_write(chip, + chip->usb_chgpth_base + USB_CMD_APSD, + APSD_RERUN, APSD_RERUN); + if (rc) { + pr_err("Couldn't re-run APSD rc=%d\n", rc); + goto out; + } - pr_smb(PR_MISC, "Waiting on falling src det\n"); - rc = wait_for_src_detect(chip, false); - if (rc < 0) { - pr_err("wait for src detect failed rc = %d\n", rc); - return rc; - } + pr_smb(PR_MISC, "Waiting on rising usbin uv\n"); + rc = wait_for_usbin_uv(chip, true); + if (rc < 0) { + pr_err("wait for usbin uv failed rc = %d\n", rc); + goto out; + } - pr_smb(PR_MISC, "Waiting on falling usbin uv\n"); - rc = wait_for_usbin_uv(chip, false); - if (rc < 0) { - pr_err("wait for usbin uv failed rc = %d\n", rc); - return rc; - } + pr_smb(PR_MISC, "Waiting on falling src det\n"); + rc = wait_for_src_detect(chip, false); + if (rc < 0) { + pr_err("wait for src detect failed rc = %d\n", rc); + goto out; + } - pr_smb(PR_MISC, "Waiting on rising src det\n"); - rc = wait_for_src_detect(chip, true); - if (rc < 0) { - pr_err("wait for src detect failed rc = %d\n", rc); - return rc; + pr_smb(PR_MISC, "Waiting on falling usbin uv\n"); + rc = wait_for_usbin_uv(chip, false); + if (rc < 0) { + pr_err("wait for usbin uv failed rc = %d\n", rc); + goto out; + } + + pr_smb(PR_MISC, "Waiting on rising src det\n"); + rc = wait_for_src_detect(chip, true); + if (rc < 0) { + pr_err("wait for src detect failed rc = %d\n", rc); + goto out; + } + } else { + pr_smb(PR_STATUS, "Faking Removal\n"); + rc = fake_insertion_removal(chip, false); + msleep(500); + pr_smb(PR_STATUS, "Faking Insertion\n"); + rc = fake_insertion_removal(chip, true); } +out: + chip->hvdcp_3_det_ignore_uv = false; return rc; } @@ -5332,6 +5482,12 @@ static int smbchg_prepare_for_pulsing_lite(struct smbchg_chip *chip) { int rc = 0; + pr_smb(PR_MISC, "HVDCP voting for 300mA ICL\n"); + rc = vote(chip->usb_icl_votable, HVDCP_ICL_VOTER, true, 300); + if (rc < 0) { + pr_err("Couldn't vote for 300mA HVDCP ICL rc=%d\n", rc); + return rc; + } /* check if HVDCP is already in 5V continuous mode */ if (is_hvdcp_5v_cont_mode(chip)) { pr_smb(PR_MISC, "HVDCP by default is in 5V continuous mode\n"); @@ -5358,19 +5514,10 @@ static int smbchg_prepare_for_pulsing_lite(struct smbchg_chip *chip) goto out; } - pr_smb(PR_MISC, "HVDCP voting for 300mA ICL\n"); - rc = 
vote(chip->usb_icl_votable, HVDCP_ICL_VOTER, true, 300); - if (rc < 0) { - pr_err("Couldn't vote for 300mA HVDCP ICL rc=%d\n", rc); - goto out; - } - pr_smb(PR_MISC, "Disable AICL\n"); smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG, AICL_EN_BIT, 0); - chip->hvdcp_3_det_ignore_uv = true; - /* re-run APSD */ rc = rerun_apsd(chip); if (rc) { @@ -5378,8 +5525,6 @@ static int smbchg_prepare_for_pulsing_lite(struct smbchg_chip *chip) goto out; } - chip->hvdcp_3_det_ignore_uv = false; - pr_smb(PR_MISC, "Enable AICL\n"); smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG, AICL_EN_BIT, AICL_EN_BIT); @@ -5408,6 +5553,10 @@ static int smbchg_prepare_for_pulsing_lite(struct smbchg_chip *chip) out: chip->hvdcp_3_det_ignore_uv = false; restore_from_hvdcp_detection(chip); + if (!is_src_detect_high(chip)) { + pr_smb(PR_MISC, "HVDCP removed - force removal\n"); + update_usb_status(chip, 0, true); + } return rc; } @@ -5427,6 +5576,12 @@ static int smbchg_unprepare_for_pulsing_lite(struct smbchg_chip *chip) if (rc < 0) pr_err("Couldn't retract HVDCP ICL vote rc=%d\n", rc); + if (!is_src_detect_high(chip)) { + pr_smb(PR_MISC, "HVDCP removed\n"); + update_usb_status(chip, 0, 0); + } + smbchg_handle_hvdcp3_disable(chip); + return rc; } @@ -5564,6 +5719,7 @@ static void update_typec_otg_status(struct smbchg_chip *chip, int mode, bool force) { union power_supply_propval pval = {0, }; + pr_smb(PR_TYPEC, "typec mode = %d\n", mode); if (mode == POWER_SUPPLY_TYPE_DFP) { @@ -5585,6 +5741,21 @@ static void update_typec_otg_status(struct smbchg_chip *chip, int mode, } } +static int smbchg_set_sdp_current(struct smbchg_chip *chip, int current_ma) +{ + if (chip->usb_supply_type == POWER_SUPPLY_TYPE_USB) { + /* Override if type-c charger used */ + if (chip->typec_current_ma > 500 && + current_ma < chip->typec_current_ma) { + current_ma = chip->typec_current_ma; + } + pr_smb(PR_MISC, "from USB current_ma = %d\n", current_ma); + vote(chip->usb_icl_votable, PSY_ICL_VOTER, true, current_ma); + } + + return 0; +} + static int smbchg_usb_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) @@ -5593,7 +5764,12 @@ static int smbchg_usb_get_property(struct power_supply *psy, switch (psp) { case POWER_SUPPLY_PROP_CURRENT_MAX: - val->intval = chip->usb_current_max; + case POWER_SUPPLY_PROP_SDP_CURRENT_MAX: + if (chip->usb_icl_votable) + val->intval = get_client_vote(chip->usb_icl_votable, + PSY_ICL_VOTER) * 1000; + else + val->intval = 0; break; case POWER_SUPPLY_PROP_PRESENT: val->intval = chip->usb_present; @@ -5602,6 +5778,9 @@ static int smbchg_usb_get_property(struct power_supply *psy, val->intval = chip->usb_online; break; case POWER_SUPPLY_PROP_TYPE: + val->intval = chip->usb_psy_d.type; + break; + case POWER_SUPPLY_PROP_REAL_TYPE: val->intval = chip->usb_supply_type; break; case POWER_SUPPLY_PROP_HEALTH: @@ -5620,25 +5799,26 @@ static int smbchg_usb_set_property(struct power_supply *psy, struct smbchg_chip *chip = power_supply_get_drvdata(psy); switch (psp) { - case POWER_SUPPLY_PROP_CURRENT_MAX: - chip->usb_current_max = val->intval; - break; case POWER_SUPPLY_PROP_ONLINE: chip->usb_online = val->intval; break; + case POWER_SUPPLY_PROP_CURRENT_MAX: + case POWER_SUPPLY_PROP_SDP_CURRENT_MAX: + smbchg_set_sdp_current(chip, val->intval / 1000); default: return -EINVAL; } - power_supply_changed(psy); return 0; } static int -smbchg_usb_is_writeable(struct power_supply *psy, enum power_supply_property psp) +smbchg_usb_is_writeable(struct 
power_supply *psy, + enum power_supply_property psp) { switch (psp) { case POWER_SUPPLY_PROP_CURRENT_MAX: + case POWER_SUPPLY_PROP_SDP_CURRENT_MAX: return 1; default: break; @@ -5658,7 +5838,9 @@ static enum power_supply_property smbchg_usb_properties[] = { POWER_SUPPLY_PROP_ONLINE, POWER_SUPPLY_PROP_CURRENT_MAX, POWER_SUPPLY_PROP_TYPE, + POWER_SUPPLY_PROP_REAL_TYPE, POWER_SUPPLY_PROP_HEALTH, + POWER_SUPPLY_PROP_SDP_CURRENT_MAX, }; #define CHARGE_OUTPUT_VTG_RATIO 840 @@ -5703,6 +5885,8 @@ static enum power_supply_property smbchg_battery_properties[] = { POWER_SUPPLY_PROP_CURRENT_NOW, POWER_SUPPLY_PROP_TEMP, POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_RESISTANCE_ID, + POWER_SUPPLY_PROP_CHARGE_FULL, POWER_SUPPLY_PROP_SAFETY_TIMER_ENABLE, POWER_SUPPLY_PROP_INPUT_CURRENT_MAX, POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED, @@ -5713,6 +5897,8 @@ static enum power_supply_property smbchg_battery_properties[] = { POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED, POWER_SUPPLY_PROP_RERUN_AICL, POWER_SUPPLY_PROP_RESTRICTED_CHARGING, + POWER_SUPPLY_PROP_ALLOW_HVDCP3, + POWER_SUPPLY_PROP_MAX_PULSE_ALLOWED, }; static int smbchg_battery_set_property(struct power_supply *psy, @@ -5760,7 +5946,7 @@ static int smbchg_battery_set_property(struct power_supply *psy, * Trigger a panic if there is an error while switching * buck frequency. This will prevent LS FET damage. */ - BUG_ON(1); + WARN_ON(1); } rc = smbchg_otg_pulse_skip_disable(chip, @@ -5790,6 +5976,12 @@ static int smbchg_battery_set_property(struct power_supply *psy, if (chip->typec_psy) update_typec_otg_status(chip, val->intval, false); break; + case POWER_SUPPLY_PROP_ALLOW_HVDCP3: + if (chip->allow_hvdcp3_detection != val->intval) { + chip->allow_hvdcp3_detection = !!val->intval; + power_supply_changed(chip->batt_psy); + } + break; default: return -EINVAL; } @@ -5813,6 +6005,7 @@ static int smbchg_battery_is_writeable(struct power_supply *psy, case POWER_SUPPLY_PROP_DP_DM: case POWER_SUPPLY_PROP_RERUN_AICL: case POWER_SUPPLY_PROP_RESTRICTED_CHARGING: + case POWER_SUPPLY_PROP_ALLOW_HVDCP3: rc = 1; break; default: @@ -5886,6 +6079,12 @@ static int smbchg_battery_get_property(struct power_supply *psy, case POWER_SUPPLY_PROP_VOLTAGE_NOW: val->intval = get_prop_batt_voltage_now(chip); break; + case POWER_SUPPLY_PROP_RESISTANCE_ID: + val->intval = get_prop_batt_resistance_id(chip); + break; + case POWER_SUPPLY_PROP_CHARGE_FULL: + val->intval = get_prop_batt_full_charge(chip); + break; case POWER_SUPPLY_PROP_TEMP: val->intval = get_prop_batt_temp(chip); break; @@ -5913,6 +6112,12 @@ static int smbchg_battery_get_property(struct power_supply *psy, case POWER_SUPPLY_PROP_INPUT_CURRENT_NOW: val->intval = smbchg_get_iusb(chip); break; + case POWER_SUPPLY_PROP_ALLOW_HVDCP3: + val->intval = chip->allow_hvdcp3_detection; + break; + case POWER_SUPPLY_PROP_MAX_PULSE_ALLOWED: + val->intval = chip->max_pulse_allowed; + break; default: return -EINVAL; } @@ -6134,7 +6339,10 @@ static irqreturn_t fastchg_handler(int irq, void *_chip) struct smbchg_chip *chip = _chip; pr_smb(PR_INTERRUPT, "p2f triggered\n"); - smbchg_parallel_usb_check_ok(chip); + if (is_usb_present(chip) || is_dc_present(chip)) { + smbchg_detect_parallel_charger(chip); + smbchg_parallel_usb_check_ok(chip); + } if (chip->batt_psy) power_supply_changed(chip->batt_psy); smbchg_charging_status_change(chip); @@ -6323,8 +6531,7 @@ static irqreturn_t usbin_uv_handler(int irq, void *_chip) */ if (!(reg & USBIN_UV_BIT) && !(reg & USBIN_SRC_DET_BIT)) { pr_smb(PR_MISC, "setting usb dp=f dm=f\n"); - if (chip->dpdm_reg && 
!regulator_is_enabled(chip->dpdm_reg)) - rc = regulator_enable(chip->dpdm_reg); + rc = smbchg_request_dpdm(chip, true); if (rc < 0) { pr_err("Couldn't enable DP/DM for pulsing rc=%d\n", rc); return rc; @@ -6568,20 +6775,13 @@ static int determine_initial_status(struct smbchg_chip *chip) } else { usbid_change_handler(0, chip); } - src_detect_handler(0, chip); chip->usb_present = is_usb_present(chip); chip->dc_present = is_dc_present(chip); if (chip->usb_present) { - int rc = 0; pr_smb(PR_MISC, "setting usb dp=f dm=f\n"); - if (chip->dpdm_reg && !regulator_is_enabled(chip->dpdm_reg)) - rc = regulator_enable(chip->dpdm_reg); - if (rc < 0) { - pr_err("Couldn't enable DP/DM for pulsing rc=%d\n", rc); - return rc; - } + smbchg_request_dpdm(chip, true); handle_usb_insertion(chip); } else { handle_usb_removal(chip); @@ -6621,6 +6821,7 @@ static const char * const bpd_label[] = { static inline int get_bpd(const char *name) { int i = 0; + for (i = 0; i < ARRAY_SIZE(bpd_label); i++) { if (strcmp(bpd_label[i], name) == 0) return i; @@ -6746,7 +6947,22 @@ static int smbchg_hw_init(struct smbchg_chip *chip) chip->revision[ANA_MAJOR], chip->revision[ANA_MINOR]); /* Setup 9V HVDCP */ - if (!chip->hvdcp_not_supported) { + if (chip->hvdcp_not_supported) { + rc = vote(chip->hvdcp_enable_votable, HVDCP_PMIC_VOTER, + true, 0); + if (rc < 0) { + dev_err(chip->dev, "Couldn't disable HVDCP rc=%d\n", + rc); + return rc; + } + } else { + rc = vote(chip->hvdcp_enable_votable, HVDCP_PMIC_VOTER, + true, 1); + if (rc < 0) { + dev_err(chip->dev, "Couldn't enable HVDCP rc=%d\n", + rc); + return rc; + } rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG, HVDCP_ADAPTER_SEL_MASK, HVDCP_9V); @@ -6898,9 +7114,9 @@ static int smbchg_hw_init(struct smbchg_chip *chip) if (chip->iterm_disabled) { dev_err(chip->dev, "Error: Both iterm_disabled and iterm_ma set\n"); return -EINVAL; - } else { - smbchg_iterm_set(chip, chip->iterm_ma); } + + smbchg_iterm_set(chip, chip->iterm_ma); } /* set the safety time voltage */ @@ -7140,7 +7356,7 @@ static int smbchg_hw_init(struct smbchg_chip *chip) return rc; } -static struct of_device_id smbchg_match_table[] = { +static const struct of_device_id smbchg_match_table[] = { { .compatible = "qcom,qpnp-smbcharger", }, @@ -7157,7 +7373,7 @@ do { \ prop = -EINVAL; \ \ retval = of_property_read_u32(chip->pdev->dev.of_node, \ - "qcom," dt_property , \ + "qcom," dt_property, \ &prop); \ \ if ((retval == -EINVAL) && optional) \ @@ -7196,10 +7412,9 @@ static int smb_parse_wipower_map_dt(struct smbchg_chip *chip, num = total_elements / RANGE_ENTRY; map->entries = devm_kzalloc(chip->dev, num * sizeof(struct ilim_entry), GFP_KERNEL); - if (!map->entries) { - dev_err(chip->dev, "kzalloc failed for default ilim\n"); + if (!map->entries) return -ENOMEM; - } + for (i = 0; i < num; i++) { map->entries[i].vmin_uv = be32_to_cpup(data++); map->entries[i].vmax_uv = be32_to_cpup(data++); @@ -7264,6 +7479,7 @@ err: #define DEFAULT_VLED_MAX_UV 3500000 #define DEFAULT_FCC_MA 2000 +#define DEFAULT_NUM_OF_PULSE_ALLOWED 20 static int smb_parse_dt(struct smbchg_chip *chip) { int rc = 0, ocp_thresh = -EINVAL; @@ -7322,6 +7538,11 @@ static int smb_parse_dt(struct smbchg_chip *chip) if (chip->parallel.min_current_thr_ma != -EINVAL && chip->parallel.min_9v_current_thr_ma != -EINVAL) chip->parallel.avail = true; + + OF_PROP_READ(chip, chip->max_pulse_allowed, + "max-pulse-allowed", rc, 1); + if (chip->max_pulse_allowed == -EINVAL) + chip->max_pulse_allowed = DEFAULT_NUM_OF_PULSE_ALLOWED; /* * use the dt values 
if they exist, otherwise do not touch the params */ @@ -7472,19 +7693,19 @@ static int smb_parse_dt(struct smbchg_chip *chip) #define SMBCHG_LITE_MISC_SUBTYPE 0x57 static int smbchg_request_irq(struct smbchg_chip *chip, struct device_node *child, - int irq_num, char *irq_name, + int *irq_num, char *irq_name, irqreturn_t (irq_handler)(int irq, void *_chip), int flags) { int rc; - irq_num = of_irq_get_byname(child, irq_name); - if (irq_num < 0) { + *irq_num = of_irq_get_byname(child, irq_name); + if (*irq_num < 0) { dev_err(chip->dev, "Unable to get %s irqn", irq_name); rc = -ENXIO; } rc = devm_request_threaded_irq(chip->dev, - irq_num, NULL, irq_handler, flags, irq_name, + *irq_num, NULL, irq_handler, flags, irq_name, chip); if (rc < 0) { dev_err(chip->dev, "Unable to request %s irq: %dn", @@ -7526,26 +7747,28 @@ static int smbchg_request_irqs(struct smbchg_chip *chip) case SMBCHG_CHGR_SUBTYPE: case SMBCHG_LITE_CHGR_SUBTYPE: rc = smbchg_request_irq(chip, child, - chip->chg_error_irq, "chg-error", + &chip->chg_error_irq, "chg-error", chg_error_handler, flags); if (rc < 0) return rc; - rc = smbchg_request_irq(chip, child, chip->taper_irq, + rc = smbchg_request_irq(chip, child, &chip->taper_irq, "chg-taper-thr", taper_handler, (IRQF_TRIGGER_RISING | IRQF_ONESHOT)); if (rc < 0) return rc; disable_irq_nosync(chip->taper_irq); - rc = smbchg_request_irq(chip, child, chip->chg_term_irq, + rc = smbchg_request_irq(chip, child, + &chip->chg_term_irq, "chg-tcc-thr", chg_term_handler, (IRQF_TRIGGER_RISING | IRQF_ONESHOT)); if (rc < 0) return rc; - rc = smbchg_request_irq(chip, child, chip->recharge_irq, + rc = smbchg_request_irq(chip, child, + &chip->recharge_irq, "chg-rechg-thr", recharge_handler, flags); if (rc < 0) return rc; - rc = smbchg_request_irq(chip, child, chip->fastchg_irq, + rc = smbchg_request_irq(chip, child, &chip->fastchg_irq, "chg-p2f-thr", fastchg_handler, flags); if (rc < 0) return rc; @@ -7555,36 +7778,37 @@ static int smbchg_request_irqs(struct smbchg_chip *chip) break; case SMBCHG_BAT_IF_SUBTYPE: case SMBCHG_LITE_BAT_IF_SUBTYPE: - rc = smbchg_request_irq(chip, child, chip->batt_hot_irq, + rc = smbchg_request_irq(chip, child, + &chip->batt_hot_irq, "batt-hot", batt_hot_handler, flags); if (rc < 0) return rc; rc = smbchg_request_irq(chip, child, - chip->batt_warm_irq, + &chip->batt_warm_irq, "batt-warm", batt_warm_handler, flags); if (rc < 0) return rc; rc = smbchg_request_irq(chip, child, - chip->batt_cool_irq, + &chip->batt_cool_irq, "batt-cool", batt_cool_handler, flags); if (rc < 0) return rc; rc = smbchg_request_irq(chip, child, - chip->batt_cold_irq, + &chip->batt_cold_irq, "batt-cold", batt_cold_handler, flags); if (rc < 0) return rc; rc = smbchg_request_irq(chip, child, - chip->batt_missing_irq, + &chip->batt_missing_irq, "batt-missing", batt_pres_handler, flags); if (rc < 0) return rc; rc = smbchg_request_irq(chip, child, - chip->vbat_low_irq, + &chip->vbat_low_irq, "batt-low", vbat_low_handler, flags); if (rc < 0) return rc; - + enable_irq_wake(chip->batt_hot_irq); enable_irq_wake(chip->batt_warm_irq); enable_irq_wake(chip->batt_cool_irq); @@ -7595,24 +7819,24 @@ static int smbchg_request_irqs(struct smbchg_chip *chip) case SMBCHG_USB_CHGPTH_SUBTYPE: case SMBCHG_LITE_USB_CHGPTH_SUBTYPE: rc = smbchg_request_irq(chip, child, - chip->usbin_uv_irq, + &chip->usbin_uv_irq, "usbin-uv", usbin_uv_handler, flags | IRQF_EARLY_RESUME); if (rc < 0) return rc; rc = smbchg_request_irq(chip, child, - chip->usbin_ov_irq, + &chip->usbin_ov_irq, "usbin-ov", usbin_ov_handler, flags); if (rc < 
0) return rc; rc = smbchg_request_irq(chip, child, - chip->src_detect_irq, + &chip->src_detect_irq, "usbin-src-det", src_detect_handler, flags); if (rc < 0) return rc; rc = smbchg_request_irq(chip, child, - chip->aicl_done_irq, + &chip->aicl_done_irq, "aicl-done", aicl_done_handler, (IRQF_TRIGGER_RISING | IRQF_ONESHOT)); @@ -7621,18 +7845,18 @@ static int smbchg_request_irqs(struct smbchg_chip *chip) if (chip->schg_version != QPNP_SCHG_LITE) { rc = smbchg_request_irq(chip, child, - chip->otg_fail_irq, "otg-fail", + &chip->otg_fail_irq, "otg-fail", otg_fail_handler, flags); if (rc < 0) return rc; rc = smbchg_request_irq(chip, child, - chip->otg_oc_irq, "otg-oc", + &chip->otg_oc_irq, "otg-oc", otg_oc_handler, (IRQF_TRIGGER_RISING | IRQF_ONESHOT)); if (rc < 0) return rc; rc = smbchg_request_irq(chip, child, - chip->usbid_change_irq, "usbid-change", + &chip->usbid_change_irq, "usbid-change", usbid_change_handler, (IRQF_TRIGGER_FALLING | IRQF_ONESHOT)); if (rc < 0) @@ -7651,7 +7875,7 @@ static int smbchg_request_irqs(struct smbchg_chip *chip) break; case SMBCHG_DC_CHGPTH_SUBTYPE: case SMBCHG_LITE_DC_CHGPTH_SUBTYPE: - rc = smbchg_request_irq(chip, child, chip->dcin_uv_irq, + rc = smbchg_request_irq(chip, child, &chip->dcin_uv_irq, "dcin-uv", dcin_uv_handler, flags); if (rc < 0) return rc; @@ -7659,16 +7883,17 @@ static int smbchg_request_irqs(struct smbchg_chip *chip) break; case SMBCHG_MISC_SUBTYPE: case SMBCHG_LITE_MISC_SUBTYPE: - rc = smbchg_request_irq(chip, child, chip->power_ok_irq, + rc = smbchg_request_irq(chip, child, + &chip->power_ok_irq, "power-ok", power_ok_handler, flags); if (rc < 0) return rc; - rc = smbchg_request_irq(chip, child, chip->chg_hot_irq, + rc = smbchg_request_irq(chip, child, &chip->chg_hot_irq, "temp-shutdown", chg_hot_handler, flags); if (rc < 0) return rc; - rc = smbchg_request_irq(chip, child, chip->wdog_timeout_irq, - "wdog-timeout", + rc = smbchg_request_irq(chip, child, + &chip->wdog_timeout_irq, "wdog-timeout", wdog_timeout_handler, flags); if (rc < 0) return rc; @@ -7679,19 +7904,19 @@ static int smbchg_request_irqs(struct smbchg_chip *chip) break; case SMBCHG_LITE_OTG_SUBTYPE: rc = smbchg_request_irq(chip, child, - chip->usbid_change_irq, "usbid-change", + &chip->usbid_change_irq, "usbid-change", usbid_change_handler, (IRQF_TRIGGER_FALLING | IRQF_ONESHOT)); if (rc < 0) return rc; rc = smbchg_request_irq(chip, child, - chip->otg_oc_irq, "otg-oc", + &chip->otg_oc_irq, "otg-oc", otg_oc_handler, (IRQF_TRIGGER_RISING | IRQF_ONESHOT)); if (rc < 0) return rc; rc = smbchg_request_irq(chip, child, - chip->otg_fail_irq, "otg-fail", + &chip->otg_fail_irq, "otg-fail", otg_fail_handler, flags); if (rc < 0) return rc; @@ -7828,8 +8053,7 @@ static int create_debugfs_entries(struct smbchg_chip *chip) } ent = debugfs_create_file("force_dcin_icl_check", - S_IFREG | S_IWUSR | S_IRUGO, - chip->debug_root, chip, + 00100644, chip->debug_root, chip, &force_dcin_icl_ops); if (!ent) { dev_err(chip->dev, @@ -7870,6 +8094,7 @@ static int smbchg_check_chg_version(struct smbchg_chip *chip) chip->schg_version = QPNP_SCHG; break; case PMI8950: + chip->wa_flags |= SMBCHG_RESTART_WA; case PMI8937: chip->wa_flags |= SMBCHG_BATT_OV_WA; if (pmic_rev_id->rev4 < 2) /* PMI8950 1.0 */ { @@ -7927,20 +8152,18 @@ static void rerun_hvdcp_det_if_necessary(struct smbchg_chip *chip) pr_err("Couldn't vote for 300mA for suspend wa, going ahead rc=%d\n", rc); - pr_smb(PR_STATUS, "Faking Removal\n"); - fake_insertion_removal(chip, false); - msleep(500); - pr_smb(PR_STATUS, "Faking Insertion\n"); - 
fake_insertion_removal(chip, true); + rc = rerun_apsd(chip); + if (rc) + pr_err("APSD rerun failed rc=%d\n", rc); read_usb_type(chip, &usb_type_name, &usb_supply_type); if (usb_supply_type != POWER_SUPPLY_TYPE_USB_DCP) { msleep(500); - pr_smb(PR_STATUS, "Fake Removal again as type!=DCP\n"); - fake_insertion_removal(chip, false); - msleep(500); - pr_smb(PR_STATUS, "Fake Insert again as type!=DCP\n"); - fake_insertion_removal(chip, true); + pr_smb(PR_STATUS, "Rerun APSD as type !=DCP\n"); + + rc = rerun_apsd(chip); + if (rc) + pr_err("APSD rerun failed rc=%d\n", rc); } rc = vote(chip->usb_icl_votable, @@ -7948,6 +8171,14 @@ static void rerun_hvdcp_det_if_necessary(struct smbchg_chip *chip) if (rc < 0) pr_err("Couldn't vote for 0 for suspend wa, going ahead rc=%d\n", rc); + + /* Schedule work for HVDCP detection */ + if (!chip->hvdcp_not_supported) { + cancel_delayed_work_sync(&chip->hvdcp_det_work); + smbchg_stay_awake(chip, PM_DETECT_HVDCP); + schedule_delayed_work(&chip->hvdcp_det_work, + msecs_to_jiffies(HVDCP_NOTIFY_MS)); + } } } @@ -7956,7 +8187,7 @@ static int smbchg_probe(struct platform_device *pdev) int rc; struct smbchg_chip *chip; struct power_supply *typec_psy = NULL; - struct qpnp_vadc_chip *vadc_dev, *vchg_vadc_dev; + struct qpnp_vadc_chip *vadc_dev = NULL, *vchg_vadc_dev = NULL; const char *typec_psy_name; struct power_supply_config usb_psy_cfg = {}; struct power_supply_config batt_psy_cfg = {}; @@ -8090,6 +8321,15 @@ static int smbchg_probe(struct platform_device *pdev) goto votables_cleanup; } + chip->hvdcp_enable_votable = create_votable( + "HVDCP_ENABLE", + VOTE_MIN, + smbchg_hvdcp_enable_cb, chip); + if (IS_ERR(chip->hvdcp_enable_votable)) { + rc = PTR_ERR(chip->hvdcp_enable_votable); + goto votables_cleanup; + } + INIT_WORK(&chip->usb_set_online_work, smbchg_usb_update_online_work); INIT_DELAYED_WORK(&chip->parallel_en_work, smbchg_parallel_usb_en_work); @@ -8178,18 +8418,10 @@ static int smbchg_probe(struct platform_device *pdev) goto votables_cleanup; } - if (of_find_property(chip->dev->of_node, "dpdm-supply", NULL)) { - chip->dpdm_reg = devm_regulator_get(chip->dev, "dpdm"); - if (IS_ERR(chip->dpdm_reg)) { - rc = PTR_ERR(chip->dpdm_reg); - goto votables_cleanup; - } - } - rc = smbchg_hw_init(chip); if (rc < 0) { dev_err(&pdev->dev, - "Unable to intialize hardware rc = %d\n", rc); + "Unable to initialize hardware rc = %d\n", rc); goto out; } @@ -8247,6 +8479,7 @@ static int smbchg_probe(struct platform_device *pdev) goto out; } } + chip->allow_hvdcp3_detection = true; if (chip->cfg_chg_led_support && chip->schg_version == QPNP_SCHG_LITE) { @@ -8275,6 +8508,7 @@ static int smbchg_probe(struct platform_device *pdev) rerun_hvdcp_det_if_necessary(chip); + update_usb_status(chip, is_usb_present(chip), false); dump_regs(chip); create_debugfs_entries(chip); dev_info(chip->dev, @@ -8292,6 +8526,8 @@ unregister_led_class: out: handle_usb_removal(chip); votables_cleanup: + if (chip->hvdcp_enable_votable) + destroy_votable(chip->hvdcp_enable_votable); if (chip->aicl_deglitch_short_votable) destroy_votable(chip->aicl_deglitch_short_votable); if (chip->hw_aicl_rerun_enable_indirect_votable) @@ -8343,6 +8579,12 @@ static void smbchg_shutdown(struct platform_device *pdev) if (!is_hvdcp_present(chip)) return; + pr_smb(PR_MISC, "Reducing to 500mA\n"); + rc = vote(chip->usb_icl_votable, SHUTDOWN_WORKAROUND_ICL_VOTER, true, + 500); + if (rc < 0) + pr_err("Couldn't vote 500mA ICL\n"); + pr_smb(PR_MISC, "Disable Parallel\n"); mutex_lock(&chip->parallel.lock); smbchg_parallel_en = 0; @@ 
-8365,11 +8607,9 @@ static void smbchg_shutdown(struct platform_device *pdev) disable_irq(chip->otg_oc_irq); disable_irq(chip->power_ok_irq); disable_irq(chip->recharge_irq); - disable_irq(chip->src_detect_irq); disable_irq(chip->taper_irq); disable_irq(chip->usbid_change_irq); disable_irq(chip->usbin_ov_irq); - disable_irq(chip->usbin_uv_irq); disable_irq(chip->vbat_low_irq); disable_irq(chip->wdog_timeout_irq); @@ -8412,8 +8652,7 @@ static void smbchg_shutdown(struct platform_device *pdev) /* disable HVDCP */ pr_smb(PR_MISC, "Disable HVDCP\n"); - rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG, - HVDCP_EN_BIT, 0); + rc = vote(chip->hvdcp_enable_votable, HVDCP_PMIC_VOTER, true, 0); if (rc < 0) pr_err("Couldn't disable HVDCP rc=%d\n", rc); @@ -8430,6 +8669,9 @@ static void smbchg_shutdown(struct platform_device *pdev) if (rc < 0) pr_err("Couldn't fake insertion rc=%d\n", rc); + disable_irq(chip->src_detect_irq); + disable_irq(chip->usbin_uv_irq); + pr_smb(PR_MISC, "Wait 1S to settle\n"); msleep(1000); chip->hvdcp_3_det_ignore_uv = false; diff --git a/drivers/regulator/qpnp-labibb-regulator.c b/drivers/regulator/qpnp-labibb-regulator.c index 4cef8904a76a..42015f3274ed 100644 --- a/drivers/regulator/qpnp-labibb-regulator.c +++ b/drivers/regulator/qpnp-labibb-regulator.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2017, 2019, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -67,6 +67,7 @@ #define REG_LAB_PRECHARGE_CTL 0x5E #define REG_LAB_SOFT_START_CTL 0x5F #define REG_LAB_SPARE_CTL 0x60 +#define REG_LAB_MISC_CTL 0x60 /* PMI8998/PM660A */ #define REG_LAB_PFM_CTL 0x62 /* LAB registers for PM660A */ @@ -139,6 +140,9 @@ #define LAB_SPARE_TOUCH_WAKE_BIT BIT(3) #define LAB_SPARE_DISABLE_SCP_BIT BIT(0) +/* REG_LAB_MISC_CTL */ +#define LAB_AUTO_GM_BIT BIT(4) + /* REG_LAB_PFM_CTL */ #define LAB_PFM_EN_BIT BIT(7) @@ -593,6 +597,7 @@ struct qpnp_labibb { struct device *dev; struct platform_device *pdev; struct regmap *regmap; + struct class labibb_class; struct pmic_revid_data *pmic_rev_id; u16 lab_base; u16 ibb_base; @@ -620,6 +625,8 @@ struct qpnp_labibb { bool notify_lab_vreg_ok_sts; bool detect_lab_sc; bool sc_detected; + /* Tracks the secure UI mode entry/exit */ + bool secure_mode; u32 swire_2nd_cmd_delay; u32 swire_ibb_ps_enable_delay; }; @@ -1866,7 +1873,7 @@ static int qpnp_labibb_save_settings(struct qpnp_labibb *labibb) static int qpnp_labibb_ttw_enter_ibb_common(struct qpnp_labibb *labibb) { int rc = 0; - u8 val; + u8 val, mask; val = 0; rc = qpnp_labibb_write(labibb, labibb->ibb_base + REG_IBB_PD_CTL, @@ -1886,10 +1893,16 @@ static int qpnp_labibb_ttw_enter_ibb_common(struct qpnp_labibb *labibb) return rc; } - val = IBB_WAIT_MBG_OK; + if (labibb->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE) { + val = 0; + mask = IBB_DIS_DLY_MASK; + } else { + val = IBB_WAIT_MBG_OK; + mask = IBB_DIS_DLY_MASK | IBB_WAIT_MBG_OK; + } + rc = qpnp_labibb_sec_masked_write(labibb, labibb->ibb_base, - REG_IBB_PWRUP_PWRDN_CTL_2, - IBB_DIS_DLY_MASK | IBB_WAIT_MBG_OK, val); + REG_IBB_PWRUP_PWRDN_CTL_2, mask, val); if (rc < 0) { pr_err("write to register %x failed rc = %d\n", REG_IBB_PWRUP_PWRDN_CTL_2, rc); @@ -1965,7 +1978,7 @@ static int qpnp_labibb_ttw_enter_ibb_pmi8950(struct qpnp_labibb *labibb) static int qpnp_labibb_regulator_ttw_mode_enter(struct qpnp_labibb *labibb) { int rc = 0; - u8 
val; + u8 val, reg; /* Save the IBB settings before they get modified for TTW mode */ if (!labibb->ibb_settings_saved) { @@ -2027,10 +2040,17 @@ static int qpnp_labibb_regulator_ttw_mode_enter(struct qpnp_labibb *labibb) } val = LAB_SPARE_DISABLE_SCP_BIT; + if (labibb->pmic_rev_id->pmic_subtype != PMI8950_SUBTYPE) val |= LAB_SPARE_TOUCH_WAKE_BIT; - rc = qpnp_labibb_write(labibb, labibb->lab_base + - REG_LAB_SPARE_CTL, &val, 1); + + if (labibb->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE) { + reg = REG_LAB_MISC_CTL; + val |= LAB_AUTO_GM_BIT; + } else { + reg = REG_LAB_SPARE_CTL; + } + rc = qpnp_labibb_write(labibb, labibb->lab_base + reg, &val, 1); if (rc < 0) { pr_err("qpnp_labibb_write register %x failed rc = %d\n", REG_LAB_SPARE_CTL, rc); @@ -2060,7 +2080,15 @@ static int qpnp_labibb_regulator_ttw_mode_enter(struct qpnp_labibb *labibb) case PMI8950_SUBTYPE: rc = qpnp_labibb_ttw_enter_ibb_pmi8950(labibb); break; + case PMI8998_SUBTYPE: + rc = labibb->lab_ver_ops->ps_ctl(labibb, 70, true); + if (rc < 0) + break; + + rc = qpnp_ibb_ps_config(labibb, true); + break; } + if (rc < 0) { pr_err("Failed to configure TTW-enter for IBB rc=%d\n", rc); return rc; @@ -2093,7 +2121,7 @@ static int qpnp_labibb_ttw_exit_ibb_common(struct qpnp_labibb *labibb) static int qpnp_labibb_regulator_ttw_mode_exit(struct qpnp_labibb *labibb) { int rc = 0; - u8 val; + u8 val, reg; if (!labibb->ibb_settings_saved) { pr_err("IBB settings are not saved!\n"); @@ -2127,8 +2155,14 @@ static int qpnp_labibb_regulator_ttw_mode_exit(struct qpnp_labibb *labibb) } val = 0; - rc = qpnp_labibb_write(labibb, labibb->lab_base + - REG_LAB_SPARE_CTL, &val, 1); + if (labibb->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE) { + reg = REG_LAB_MISC_CTL; + val |= LAB_AUTO_GM_BIT; + } else { + reg = REG_LAB_SPARE_CTL; + } + + rc = qpnp_labibb_write(labibb, labibb->lab_base + reg, &val, 1); if (rc < 0) { pr_err("qpnp_labibb_write register %x failed rc = %d\n", REG_LAB_SPARE_CTL, rc); @@ -2432,6 +2466,9 @@ static int qpnp_lab_regulator_enable(struct regulator_dev *rdev) int rc; struct qpnp_labibb *labibb = rdev_get_drvdata(rdev); + if (labibb->secure_mode) + return 0; + if (labibb->sc_detected) { pr_info("Short circuit detected: disabled LAB/IBB rails\n"); return 0; @@ -2469,6 +2506,9 @@ static int qpnp_lab_regulator_disable(struct regulator_dev *rdev) u8 val; struct qpnp_labibb *labibb = rdev_get_drvdata(rdev); + if (labibb->secure_mode) + return 0; + if (labibb->lab_vreg.vreg_enabled && !labibb->swire_control) { if (!labibb->standalone) @@ -2662,7 +2702,7 @@ static int qpnp_lab_regulator_set_voltage(struct regulator_dev *rdev, u8 val; struct qpnp_labibb *labibb = rdev_get_drvdata(rdev); - if (labibb->swire_control) + if (labibb->swire_control || labibb->secure_mode) return 0; if (min_uV < labibb->lab_vreg.min_volt) { @@ -2809,8 +2849,11 @@ static bool is_lab_vreg_ok_irq_available(struct qpnp_labibb *labibb) return true; if (labibb->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE && - labibb->mode == QPNP_LABIBB_LCD_MODE) + labibb->mode == QPNP_LABIBB_LCD_MODE) { + if (labibb->ttw_en) + return false; return true; + } return false; } @@ -3038,6 +3081,8 @@ static int register_qpnp_lab_regulator(struct qpnp_labibb *labibb, } if (is_lab_vreg_ok_irq_available(labibb)) { + irq_set_status_flags(labibb->lab_vreg.lab_vreg_ok_irq, + IRQ_DISABLE_UNLAZY); rc = devm_request_threaded_irq(labibb->dev, labibb->lab_vreg.lab_vreg_ok_irq, NULL, lab_vreg_ok_handler, @@ -3051,6 +3096,8 @@ static int register_qpnp_lab_regulator(struct qpnp_labibb *labibb, } if 
(labibb->lab_vreg.lab_sc_irq != -EINVAL) { + irq_set_status_flags(labibb->lab_vreg.lab_sc_irq, + IRQ_DISABLE_UNLAZY); rc = devm_request_threaded_irq(labibb->dev, labibb->lab_vreg.lab_sc_irq, NULL, labibb_sc_err_handler, @@ -3534,6 +3581,9 @@ static int qpnp_ibb_regulator_enable(struct regulator_dev *rdev) int rc = 0; struct qpnp_labibb *labibb = rdev_get_drvdata(rdev); + if (labibb->secure_mode) + return 0; + if (labibb->sc_detected) { pr_info("Short circuit detected: disabled LAB/IBB rails\n"); return 0; @@ -3559,6 +3609,9 @@ static int qpnp_ibb_regulator_disable(struct regulator_dev *rdev) int rc; struct qpnp_labibb *labibb = rdev_get_drvdata(rdev); + if (labibb->secure_mode) + return 0; + if (labibb->ibb_vreg.vreg_enabled && !labibb->swire_control) { if (!labibb->standalone) @@ -3592,7 +3645,7 @@ static int qpnp_ibb_regulator_set_voltage(struct regulator_dev *rdev, struct qpnp_labibb *labibb = rdev_get_drvdata(rdev); - if (labibb->swire_control) + if (labibb->swire_control || labibb->secure_mode) return 0; rc = labibb->ibb_ver_ops->set_voltage(labibb, min_uV, max_uV); @@ -3821,6 +3874,8 @@ static int register_qpnp_ibb_regulator(struct qpnp_labibb *labibb, } if (labibb->ibb_vreg.ibb_sc_irq != -EINVAL) { + irq_set_status_flags(labibb->ibb_vreg.ibb_sc_irq, + IRQ_DISABLE_UNLAZY); rc = devm_request_threaded_irq(labibb->dev, labibb->ibb_vreg.ibb_sc_irq, NULL, labibb_sc_err_handler, @@ -3969,6 +4024,9 @@ static int qpnp_labibb_check_ttw_supported(struct qpnp_labibb *labibb) case PMI8950_SUBTYPE: /* TTW supported for all revisions */ break; + case PMI8998_SUBTYPE: + /* TTW supported for all revisions */ + break; default: pr_info("TTW mode not supported for PMIC-subtype = %d\n", labibb->pmic_rev_id->pmic_subtype); @@ -3979,6 +4037,49 @@ static int qpnp_labibb_check_ttw_supported(struct qpnp_labibb *labibb) return rc; } +static ssize_t qpnp_labibb_irq_control(struct class *c, + struct class_attribute *attr, + const char *buf, size_t count) +{ + struct qpnp_labibb *labibb = container_of(c, struct qpnp_labibb, + labibb_class); + int val, rc; + + rc = kstrtouint(buf, 0, &val); + if (rc < 0) + return rc; + + if (val != 0 && val != 1) + return count; + + /* Disable irqs */ + if (val == 1 && !labibb->secure_mode) { + if (labibb->lab_vreg.lab_vreg_ok_irq > 0) + disable_irq(labibb->lab_vreg.lab_vreg_ok_irq); + if (labibb->lab_vreg.lab_sc_irq > 0) + disable_irq(labibb->lab_vreg.lab_sc_irq); + if (labibb->ibb_vreg.ibb_sc_irq > 0) + disable_irq(labibb->ibb_vreg.ibb_sc_irq); + labibb->secure_mode = true; + } else if (val == 0 && labibb->secure_mode) { + if (labibb->lab_vreg.lab_vreg_ok_irq > 0) + enable_irq(labibb->lab_vreg.lab_vreg_ok_irq); + if (labibb->lab_vreg.lab_sc_irq > 0) + enable_irq(labibb->lab_vreg.lab_sc_irq); + if (labibb->ibb_vreg.ibb_sc_irq > 0) + enable_irq(labibb->ibb_vreg.ibb_sc_irq); + labibb->secure_mode = false; + } + + return count; +} + +static struct class_attribute labibb_attributes[] = { + [0] = __ATTR(secure_mode, 0664, NULL, + qpnp_labibb_irq_control), + __ATTR_NULL, +}; + static int qpnp_labibb_regulator_probe(struct platform_device *pdev) { struct qpnp_labibb *labibb; @@ -4171,6 +4272,17 @@ static int qpnp_labibb_regulator_probe(struct platform_device *pdev) CLOCK_MONOTONIC, HRTIMER_MODE_REL); labibb->sc_err_check_timer.function = labibb_check_sc_err_count; dev_set_drvdata(&pdev->dev, labibb); + + labibb->labibb_class.name = "lcd_bias"; + labibb->labibb_class.owner = THIS_MODULE; + labibb->labibb_class.class_attrs = labibb_attributes; + + rc = 
class_register(&labibb->labibb_class); + if (rc < 0) { + pr_err("Failed to register labibb class rc=%d\n", rc); + return rc; + } + pr_info("LAB/IBB registered successfully, lab_vreg enable=%d ibb_vreg enable=%d swire_control=%d\n", labibb->lab_vreg.vreg_enabled, labibb->ibb_vreg.vreg_enabled, diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c index 944156207477..dcb949dcfa66 100644 --- a/drivers/s390/char/sclp_config.c +++ b/drivers/s390/char/sclp_config.c @@ -43,7 +43,9 @@ static void sclp_cpu_capability_notify(struct work_struct *work) static void __ref sclp_cpu_change_notify(struct work_struct *work) { + lock_device_hotplug(); smp_rescan_cpus(); + unlock_device_hotplug(); } static void sclp_conf_receiver_fn(struct evbuf_header *evbuf) diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c index c2d2c17550a7..951f22265105 100644 --- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c +++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c @@ -47,6 +47,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = { {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */ {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */ {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */ + {USB_DEVICE(0x2001, 0x331B)}, /* D-Link DWA-121 rev B1 */ {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */ {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */ {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */ diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c index 6d1e2f746ab4..8d6253903f24 100644 --- a/drivers/tty/n_hdlc.c +++ b/drivers/tty/n_hdlc.c @@ -598,6 +598,7 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file, /* too large for caller's buffer */ ret = -EOVERFLOW; } else { + __set_current_state(TASK_RUNNING); if (copy_to_user(buf, rbuf->buf, rbuf->count)) ret = -EFAULT; else diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index c1cff2b455ae..5b86ebc76a8a 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -2297,7 +2297,8 @@ static int tiocsti(struct tty_struct *tty, char __user *p) return -EFAULT; tty_audit_tiocsti(tty, ch); ld = tty_ldisc_ref_wait(tty); - ld->ops->receive_buf(tty, &ch, &mbz, 1); + if (ld->ops->receive_buf) + ld->ops->receive_buf(tty, &ch, &mbz, 1); tty_ldisc_deref(ld); return 0; } diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index ff3286fc22d8..6779f733bb83 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -958,6 +958,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, if (CON_IS_VISIBLE(vc)) update_screen(vc); vt_event_post(VT_EVENT_RESIZE, vc->vc_num, vc->vc_num); + notify_update(vc); return err; } diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index 4966768d3c98..9706d214c409 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -47,6 +47,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) }, { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) }, { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ZTEK) }, + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_TB) }, { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) }, { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) }, { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) }, diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index a84f0959ab34..d84c3b3d477b 100644 --- a/drivers/usb/serial/pl2303.h +++ 
b/drivers/usb/serial/pl2303.h @@ -13,6 +13,7 @@ #define PL2303_VENDOR_ID 0x067b #define PL2303_PRODUCT_ID 0x2303 +#define PL2303_PRODUCT_ID_TB 0x2304 #define PL2303_PRODUCT_ID_RSAQ2 0x04bb #define PL2303_PRODUCT_ID_DCU11 0x1234 #define PL2303_PRODUCT_ID_PHAROS 0xaaa0 @@ -25,6 +26,7 @@ #define PL2303_PRODUCT_ID_MOTOROLA 0x0307 #define PL2303_PRODUCT_ID_ZTEK 0xe1f1 + #define ATEN_VENDOR_ID 0x0557 #define ATEN_VENDOR_ID2 0x0547 #define ATEN_PRODUCT_ID 0x2008 diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c index 6d6acf2c07c3..511242111403 100644 --- a/drivers/usb/serial/usb-serial-simple.c +++ b/drivers/usb/serial/usb-serial-simple.c @@ -88,7 +88,8 @@ DEVICE(moto_modem, MOTO_IDS); /* Motorola Tetra driver */ #define MOTOROLA_TETRA_IDS() \ { USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \ - { USB_DEVICE(0x0cad, 0x9012) } /* MTP6550 */ + { USB_DEVICE(0x0cad, 0x9012) }, /* MTP6550 */ \ + { USB_DEVICE(0x0cad, 0x9016) } /* TPG2200 */ DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS); /* Novatel Wireless GPS driver */ diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 1eeb4780c3ed..eacf57c24ca9 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -48,6 +48,7 @@ #include "cifs_unicode.h" #include "cifs_debug.h" #include "cifs_fs_sb.h" +#include "dns_resolve.h" #include "ntlmssp.h" #include "nterr.h" #include "rfc1002pdu.h" @@ -304,6 +305,53 @@ static int cifs_setup_volume_info(struct smb_vol *volume_info, char *mount_data, const char *devname); /* + * Resolve hostname and set ip addr in tcp ses. Useful for hostnames that may + * get their ip addresses changed at some point. + * + * This should be called with server->srv_mutex held. + */ +#ifdef CONFIG_CIFS_DFS_UPCALL +static int reconn_set_ipaddr(struct TCP_Server_Info *server) +{ + int rc; + int len; + char *unc, *ipaddr = NULL; + + if (!server->hostname) + return -EINVAL; + + len = strlen(server->hostname) + 3; + + unc = kmalloc(len, GFP_KERNEL); + if (!unc) { + cifs_dbg(FYI, "%s: failed to create UNC path\n", __func__); + return -ENOMEM; + } + snprintf(unc, len, "\\\\%s", server->hostname); + + rc = dns_resolve_server_name_to_ip(unc, &ipaddr); + kfree(unc); + + if (rc < 0) { + cifs_dbg(FYI, "%s: failed to resolve server part of %s to IP: %d\n", + __func__, server->hostname, rc); + return rc; + } + + rc = cifs_convert_address((struct sockaddr *)&server->dstaddr, ipaddr, + strlen(ipaddr)); + kfree(ipaddr); + + return !rc ? 
-1 : 0; +} +#else +static inline int reconn_set_ipaddr(struct TCP_Server_Info *server) +{ + return 0; +} +#endif + +/* * cifs tcp session reconnection * * mark tcp session as reconnecting so temporarily locked @@ -400,6 +448,11 @@ cifs_reconnect(struct TCP_Server_Info *server) rc = generic_ip_connect(server); if (rc) { cifs_dbg(FYI, "reconnect error %d\n", rc); + rc = reconn_set_ipaddr(server); + if (rc) { + cifs_dbg(FYI, "%s: failed to resolve hostname: %d\n", + __func__, rc); + } mutex_unlock(&server->srv_mutex); msleep(3000); } else { diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 2725085a3f9f..eae3cdffaf7f 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -143,14 +143,14 @@ smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size, scredits = server->credits; /* can deadlock with reopen */ - if (scredits == 1) { + if (scredits <= 8) { *num = SMB2_MAX_BUFFER_SIZE; *credits = 0; break; } - /* leave one credit for a possible reopen */ - scredits--; + /* leave some credits for reopen and other ops */ + scredits -= 8; *num = min_t(unsigned int, size, scredits * SMB2_MAX_BUFFER_SIZE); diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index f7111bb88ec1..5e21d58c49ef 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -2523,8 +2523,8 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon, if (rc == -ENODATA && rsp->hdr.Status == STATUS_NO_MORE_FILES) { srch_inf->endOfSearch = true; rc = 0; - } - cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE); + } else + cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE); goto qdir_exit; } diff --git a/fs/dcache.c b/fs/dcache.c index 86f52a555dec..2416ad64cc62 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1155,15 +1155,11 @@ static enum lru_status dentry_lru_isolate_shrink(struct list_head *item, */ void shrink_dcache_sb(struct super_block *sb) { - long freed; - do { LIST_HEAD(dispose); - freed = list_lru_walk(&sb->s_dentry_lru, + list_lru_walk(&sb->s_dentry_lru, dentry_lru_isolate_shrink, &dispose, 1024); - - this_cpu_sub(nr_dentry_unused, freed); shrink_dentry_list(&dispose); cond_resched(); } while (list_lru_count(&sb->s_dentry_lru) > 0); diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index 763fe7737065..ef24894edecc 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c @@ -1720,9 +1720,9 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext, goto next_iter; } if (ret == -E2BIG) { - n += rbm->bii - initial_bii; rbm->bii = 0; rbm->offset = 0; + n += (rbm->bii - initial_bii); goto res_covered_end_of_rgrp; } return ret; diff --git a/fs/read_write.c b/fs/read_write.c index bfd1a5dddf6e..16e554ba885d 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -363,8 +363,10 @@ ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos) iter->type |= WRITE; ret = file->f_op->write_iter(&kiocb, iter); BUG_ON(ret == -EIOCBQUEUED); - if (ret > 0) + if (ret > 0) { *ppos = kiocb.ki_pos; + fsnotify_modify(file); + } return ret; } EXPORT_SYMBOL(vfs_iter_write); diff --git a/fs/super.c b/fs/super.c index bc7ae0f327d0..689ec96c43f8 100644 --- a/fs/super.c +++ b/fs/super.c @@ -118,13 +118,23 @@ static unsigned long super_cache_count(struct shrinker *shrink, sb = container_of(shrink, struct super_block, s_shrink); /* - * Don't call trylock_super as it is a potential - * scalability bottleneck. The counts could get updated - * between super_cache_count and super_cache_scan anyway. 
- * Call to super_cache_count with shrinker_rwsem held - * ensures the safety of call to list_lru_shrink_count() and - * s_op->nr_cached_objects(). + * We don't call trylock_super() here as it is a scalability bottleneck, + * so we're exposed to partial setup state. The shrinker rwsem does not + * protect filesystem operations backing list_lru_shrink_count() or + * s_op->nr_cached_objects(). Counts can change between + * super_cache_count and super_cache_scan, so we really don't need locks + * here. + * + * However, if we are currently mounting the superblock, the underlying + * filesystem might be in a state of partial construction and hence it + * is dangerous to access it. trylock_super() uses a MS_BORN check to + * avoid this situation, so do the same here. The memory barrier is + * matched with the one in mount_fs() as we don't hold locks here. */ + if (!(sb->s_flags & MS_BORN)) + return 0; + smp_rmb(); + if (sb->s_op && sb->s_op->nr_cached_objects) total_objects = sb->s_op->nr_cached_objects(sb, sc); @@ -1151,6 +1161,14 @@ mount_fs(struct file_system_type *type, int flags, const char *name, struct vfsm sb = root->d_sb; BUG_ON(!sb); WARN_ON(!sb->s_bdi); + + /* + * Write barrier is for super_cache_count(). We place it before setting + * MS_BORN as the data dependency between the two functions is the + * superblock structure contents that we just set up, not the MS_BORN + * flag. + */ + smp_wmb(); sb->s_flags |= MS_BORN; error = security_sb_kern_mount(sb, flags, secdata); diff --git a/include/linux/kobject.h b/include/linux/kobject.h index 74de8b60ce12..41ba63c9a36e 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h @@ -113,6 +113,23 @@ extern void kobject_put(struct kobject *kobj); extern const void *kobject_namespace(struct kobject *kobj); extern char *kobject_get_path(struct kobject *kobj, gfp_t flag); +/** + * kobject_has_children - Returns whether a kobject has children. + * @kobj: the object to test + * + * This will return whether a kobject has other kobjects as children. + * + * It does NOT account for the presence of attribute files, only sub + * directories. It also assumes there is no concurrent addition or + * removal of such children, and thus relies on external locking. + */ +static inline bool kobject_has_children(struct kobject *kobj) +{ + WARN_ON_ONCE(atomic_read(&kobj->kref.refcount) == 0); + + return kobj->sd && kobj->sd->dir.subdirs; +} + struct kobj_type { void (*release)(struct kobject *kobj); const struct sysfs_ops *sysfs_ops; diff --git a/include/linux/leds-qpnp-flash.h b/include/linux/leds-qpnp-flash.h index 1fe6e1709fa6..e3b9cf148cbd 100644 --- a/include/linux/leds-qpnp-flash.h +++ b/include/linux/leds-qpnp-flash.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -21,7 +21,14 @@ #define FLASH_LED_PREPARE_OPTIONS_MASK GENMASK(3, 0) -int qpnp_flash_led_prepare(struct led_trigger *trig, int options, +#if (defined CONFIG_LEDS_QPNP_FLASH || defined CONFIG_LEDS_QPNP_FLASH_V2) +extern int (*qpnp_flash_led_prepare)(struct led_trigger *trig, int options, int *max_current); - +#else +static inline int qpnp_flash_led_prepare(struct led_trigger *trig, int options, + int *max_current) +{ + return -ENODEV; +} +#endif #endif diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 23c1e473f34b..c3764d2a2934 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -265,6 +265,8 @@ enum power_supply_property { POWER_SUPPLY_PROP_BATTERY_INFO, POWER_SUPPLY_PROP_BATTERY_INFO_ID, POWER_SUPPLY_PROP_ENABLE_JEITA_DETECTION, + POWER_SUPPLY_PROP_ALLOW_HVDCP3, + POWER_SUPPLY_PROP_MAX_PULSE_ALLOWED, /* Local extensions of type int64_t */ POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT, /* Properties of type `const char *' */ diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index e50b31d18462..e97cdfd6cba9 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -133,23 +133,23 @@ struct rhashtable_params { /** * struct rhashtable - Hash table handle * @tbl: Bucket table - * @nelems: Number of elements in table * @key_len: Key length for hashfn * @elasticity: Maximum chain length before rehash * @p: Configuration parameters * @run_work: Deferred worker to expand/shrink asynchronously * @mutex: Mutex to protect current/future table swapping * @lock: Spin lock to protect walker list + * @nelems: Number of elements in table */ struct rhashtable { struct bucket_table __rcu *tbl; - atomic_t nelems; unsigned int key_len; unsigned int elasticity; struct rhashtable_params p; struct work_struct run_work; struct mutex mutex; spinlock_t lock; + atomic_t nelems; }; /** @@ -343,7 +343,8 @@ int rhashtable_init(struct rhashtable *ht, struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht, const void *key, struct rhash_head *obj, - struct bucket_table *old_tbl); + struct bucket_table *old_tbl, + void **data); int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl); int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter); @@ -514,18 +515,8 @@ static inline int rhashtable_compare(struct rhashtable_compare_arg *arg, return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len); } -/** - * rhashtable_lookup_fast - search hash table, inlined version - * @ht: hash table - * @key: the pointer to the key - * @params: hash table parameters - * - * Computes the hash value for the key and traverses the bucket chain looking - * for a entry with an identical key. The first matching entry is returned. - * - * Returns the first entry on which the compare function returned true. - */ -static inline void *rhashtable_lookup_fast( +/* Internal function, do not use. 
*/ +static inline struct rhash_head *__rhashtable_lookup( struct rhashtable *ht, const void *key, const struct rhashtable_params params) { @@ -537,8 +528,6 @@ static inline void *rhashtable_lookup_fast( struct rhash_head *he; unsigned int hash; - rcu_read_lock(); - tbl = rht_dereference_rcu(ht->tbl, ht); restart: hash = rht_key_hashfn(ht, tbl, key, params); @@ -547,8 +536,7 @@ restart: params.obj_cmpfn(&arg, rht_obj(ht, he)) : rhashtable_compare(&arg, rht_obj(ht, he))) continue; - rcu_read_unlock(); - return rht_obj(ht, he); + return he; } /* Ensure we see any new tables. */ @@ -557,13 +545,64 @@ restart: tbl = rht_dereference_rcu(tbl->future_tbl, ht); if (unlikely(tbl)) goto restart; - rcu_read_unlock(); return NULL; } -/* Internal function, please use rhashtable_insert_fast() instead */ -static inline int __rhashtable_insert_fast( +/** + * rhashtable_lookup - search hash table + * @ht: hash table + * @key: the pointer to the key + * @params: hash table parameters + * + * Computes the hash value for the key and traverses the bucket chain looking + * for a entry with an identical key. The first matching entry is returned. + * + * This must only be called under the RCU read lock. + * + * Returns the first entry on which the compare function returned true. + */ +static inline void *rhashtable_lookup( + struct rhashtable *ht, const void *key, + const struct rhashtable_params params) +{ + struct rhash_head *he = __rhashtable_lookup(ht, key, params); + + return he ? rht_obj(ht, he) : NULL; +} + +/** + * rhashtable_lookup_fast - search hash table, without RCU read lock + * @ht: hash table + * @key: the pointer to the key + * @params: hash table parameters + * + * Computes the hash value for the key and traverses the bucket chain looking + * for a entry with an identical key. The first matching entry is returned. + * + * Only use this function when you have other mechanisms guaranteeing + * that the object won't go away after the RCU read lock is released. + * + * Returns the first entry on which the compare function returned true. + */ +static inline void *rhashtable_lookup_fast( + struct rhashtable *ht, const void *key, + const struct rhashtable_params params) +{ + void *obj; + + rcu_read_lock(); + obj = rhashtable_lookup(ht, key, params); + rcu_read_unlock(); + + return obj; +} + +/* Internal function, please use rhashtable_insert_fast() instead. This + * function returns the existing element already in hashes in there is a clash, + * otherwise it returns an error via ERR_PTR(). + */ +static inline void *__rhashtable_insert_fast( struct rhashtable *ht, const void *key, struct rhash_head *obj, const struct rhashtable_params params) { @@ -576,6 +615,7 @@ static inline int __rhashtable_insert_fast( spinlock_t *lock; unsigned int elasticity; unsigned int hash; + void *data = NULL; int err; restart: @@ -600,11 +640,14 @@ restart: new_tbl = rht_dereference_rcu(tbl->future_tbl, ht); if (unlikely(new_tbl)) { - tbl = rhashtable_insert_slow(ht, key, obj, new_tbl); + tbl = rhashtable_insert_slow(ht, key, obj, new_tbl, &data); if (!IS_ERR_OR_NULL(tbl)) goto slow_path; err = PTR_ERR(tbl); + if (err == -EEXIST) + err = 0; + goto out; } @@ -618,25 +661,25 @@ slow_path: err = rhashtable_insert_rehash(ht, tbl); rcu_read_unlock(); if (err) - return err; + return ERR_PTR(err); goto restart; } - err = -EEXIST; + err = 0; elasticity = ht->elasticity; rht_for_each(head, tbl, hash) { if (key && unlikely(!(params.obj_cmpfn ? 
params.obj_cmpfn(&arg, rht_obj(ht, head)) : - rhashtable_compare(&arg, rht_obj(ht, head))))) + rhashtable_compare(&arg, rht_obj(ht, head))))) { + data = rht_obj(ht, head); goto out; + } if (!--elasticity) goto slow_path; } - err = 0; - head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash); RCU_INIT_POINTER(obj->next, head); @@ -651,7 +694,7 @@ out: spin_unlock_bh(lock); rcu_read_unlock(); - return err; + return err ? ERR_PTR(err) : data; } /** @@ -674,7 +717,13 @@ static inline int rhashtable_insert_fast( struct rhashtable *ht, struct rhash_head *obj, const struct rhashtable_params params) { - return __rhashtable_insert_fast(ht, NULL, obj, params); + void *ret; + + ret = __rhashtable_insert_fast(ht, NULL, obj, params); + if (IS_ERR(ret)) + return PTR_ERR(ret); + + return ret == NULL ? 0 : -EEXIST; } /** @@ -703,11 +752,15 @@ static inline int rhashtable_lookup_insert_fast( const struct rhashtable_params params) { const char *key = rht_obj(ht, obj); + void *ret; BUG_ON(ht->p.obj_hashfn); - return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, - params); + ret = __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params); + if (IS_ERR(ret)) + return PTR_ERR(ret); + + return ret == NULL ? 0 : -EEXIST; } /** @@ -736,6 +789,32 @@ static inline int rhashtable_lookup_insert_key( struct rhashtable *ht, const void *key, struct rhash_head *obj, const struct rhashtable_params params) { + void *ret; + + BUG_ON(!ht->p.obj_hashfn || !key); + + ret = __rhashtable_insert_fast(ht, key, obj, params); + if (IS_ERR(ret)) + return PTR_ERR(ret); + + return ret == NULL ? 0 : -EEXIST; +} + +/** + * rhashtable_lookup_get_insert_key - lookup and insert object into hash table + * @ht: hash table + * @obj: pointer to hash head inside object + * @params: hash table parameters + * @data: pointer to element data already in hashes + * + * Just like rhashtable_lookup_insert_key(), but this function returns the + * object if it exists, NULL if it does not and the insertion was successful, + * and an ERR_PTR otherwise. + */ +static inline void *rhashtable_lookup_get_insert_key( + struct rhashtable *ht, const void *key, struct rhash_head *obj, + const struct rhashtable_params params) +{ BUG_ON(!ht->p.obj_hashfn || !key); return __rhashtable_insert_fast(ht, key, obj, params); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index a490dd718654..502787c29ce9 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -556,9 +556,14 @@ struct sk_buff { struct skb_mstamp skb_mstamp; }; }; - struct rb_node rbnode; /* used in netem & tcp stack */ + struct rb_node rbnode; /* used in netem, ip4 defrag, and tcp stack */ }; - struct sock *sk; + + union { + struct sock *sk; + int ip_defrag_offset; + }; + struct net_device *dev; /* @@ -2273,7 +2278,7 @@ static inline void __skb_queue_purge(struct sk_buff_head *list) kfree_skb(skb); } -void skb_rbtree_purge(struct rb_root *root); +unsigned int skb_rbtree_purge(struct rb_root *root); void *netdev_alloc_frag(unsigned int fragsz); @@ -2791,6 +2796,7 @@ static inline unsigned char *skb_push_rcsum(struct sk_buff *skb, return skb->data; } +int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len); /** * pskb_trim_rcsum - trim received skb and update checksum * @skb: buffer to trim @@ -2798,15 +2804,14 @@ static inline unsigned char *skb_push_rcsum(struct sk_buff *skb, * * This is exactly the same as pskb_trim except that it ensures the * checksum of received packets are still valid after the operation. + * It can change skb pointers. 
*/ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) { if (likely(len >= skb->len)) return 0; - if (skb->ip_summed == CHECKSUM_COMPLETE) - skb->ip_summed = CHECKSUM_NONE; - return __pskb_trim(skb, len); + return pskb_trim_rcsum_slow(skb, len); } #define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode) diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h index c26a6e4dc306..6260ec146142 100644 --- a/include/net/inet_frag.h +++ b/include/net/inet_frag.h @@ -1,13 +1,19 @@ #ifndef __NET_FRAG_H__ #define __NET_FRAG_H__ +#include <linux/rhashtable.h> + struct netns_frags { - /* Keep atomic mem on separate cachelines in structs that include it */ - atomic_t mem ____cacheline_aligned_in_smp; /* sysctls */ + long high_thresh; + long low_thresh; int timeout; - int high_thresh; - int low_thresh; + struct inet_frags *f; + + struct rhashtable rhashtable ____cacheline_aligned_in_smp; + + /* Keep atomic mem on separate cachelines in structs that include it */ + atomic_long_t mem ____cacheline_aligned_in_smp; }; /** @@ -23,74 +29,68 @@ enum { INET_FRAG_COMPLETE = BIT(2), }; +struct frag_v4_compare_key { + __be32 saddr; + __be32 daddr; + u32 user; + u32 vif; + __be16 id; + u16 protocol; +}; + +struct frag_v6_compare_key { + struct in6_addr saddr; + struct in6_addr daddr; + u32 user; + __be32 id; + u32 iif; +}; + /** * struct inet_frag_queue - fragment queue * - * @lock: spinlock protecting the queue + * @node: rhash node + * @key: keys identifying this frag. * @timer: queue expiration timer - * @list: hash bucket list + * @lock: spinlock protecting this frag * @refcnt: reference count of the queue * @fragments: received fragments head + * @rb_fragments: received fragments rb-tree root * @fragments_tail: received fragments tail + * @last_run_head: the head of the last "run". see ip_fragment.c * @stamp: timestamp of the last received fragment * @len: total length of the original datagram * @meat: length of received fragments so far * @flags: fragment queue flags * @max_size: maximum received fragment size * @net: namespace that this frag belongs to - * @list_evictor: list of queues to forcefully evict (e.g. due to low memory) + * @rcu: rcu head for freeing deferall */ struct inet_frag_queue { - spinlock_t lock; + struct rhash_head node; + union { + struct frag_v4_compare_key v4; + struct frag_v6_compare_key v6; + } key; struct timer_list timer; - struct hlist_node list; + spinlock_t lock; atomic_t refcnt; - struct sk_buff *fragments; + struct sk_buff *fragments; /* Used in IPv6. */ + struct rb_root rb_fragments; /* Used in IPv4. */ struct sk_buff *fragments_tail; + struct sk_buff *last_run_head; ktime_t stamp; int len; int meat; __u8 flags; u16 max_size; - struct netns_frags *net; - struct hlist_node list_evictor; -}; - -#define INETFRAGS_HASHSZ 1024 - -/* averaged: - * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ / - * rounded up (SKB_TRUELEN(0) + sizeof(struct ipq or - * struct frag_queue)) - */ -#define INETFRAGS_MAXDEPTH 128 - -struct inet_frag_bucket { - struct hlist_head chain; - spinlock_t chain_lock; + struct netns_frags *net; + struct rcu_head rcu; }; struct inet_frags { - struct inet_frag_bucket hash[INETFRAGS_HASHSZ]; - - struct work_struct frags_work; - unsigned int next_bucket; - unsigned long last_rebuild_jiffies; - bool rebuild; - - /* The first call to hashfn is responsible to initialize - * rnd. This is best done with net_get_random_once. 
- * - * rnd_seqlock is used to let hash insertion detect - * when it needs to re-lookup the hash chain to use. - */ - u32 rnd; - seqlock_t rnd_seqlock; int qsize; - unsigned int (*hashfn)(const struct inet_frag_queue *); - bool (*match)(const struct inet_frag_queue *q, - const void *arg); void (*constructor)(struct inet_frag_queue *q, const void *arg); void (*destructor)(struct inet_frag_queue *); @@ -98,56 +98,47 @@ struct inet_frags { void (*frag_expire)(unsigned long data); struct kmem_cache *frags_cachep; const char *frags_cache_name; + struct rhashtable_params rhash_params; }; int inet_frags_init(struct inet_frags *); void inet_frags_fini(struct inet_frags *); -static inline void inet_frags_init_net(struct netns_frags *nf) +static inline int inet_frags_init_net(struct netns_frags *nf) { - atomic_set(&nf->mem, 0); + atomic_long_set(&nf->mem, 0); + return rhashtable_init(&nf->rhashtable, &nf->f->rhash_params); } -void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f); +void inet_frags_exit_net(struct netns_frags *nf); -void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f); -void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f); -struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, - struct inet_frags *f, void *key, unsigned int hash); +void inet_frag_kill(struct inet_frag_queue *q); +void inet_frag_destroy(struct inet_frag_queue *q); +struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key); -void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q, - const char *prefix); +/* Free all skbs in the queue; return the sum of their truesizes. */ +unsigned int inet_frag_rbtree_purge(struct rb_root *root); -static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f) +static inline void inet_frag_put(struct inet_frag_queue *q) { if (atomic_dec_and_test(&q->refcnt)) - inet_frag_destroy(q, f); -} - -static inline bool inet_frag_evicting(struct inet_frag_queue *q) -{ - return !hlist_unhashed(&q->list_evictor); + inet_frag_destroy(q); } /* Memory Tracking Functions. 
*/ -static inline int frag_mem_limit(struct netns_frags *nf) -{ - return atomic_read(&nf->mem); -} - -static inline void sub_frag_mem_limit(struct netns_frags *nf, int i) +static inline long frag_mem_limit(const struct netns_frags *nf) { - atomic_sub(i, &nf->mem); + return atomic_long_read(&nf->mem); } -static inline void add_frag_mem_limit(struct netns_frags *nf, int i) +static inline void sub_frag_mem_limit(struct netns_frags *nf, long val) { - atomic_add(i, &nf->mem); + atomic_long_sub(val, &nf->mem); } -static inline int sum_frag_mem_limit(struct netns_frags *nf) +static inline void add_frag_mem_limit(struct netns_frags *nf, long val) { - return atomic_read(&nf->mem); + atomic_long_add(val, &nf->mem); } /* RFC 3168 support : diff --git a/include/net/ip.h b/include/net/ip.h index 81c7408deb83..1ef3fce1ecbf 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -527,7 +527,6 @@ static inline struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *s return skb; } #endif -int ip_frag_mem(struct net *net); /* * Functions provided by ip_forward.c diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 2a25b53cd427..f6ff83b2ac87 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -200,7 +200,7 @@ int fib_table_insert(struct fib_table *, struct fib_config *); int fib_table_delete(struct fib_table *, struct fib_config *); int fib_table_dump(struct fib_table *table, struct sk_buff *skb, struct netlink_callback *cb); -int fib_table_flush(struct fib_table *table); +int fib_table_flush(struct fib_table *table, bool flush_all); struct fib_table *fib_trie_unmerge(struct fib_table *main_tb); void fib_table_flush_external(struct fib_table *table); void fib_free_table(struct fib_table *tb); diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 0e01d570fa22..c07cf9596b6f 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -320,13 +320,6 @@ static inline bool ipv6_accept_ra(struct inet6_dev *idev) idev->cnf.accept_ra; } -#if IS_ENABLED(CONFIG_IPV6) -static inline int ip6_frag_mem(struct net *net) -{ - return sum_frag_mem_limit(&net->ipv6.frags); -} -#endif - #define IPV6_FRAG_HIGH_THRESH (4 * 1024*1024) /* 4194304 */ #define IPV6_FRAG_LOW_THRESH (3 * 1024*1024) /* 3145728 */ #define IPV6_FRAG_TIMEOUT (60 * HZ) /* 60 seconds */ @@ -505,17 +498,8 @@ enum ip6_defrag_users { __IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX, }; -struct ip6_create_arg { - __be32 id; - u32 user; - const struct in6_addr *src; - const struct in6_addr *dst; - int iif; - u8 ecn; -}; - void ip6_frag_init(struct inet_frag_queue *q, const void *a); -bool ip6_frag_match(const struct inet_frag_queue *q, const void *a); +extern const struct rhashtable_params ip6_rhash_params; /* * Equivalent of ipv4 struct ip @@ -523,19 +507,13 @@ bool ip6_frag_match(const struct inet_frag_queue *q, const void *a); struct frag_queue { struct inet_frag_queue q; - __be32 id; /* fragment id */ - u32 user; - struct in6_addr saddr; - struct in6_addr daddr; - int iif; unsigned int csum; __u16 nhoffset; u8 ecn; }; -void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq, - struct inet_frags *frags); +void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq); static inline bool ipv6_addr_any(const struct in6_addr *a) { diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h index 25a9ad8bcef1..9de808ebce05 100644 --- a/include/uapi/linux/snmp.h +++ b/include/uapi/linux/snmp.h @@ -55,6 +55,7 @@ enum IPSTATS_MIB_ECT1PKTS, /* InECT1Pkts */ IPSTATS_MIB_ECT0PKTS, /* 
InECT0Pkts */ IPSTATS_MIB_CEPKTS, /* InCEPkts */ + IPSTATS_MIB_REASM_OVERLAPS, /* ReasmOverlaps */ __IPSTATS_MIB_MAX }; diff --git a/kernel/exit.c b/kernel/exit.c index fc82e495b729..8e288e8e9ca3 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -457,12 +457,14 @@ static struct task_struct *find_alive_thread(struct task_struct *p) return NULL; } -static struct task_struct *find_child_reaper(struct task_struct *father) +static struct task_struct *find_child_reaper(struct task_struct *father, + struct list_head *dead) __releases(&tasklist_lock) __acquires(&tasklist_lock) { struct pid_namespace *pid_ns = task_active_pid_ns(father); struct task_struct *reaper = pid_ns->child_reaper; + struct task_struct *p, *n; if (likely(reaper != father)) return reaper; @@ -478,6 +480,12 @@ static struct task_struct *find_child_reaper(struct task_struct *father) panic("Attempted to kill init! exitcode=0x%08x\n", father->signal->group_exit_code ?: father->exit_code); } + + list_for_each_entry_safe(p, n, dead, ptrace_entry) { + list_del_init(&p->ptrace_entry); + release_task(p); + } + zap_pid_ns_processes(pid_ns); write_lock_irq(&tasklist_lock); @@ -564,7 +572,7 @@ static void forget_original_parent(struct task_struct *father, exit_ptrace(father, dead); /* Can drop and reacquire tasklist_lock */ - reaper = find_child_reaper(father); + reaper = find_child_reaper(father, dead); if (list_empty(&father->children)) return; diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 3decfbc88308..c45082b6fdd2 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -3825,7 +3825,7 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp) continue; rdp = per_cpu_ptr(rsp->rda, cpu); pr_cont(" %d-%c%c%c", cpu, - "O."[cpu_online(cpu)], + "O."[!!cpu_online(cpu)], "o."[!!(rdp->grpmask & rnp->expmaskinit)], "N."[!!(rdp->grpmask & rnp->expmaskinitnext)]); } diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 37ea94b636a3..7bb8649429bf 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -250,8 +250,10 @@ static int rhashtable_rehash_table(struct rhashtable *ht) if (!new_tbl) return 0; - for (old_hash = 0; old_hash < old_tbl->size; old_hash++) + for (old_hash = 0; old_hash < old_tbl->size; old_hash++) { rhashtable_rehash_chain(ht, old_hash); + cond_resched(); + } /* Publish the new table pointer. */ rcu_assign_pointer(ht->tbl, new_tbl); @@ -441,7 +443,8 @@ EXPORT_SYMBOL_GPL(rhashtable_insert_rehash); struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht, const void *key, struct rhash_head *obj, - struct bucket_table *tbl) + struct bucket_table *tbl, + void **data) { struct rhash_head *head; unsigned int hash; @@ -452,8 +455,11 @@ struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht, spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING); err = -EEXIST; - if (key && rhashtable_lookup_fast(ht, key, ht->p)) - goto exit; + if (key) { + *data = rhashtable_lookup_fast(ht, key, ht->p); + if (*data) + goto exit; + } err = -E2BIG; if (unlikely(rht_grow_above_max(ht, tbl))) @@ -838,6 +844,7 @@ void rhashtable_free_and_destroy(struct rhashtable *ht, for (i = 0; i < tbl->size; i++) { struct rhash_head *pos, *next; + cond_resched(); for (pos = rht_dereference(tbl->buckets[i], ht), next = !rht_is_a_nulls(pos) ? rht_dereference(pos->next, ht) : NULL; diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 67237b7cb177..1ba63d3477cb 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -543,6 +543,13 @@ void oom_kill_process(struct oom_control *oc, struct task_struct *p, * still freeing memory. 
*/ read_lock(&tasklist_lock); + + /* + * The task 'p' might have already exited before reaching here. The + * put_task_struct() will free task_struct 'p' while the loop still try + * to access the field of 'p', so, get an extra reference. + */ + get_task_struct(p); for_each_thread(p, t) { list_for_each_entry(child, &t->children, sibling) { unsigned int child_points; @@ -562,6 +569,7 @@ void oom_kill_process(struct oom_control *oc, struct task_struct *p, } } } + put_task_struct(p); read_unlock(&tasklist_lock); p = find_lock_task_mm(victim); diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index fcdb86dd5a23..c21209aada8c 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c @@ -39,10 +39,10 @@ static inline int should_deliver(const struct net_bridge_port *p, int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb) { + skb_push(skb, ETH_HLEN); if (!is_skb_forwardable(skb->dev, skb)) goto drop; - skb_push(skb, ETH_HLEN); br_drop_fake_rtable(skb); skb_sender_cpu_clear(skb); @@ -88,12 +88,11 @@ static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb) skb->dev = to->dev; if (unlikely(netpoll_tx_running(to->br->dev))) { + skb_push(skb, ETH_HLEN); if (!is_skb_forwardable(skb->dev, skb)) kfree_skb(skb); - else { - skb_push(skb, ETH_HLEN); + else br_netpoll_send_skb(to, skb); - } return; } diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c index d61f56efc8dc..69dfd212e50d 100644 --- a/net/bridge/br_netfilter_ipv6.c +++ b/net/bridge/br_netfilter_ipv6.c @@ -131,6 +131,7 @@ int br_validate_ipv6(struct net *net, struct sk_buff *skb) IPSTATS_MIB_INDISCARDS); goto drop; } + hdr = ipv6_hdr(skb); } if (hdr->nexthdr == NEXTHDR_HOP && br_nf_check_hbh_len(skb)) goto drop; diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c index fdba3d9fbff3..6e48aa69fa24 100644 --- a/net/bridge/netfilter/nft_reject_bridge.c +++ b/net/bridge/netfilter/nft_reject_bridge.c @@ -192,6 +192,7 @@ static bool reject6_br_csum_ok(struct sk_buff *skb, int hook) pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h))) return false; + ip6h = ipv6_hdr(skb); thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo); if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0) return false; diff --git a/net/can/bcm.c b/net/can/bcm.c index 4ccfd356baed..1f15622d3c65 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -67,6 +67,9 @@ */ #define MAX_NFRAMES 256 +/* limit timers to 400 days for sending/timeouts */ +#define BCM_TIMER_SEC_MAX (400 * 24 * 60 * 60) + /* use of last_frames[index].can_dlc */ #define RX_RECV 0x40 /* received data for this element */ #define RX_THR 0x80 /* element not been sent due to throttle feature */ @@ -136,6 +139,22 @@ static inline ktime_t bcm_timeval_to_ktime(struct bcm_timeval tv) return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC); } +/* check limitations for timeval provided by user */ +static bool bcm_is_invalid_tv(struct bcm_msg_head *msg_head) +{ + if ((msg_head->ival1.tv_sec < 0) || + (msg_head->ival1.tv_sec > BCM_TIMER_SEC_MAX) || + (msg_head->ival1.tv_usec < 0) || + (msg_head->ival1.tv_usec >= USEC_PER_SEC) || + (msg_head->ival2.tv_sec < 0) || + (msg_head->ival2.tv_sec > BCM_TIMER_SEC_MAX) || + (msg_head->ival2.tv_usec < 0) || + (msg_head->ival2.tv_usec >= USEC_PER_SEC)) + return true; + + return false; +} + #define CFSIZ sizeof(struct can_frame) #define OPSIZ sizeof(struct bcm_op) #define MHSIZ sizeof(struct bcm_msg_head) @@ -855,6 
+874,10 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES) return -EINVAL; + /* check timeval limitations */ + if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head)) + return -EINVAL; + /* check the given can_id */ op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex); @@ -1020,6 +1043,10 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, (!(msg_head->can_id & CAN_RTR_FLAG)))) return -EINVAL; + /* check timeval limitations */ + if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head)) + return -EINVAL; + /* check the given can_id */ op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex); if (op) { diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 2e486ab7c878..d91c7b43383e 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -1528,6 +1528,21 @@ done: } EXPORT_SYMBOL(___pskb_trim); +/* Note : use pskb_trim_rcsum() instead of calling this directly + */ +int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len) +{ + if (skb->ip_summed == CHECKSUM_COMPLETE) { + int delta = skb->len - len; + + skb->csum = csum_block_sub(skb->csum, + skb_checksum(skb, len, delta, 0), + len); + } + return __pskb_trim(skb, len); +} +EXPORT_SYMBOL(pskb_trim_rcsum_slow); + /** * __pskb_pull_tail - advance tail of skb header * @skb: buffer to reallocate @@ -2406,23 +2421,27 @@ EXPORT_SYMBOL(skb_queue_purge); /** * skb_rbtree_purge - empty a skb rbtree * @root: root of the rbtree to empty + * Return value: the sum of truesizes of all purged skbs. * * Delete all buffers on an &sk_buff rbtree. Each buffer is removed from * the list and one reference dropped. This function does not take * any lock. Synchronization should be handled by the caller (e.g., TCP * out-of-order queue is protected by the socket lock). 
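/* pskb_trim_rcsum_slow() above subtracts the checksum of the trimmed tail
 * from a CHECKSUM_COMPLETE skb instead of recomputing the whole sum.  A
 * user-space model of that adjustment with the Internet one's-complement
 * sum; csum_block_sub()'s odd-offset rotation is omitted and the packet
 * bytes are invented for the demo.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t fold(uint32_t sum)
{
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
}

/* One's-complement sum over len bytes (len kept even for brevity). */
static uint16_t csum(const uint8_t *p, size_t len)
{
        uint32_t sum = 0;
        size_t i;

        for (i = 0; i < len; i += 2)
                sum += (uint32_t)((p[i] << 8) | p[i + 1]);
        return fold(sum);
}

int main(void)
{
        uint8_t pkt[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        size_t new_len = 4;

        uint16_t full = csum(pkt, sizeof(pkt));
        uint16_t tail = csum(pkt + new_len, sizeof(pkt) - new_len);

        /* Subtracting in one's complement == adding the complement. */
        uint16_t adjusted = fold((uint32_t)full + (uint16_t)~tail);

        /* Both print 0406: the adjusted sum equals a full recompute. */
        printf("recomputed=%04x adjusted=%04x\n", csum(pkt, new_len), adjusted);
        return 0;
}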
*/ -void skb_rbtree_purge(struct rb_root *root) +unsigned int skb_rbtree_purge(struct rb_root *root) { struct rb_node *p = rb_first(root); + unsigned int sum = 0; while (p) { struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode); p = rb_next(p); rb_erase(&skb->rbnode, root); + sum += skb->truesize; kfree_skb(skb); } + return sum; } /** diff --git a/net/ieee802154/6lowpan/6lowpan_i.h b/net/ieee802154/6lowpan/6lowpan_i.h index b4e17a7c0df0..fdbebe51446f 100644 --- a/net/ieee802154/6lowpan/6lowpan_i.h +++ b/net/ieee802154/6lowpan/6lowpan_i.h @@ -16,37 +16,19 @@ typedef unsigned __bitwise__ lowpan_rx_result; #define LOWPAN_DISPATCH_FRAG1 0xc0 #define LOWPAN_DISPATCH_FRAGN 0xe0 -struct lowpan_create_arg { +struct frag_lowpan_compare_key { u16 tag; u16 d_size; - const struct ieee802154_addr *src; - const struct ieee802154_addr *dst; + struct ieee802154_addr src; + struct ieee802154_addr dst; }; -/* Equivalent of ipv4 struct ip +/* Equivalent of ipv4 struct ipq */ struct lowpan_frag_queue { struct inet_frag_queue q; - - u16 tag; - u16 d_size; - struct ieee802154_addr saddr; - struct ieee802154_addr daddr; }; -static inline u32 ieee802154_addr_hash(const struct ieee802154_addr *a) -{ - switch (a->mode) { - case IEEE802154_ADDR_LONG: - return (((__force u64)a->extended_addr) >> 32) ^ - (((__force u64)a->extended_addr) & 0xffffffff); - case IEEE802154_ADDR_SHORT: - return (__force u32)(a->short_addr); - default: - return 0; - } -} - /* private device info */ struct lowpan_dev_info { struct net_device *wdev; /* wpan device ptr */ diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c index 12e8cf4bda9f..6183730d38db 100644 --- a/net/ieee802154/6lowpan/reassembly.c +++ b/net/ieee802154/6lowpan/reassembly.c @@ -37,47 +37,15 @@ static struct inet_frags lowpan_frags; static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev, struct net_device *ldev); -static unsigned int lowpan_hash_frag(u16 tag, u16 d_size, - const struct ieee802154_addr *saddr, - const struct ieee802154_addr *daddr) -{ - net_get_random_once(&lowpan_frags.rnd, sizeof(lowpan_frags.rnd)); - return jhash_3words(ieee802154_addr_hash(saddr), - ieee802154_addr_hash(daddr), - (__force u32)(tag + (d_size << 16)), - lowpan_frags.rnd); -} - -static unsigned int lowpan_hashfn(const struct inet_frag_queue *q) -{ - const struct lowpan_frag_queue *fq; - - fq = container_of(q, struct lowpan_frag_queue, q); - return lowpan_hash_frag(fq->tag, fq->d_size, &fq->saddr, &fq->daddr); -} - -static bool lowpan_frag_match(const struct inet_frag_queue *q, const void *a) -{ - const struct lowpan_frag_queue *fq; - const struct lowpan_create_arg *arg = a; - - fq = container_of(q, struct lowpan_frag_queue, q); - return fq->tag == arg->tag && fq->d_size == arg->d_size && - ieee802154_addr_equal(&fq->saddr, arg->src) && - ieee802154_addr_equal(&fq->daddr, arg->dst); -} - static void lowpan_frag_init(struct inet_frag_queue *q, const void *a) { - const struct lowpan_create_arg *arg = a; + const struct frag_lowpan_compare_key *key = a; struct lowpan_frag_queue *fq; fq = container_of(q, struct lowpan_frag_queue, q); - fq->tag = arg->tag; - fq->d_size = arg->d_size; - fq->saddr = *arg->src; - fq->daddr = *arg->dst; + BUILD_BUG_ON(sizeof(*key) > sizeof(q->key)); + memcpy(&q->key, key, sizeof(*key)); } static void lowpan_frag_expire(unsigned long data) @@ -93,10 +61,10 @@ static void lowpan_frag_expire(unsigned long data) if (fq->q.flags & INET_FRAG_COMPLETE) goto out; - inet_frag_kill(&fq->q, &lowpan_frags); + 
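/* struct frag_lowpan_compare_key above lets the whole reassembly lookup key
 * be hashed with jhash2 and compared with memcmp.  A simplified sketch of
 * keying by a flat, padding-free struct; the hash below is a toy mixer
 * standing in for jhash2 and the field layout is illustrative only.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct compare_key {
        uint16_t tag;
        uint16_t d_size;
        uint32_t src;           /* simplified: the real key embeds full addresses */
        uint32_t dst;
};                              /* 12 bytes, no padding, so byte-wise compare is safe */

static uint32_t hash_words(const void *data, size_t nwords, uint32_t seed)
{
        const uint32_t *w = data;
        uint32_t h = seed;
        size_t i;

        for (i = 0; i < nwords; i++) {
                h ^= w[i];
                h *= 0x9e3779b1u;       /* cheap mixing step, not jhash2 */
        }
        return h;
}

int main(void)
{
        struct compare_key a = { .tag = 7, .d_size = 1280, .src = 1, .dst = 2 };
        struct compare_key b = a;

        uint32_t ha = hash_words(&a, sizeof(a) / sizeof(uint32_t), 42);
        uint32_t hb = hash_words(&b, sizeof(b) / sizeof(uint32_t), 42);

        /* Equal keys hash equally and compare equal byte-for-byte. */
        printf("hash equal: %d, memcmp: %d\n", ha == hb, memcmp(&a, &b, sizeof(a)));
        return 0;
}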
inet_frag_kill(&fq->q); out: spin_unlock(&fq->q.lock); - inet_frag_put(&fq->q, &lowpan_frags); + inet_frag_put(&fq->q); } static inline struct lowpan_frag_queue * @@ -104,25 +72,20 @@ fq_find(struct net *net, const struct lowpan_802154_cb *cb, const struct ieee802154_addr *src, const struct ieee802154_addr *dst) { - struct inet_frag_queue *q; - struct lowpan_create_arg arg; - unsigned int hash; struct netns_ieee802154_lowpan *ieee802154_lowpan = net_ieee802154_lowpan(net); + struct frag_lowpan_compare_key key = {}; + struct inet_frag_queue *q; - arg.tag = cb->d_tag; - arg.d_size = cb->d_size; - arg.src = src; - arg.dst = dst; - - hash = lowpan_hash_frag(cb->d_tag, cb->d_size, src, dst); + key.tag = cb->d_tag; + key.d_size = cb->d_size; + key.src = *src; + key.dst = *dst; - q = inet_frag_find(&ieee802154_lowpan->frags, - &lowpan_frags, &arg, hash); - if (IS_ERR_OR_NULL(q)) { - inet_frag_maybe_warn_overflow(q, pr_fmt()); + q = inet_frag_find(&ieee802154_lowpan->frags, &key); + if (!q) return NULL; - } + return container_of(q, struct lowpan_frag_queue, q); } @@ -229,7 +192,7 @@ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev, struct sk_buff *fp, *head = fq->q.fragments; int sum_truesize; - inet_frag_kill(&fq->q, &lowpan_frags); + inet_frag_kill(&fq->q); /* Make the one we just received the head. */ if (prev) { @@ -408,7 +371,7 @@ int lowpan_frag_rcv(struct sk_buff *skb, u8 frag_type) struct lowpan_frag_queue *fq; struct net *net = dev_net(skb->dev); struct lowpan_802154_cb *cb = lowpan_802154_cb(skb); - struct ieee802154_hdr hdr; + struct ieee802154_hdr hdr = {}; int err; if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0) @@ -437,7 +400,7 @@ int lowpan_frag_rcv(struct sk_buff *skb, u8 frag_type) ret = lowpan_frag_queue(fq, skb, frag_type); spin_unlock(&fq->q.lock); - inet_frag_put(&fq->q, &lowpan_frags); + inet_frag_put(&fq->q); return ret; } @@ -447,24 +410,22 @@ err: } #ifdef CONFIG_SYSCTL -static int zero; static struct ctl_table lowpan_frags_ns_ctl_table[] = { { .procname = "6lowpanfrag_high_thresh", .data = &init_net.ieee802154_lowpan.frags.high_thresh, - .maxlen = sizeof(int), + .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = proc_dointvec_minmax, + .proc_handler = proc_doulongvec_minmax, .extra1 = &init_net.ieee802154_lowpan.frags.low_thresh }, { .procname = "6lowpanfrag_low_thresh", .data = &init_net.ieee802154_lowpan.frags.low_thresh, - .maxlen = sizeof(int), + .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = &zero, + .proc_handler = proc_doulongvec_minmax, .extra2 = &init_net.ieee802154_lowpan.frags.high_thresh }, { @@ -580,14 +541,20 @@ static int __net_init lowpan_frags_init_net(struct net *net) { struct netns_ieee802154_lowpan *ieee802154_lowpan = net_ieee802154_lowpan(net); + int res; ieee802154_lowpan->frags.high_thresh = IPV6_FRAG_HIGH_THRESH; ieee802154_lowpan->frags.low_thresh = IPV6_FRAG_LOW_THRESH; ieee802154_lowpan->frags.timeout = IPV6_FRAG_TIMEOUT; + ieee802154_lowpan->frags.f = &lowpan_frags; - inet_frags_init_net(&ieee802154_lowpan->frags); - - return lowpan_frags_ns_sysctl_register(net); + res = inet_frags_init_net(&ieee802154_lowpan->frags); + if (res < 0) + return res; + res = lowpan_frags_ns_sysctl_register(net); + if (res < 0) + inet_frags_exit_net(&ieee802154_lowpan->frags); + return res; } static void __net_exit lowpan_frags_exit_net(struct net *net) @@ -596,7 +563,7 @@ static void __net_exit lowpan_frags_exit_net(struct net *net) net_ieee802154_lowpan(net); 
lowpan_frags_ns_sysctl_unregister(net); - inet_frags_exit_net(&ieee802154_lowpan->frags, &lowpan_frags); + inet_frags_exit_net(&ieee802154_lowpan->frags); } static struct pernet_operations lowpan_frags_ops = { @@ -604,33 +571,64 @@ static struct pernet_operations lowpan_frags_ops = { .exit = lowpan_frags_exit_net, }; -int __init lowpan_net_frag_init(void) +static u32 lowpan_key_hashfn(const void *data, u32 len, u32 seed) { - int ret; + return jhash2(data, + sizeof(struct frag_lowpan_compare_key) / sizeof(u32), seed); +} - ret = lowpan_frags_sysctl_register(); - if (ret) - return ret; +static u32 lowpan_obj_hashfn(const void *data, u32 len, u32 seed) +{ + const struct inet_frag_queue *fq = data; - ret = register_pernet_subsys(&lowpan_frags_ops); - if (ret) - goto err_pernet; + return jhash2((const u32 *)&fq->key, + sizeof(struct frag_lowpan_compare_key) / sizeof(u32), seed); +} + +static int lowpan_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr) +{ + const struct frag_lowpan_compare_key *key = arg->key; + const struct inet_frag_queue *fq = ptr; + + return !!memcmp(&fq->key, key, sizeof(*key)); +} + +static const struct rhashtable_params lowpan_rhash_params = { + .head_offset = offsetof(struct inet_frag_queue, node), + .hashfn = lowpan_key_hashfn, + .obj_hashfn = lowpan_obj_hashfn, + .obj_cmpfn = lowpan_obj_cmpfn, + .automatic_shrinking = true, +}; + +int __init lowpan_net_frag_init(void) +{ + int ret; - lowpan_frags.hashfn = lowpan_hashfn; lowpan_frags.constructor = lowpan_frag_init; lowpan_frags.destructor = NULL; lowpan_frags.skb_free = NULL; lowpan_frags.qsize = sizeof(struct frag_queue); - lowpan_frags.match = lowpan_frag_match; lowpan_frags.frag_expire = lowpan_frag_expire; lowpan_frags.frags_cache_name = lowpan_frags_cache_name; + lowpan_frags.rhash_params = lowpan_rhash_params; ret = inet_frags_init(&lowpan_frags); if (ret) - goto err_pernet; + goto out; + ret = lowpan_frags_sysctl_register(); + if (ret) + goto err_sysctl; + + ret = register_pernet_subsys(&lowpan_frags_ops); + if (ret) + goto err_pernet; +out: return ret; err_pernet: lowpan_frags_sysctl_unregister(); +err_sysctl: + inet_frags_fini(&lowpan_frags); return ret; } diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 860e33dd4030..8dc9073d4a76 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -187,7 +187,7 @@ static void fib_flush(struct net *net) struct fib_table *tb; hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) - flushed += fib_table_flush(tb); + flushed += fib_table_flush(tb, false); } if (flushed) @@ -1278,7 +1278,7 @@ static void ip_fib_net_exit(struct net *net) hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) { hlist_del(&tb->tb_hlist); - fib_table_flush(tb); + fib_table_flush(tb, true); fib_free_table(tb); } } diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index fa59bc35dbb5..9b14f8958dcc 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -1806,7 +1806,7 @@ void fib_table_flush_external(struct fib_table *tb) } /* Caller must hold RTNL. 
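/* lowpan_net_frag_init() above now initializes the core frags state first
 * and unwinds the later registrations in reverse order when one of them
 * fails.  A self-contained sketch of that goto-unwind pattern; the stub
 * functions are invented for the demo and the last step is made to fail.
 */
#include <stdio.h>

static int  init_core(void)         { puts("core init");    return 0; }
static void fini_core(void)         { puts("core fini");              }
static int  register_sysctl(void)   { puts("sysctl reg");   return 0; }
static void unregister_sysctl(void) { puts("sysctl unreg");           }
static int  register_pernet(void)   { puts("pernet reg");   return -1; } /* fails */

/* Acquire resources in order, release in reverse order on failure. */
static int frag_init(void)
{
        int ret;

        ret = init_core();
        if (ret)
                goto out;
        ret = register_sysctl();
        if (ret)
                goto err_sysctl;
        ret = register_pernet();
        if (ret)
                goto err_pernet;
        return 0;

err_pernet:
        unregister_sysctl();
err_sysctl:
        fini_core();
out:
        return ret;
}

int main(void)
{
        printf("frag_init() = %d\n", frag_init());
        return 0;
}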
*/ -int fib_table_flush(struct fib_table *tb) +int fib_table_flush(struct fib_table *tb, bool flush_all) { struct trie *t = (struct trie *)tb->tb_data; struct key_vector *pn = t->kv; @@ -1850,7 +1850,17 @@ int fib_table_flush(struct fib_table *tb) hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) { struct fib_info *fi = fa->fa_info; - if (!fi || !(fi->fib_flags & RTNH_F_DEAD)) { + if (!fi || + (!(fi->fib_flags & RTNH_F_DEAD) && + !fib_props[fa->fa_type].error)) { + slen = fa->fa_slen; + continue; + } + + /* Do not flush error routes if network namespace is + * not being dismantled + */ + if (!flush_all && fib_props[fa->fa_type].error) { slen = fa->fa_slen; continue; } diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c index b2001b20e029..c03e5f5859e1 100644 --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c @@ -25,12 +25,6 @@ #include <net/inet_frag.h> #include <net/inet_ecn.h> -#define INETFRAGS_EVICT_BUCKETS 128 -#define INETFRAGS_EVICT_MAX 512 - -/* don't rebuild inetfrag table with new secret more often than this */ -#define INETFRAGS_MIN_REBUILD_INTERVAL (5 * HZ) - /* Given the OR values of all fragments, apply RFC 3168 5.3 requirements * Value : 0xff if frame should be dropped. * 0 or INET_ECN_CE value, to be ORed in to final iph->tos field @@ -52,157 +46,8 @@ const u8 ip_frag_ecn_table[16] = { }; EXPORT_SYMBOL(ip_frag_ecn_table); -static unsigned int -inet_frag_hashfn(const struct inet_frags *f, const struct inet_frag_queue *q) -{ - return f->hashfn(q) & (INETFRAGS_HASHSZ - 1); -} - -static bool inet_frag_may_rebuild(struct inet_frags *f) -{ - return time_after(jiffies, - f->last_rebuild_jiffies + INETFRAGS_MIN_REBUILD_INTERVAL); -} - -static void inet_frag_secret_rebuild(struct inet_frags *f) -{ - int i; - - write_seqlock_bh(&f->rnd_seqlock); - - if (!inet_frag_may_rebuild(f)) - goto out; - - get_random_bytes(&f->rnd, sizeof(u32)); - - for (i = 0; i < INETFRAGS_HASHSZ; i++) { - struct inet_frag_bucket *hb; - struct inet_frag_queue *q; - struct hlist_node *n; - - hb = &f->hash[i]; - spin_lock(&hb->chain_lock); - - hlist_for_each_entry_safe(q, n, &hb->chain, list) { - unsigned int hval = inet_frag_hashfn(f, q); - - if (hval != i) { - struct inet_frag_bucket *hb_dest; - - hlist_del(&q->list); - - /* Relink to new hash chain. */ - hb_dest = &f->hash[hval]; - - /* This is the only place where we take - * another chain_lock while already holding - * one. As this will not run concurrently, - * we cannot deadlock on hb_dest lock below, if its - * already locked it will be released soon since - * other caller cannot be waiting for hb lock - * that we've taken above. 
- */ - spin_lock_nested(&hb_dest->chain_lock, - SINGLE_DEPTH_NESTING); - hlist_add_head(&q->list, &hb_dest->chain); - spin_unlock(&hb_dest->chain_lock); - } - } - spin_unlock(&hb->chain_lock); - } - - f->rebuild = false; - f->last_rebuild_jiffies = jiffies; -out: - write_sequnlock_bh(&f->rnd_seqlock); -} - -static bool inet_fragq_should_evict(const struct inet_frag_queue *q) -{ - if (!hlist_unhashed(&q->list_evictor)) - return false; - - return q->net->low_thresh == 0 || - frag_mem_limit(q->net) >= q->net->low_thresh; -} - -static unsigned int -inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb) -{ - struct inet_frag_queue *fq; - struct hlist_node *n; - unsigned int evicted = 0; - HLIST_HEAD(expired); - - spin_lock(&hb->chain_lock); - - hlist_for_each_entry_safe(fq, n, &hb->chain, list) { - if (!inet_fragq_should_evict(fq)) - continue; - - if (!del_timer(&fq->timer)) - continue; - - hlist_add_head(&fq->list_evictor, &expired); - ++evicted; - } - - spin_unlock(&hb->chain_lock); - - hlist_for_each_entry_safe(fq, n, &expired, list_evictor) - f->frag_expire((unsigned long) fq); - - return evicted; -} - -static void inet_frag_worker(struct work_struct *work) -{ - unsigned int budget = INETFRAGS_EVICT_BUCKETS; - unsigned int i, evicted = 0; - struct inet_frags *f; - - f = container_of(work, struct inet_frags, frags_work); - - BUILD_BUG_ON(INETFRAGS_EVICT_BUCKETS >= INETFRAGS_HASHSZ); - - local_bh_disable(); - - for (i = ACCESS_ONCE(f->next_bucket); budget; --budget) { - evicted += inet_evict_bucket(f, &f->hash[i]); - i = (i + 1) & (INETFRAGS_HASHSZ - 1); - if (evicted > INETFRAGS_EVICT_MAX) - break; - } - - f->next_bucket = i; - - local_bh_enable(); - - if (f->rebuild && inet_frag_may_rebuild(f)) - inet_frag_secret_rebuild(f); -} - -static void inet_frag_schedule_worker(struct inet_frags *f) -{ - if (unlikely(!work_pending(&f->frags_work))) - schedule_work(&f->frags_work); -} - int inet_frags_init(struct inet_frags *f) { - int i; - - INIT_WORK(&f->frags_work, inet_frag_worker); - - for (i = 0; i < INETFRAGS_HASHSZ; i++) { - struct inet_frag_bucket *hb = &f->hash[i]; - - spin_lock_init(&hb->chain_lock); - INIT_HLIST_HEAD(&hb->chain); - } - - seqlock_init(&f->rnd_seqlock); - f->last_rebuild_jiffies = 0; f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0, NULL); if (!f->frags_cachep) @@ -214,73 +59,53 @@ EXPORT_SYMBOL(inet_frags_init); void inet_frags_fini(struct inet_frags *f) { - cancel_work_sync(&f->frags_work); + /* We must wait that all inet_frag_destroy_rcu() have completed. 
*/ + rcu_barrier(); + kmem_cache_destroy(f->frags_cachep); + f->frags_cachep = NULL; } EXPORT_SYMBOL(inet_frags_fini); -void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f) +static void inet_frags_free_cb(void *ptr, void *arg) { - unsigned int seq; - int i; + struct inet_frag_queue *fq = ptr; - nf->low_thresh = 0; - -evict_again: - local_bh_disable(); - seq = read_seqbegin(&f->rnd_seqlock); - - for (i = 0; i < INETFRAGS_HASHSZ ; i++) - inet_evict_bucket(f, &f->hash[i]); - - local_bh_enable(); - cond_resched(); - - if (read_seqretry(&f->rnd_seqlock, seq) || - sum_frag_mem_limit(nf)) - goto evict_again; -} -EXPORT_SYMBOL(inet_frags_exit_net); - -static struct inet_frag_bucket * -get_frag_bucket_locked(struct inet_frag_queue *fq, struct inet_frags *f) -__acquires(hb->chain_lock) -{ - struct inet_frag_bucket *hb; - unsigned int seq, hash; - - restart: - seq = read_seqbegin(&f->rnd_seqlock); - - hash = inet_frag_hashfn(f, fq); - hb = &f->hash[hash]; + /* If we can not cancel the timer, it means this frag_queue + * is already disappearing, we have nothing to do. + * Otherwise, we own a refcount until the end of this function. + */ + if (!del_timer(&fq->timer)) + return; - spin_lock(&hb->chain_lock); - if (read_seqretry(&f->rnd_seqlock, seq)) { - spin_unlock(&hb->chain_lock); - goto restart; + spin_lock_bh(&fq->lock); + if (!(fq->flags & INET_FRAG_COMPLETE)) { + fq->flags |= INET_FRAG_COMPLETE; + atomic_dec(&fq->refcnt); } + spin_unlock_bh(&fq->lock); - return hb; + inet_frag_put(fq); } -static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f) +void inet_frags_exit_net(struct netns_frags *nf) { - struct inet_frag_bucket *hb; + nf->high_thresh = 0; /* prevent creation of new frags */ - hb = get_frag_bucket_locked(fq, f); - hlist_del(&fq->list); - fq->flags |= INET_FRAG_COMPLETE; - spin_unlock(&hb->chain_lock); + rhashtable_free_and_destroy(&nf->rhashtable, inet_frags_free_cb, NULL); } +EXPORT_SYMBOL(inet_frags_exit_net); -void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f) +void inet_frag_kill(struct inet_frag_queue *fq) { if (del_timer(&fq->timer)) atomic_dec(&fq->refcnt); if (!(fq->flags & INET_FRAG_COMPLETE)) { - fq_unlink(fq, f); + struct netns_frags *nf = fq->net; + + fq->flags |= INET_FRAG_COMPLETE; + rhashtable_remove_fast(&nf->rhashtable, &fq->node, nf->f->rhash_params); atomic_dec(&fq->refcnt); } } @@ -294,11 +119,23 @@ static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f, kfree_skb(skb); } -void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f) +static void inet_frag_destroy_rcu(struct rcu_head *head) +{ + struct inet_frag_queue *q = container_of(head, struct inet_frag_queue, + rcu); + struct inet_frags *f = q->net->f; + + if (f->destructor) + f->destructor(q); + kmem_cache_free(f->frags_cachep, q); +} + +void inet_frag_destroy(struct inet_frag_queue *q) { struct sk_buff *fp; struct netns_frags *nf; unsigned int sum, sum_truesize = 0; + struct inet_frags *f; WARN_ON(!(q->flags & INET_FRAG_COMPLETE)); WARN_ON(del_timer(&q->timer) != 0); @@ -306,64 +143,35 @@ void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f) /* Release all fragment data. 
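/* The reworked inet_frag_kill()/inet_frag_destroy() above manage the queue
 * lifetime purely by reference count: roughly one reference for the hash
 * table, one for the pending timer, one for the creating caller, freed when
 * the count hits zero.  A loosely modelled user-space sketch of that
 * lifecycle; names and the exact accounting are simplified for the demo.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct fragq {
        atomic_int refcnt;
        int complete;
};

static struct fragq *fragq_create(void)
{
        struct fragq *q = calloc(1, sizeof(*q));

        if (!q)
                exit(1);
        atomic_init(&q->refcnt, 3);     /* table + timer + caller */
        return q;
}

static void fragq_put(struct fragq *q)
{
        if (atomic_fetch_sub(&q->refcnt, 1) == 1) {
                printf("destroying queue\n");
                free(q);
        }
}

/* "kill": unhash and cancel the timer, dropping their two references. */
static void fragq_kill(struct fragq *q)
{
        if (!q->complete) {
                q->complete = 1;
                fragq_put(q);           /* timer reference */
                fragq_put(q);           /* hash table reference */
        }
}

int main(void)
{
        struct fragq *q = fragq_create();

        fragq_kill(q);
        fragq_put(q);                   /* caller reference: frees here */
        return 0;
}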
*/ fp = q->fragments; nf = q->net; - while (fp) { - struct sk_buff *xp = fp->next; - - sum_truesize += fp->truesize; - frag_kfree_skb(nf, f, fp); - fp = xp; + f = nf->f; + if (fp) { + do { + struct sk_buff *xp = fp->next; + + sum_truesize += fp->truesize; + frag_kfree_skb(nf, f, fp); + fp = xp; + } while (fp); + } else { + sum_truesize = inet_frag_rbtree_purge(&q->rb_fragments); } sum = sum_truesize + f->qsize; - if (f->destructor) - f->destructor(q); - kmem_cache_free(f->frags_cachep, q); + call_rcu(&q->rcu, inet_frag_destroy_rcu); sub_frag_mem_limit(nf, sum); } EXPORT_SYMBOL(inet_frag_destroy); -static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf, - struct inet_frag_queue *qp_in, - struct inet_frags *f, - void *arg) -{ - struct inet_frag_bucket *hb = get_frag_bucket_locked(qp_in, f); - struct inet_frag_queue *qp; - -#ifdef CONFIG_SMP - /* With SMP race we have to recheck hash table, because - * such entry could have been created on other cpu before - * we acquired hash bucket lock. - */ - hlist_for_each_entry(qp, &hb->chain, list) { - if (qp->net == nf && f->match(qp, arg)) { - atomic_inc(&qp->refcnt); - spin_unlock(&hb->chain_lock); - qp_in->flags |= INET_FRAG_COMPLETE; - inet_frag_put(qp_in, f); - return qp; - } - } -#endif - qp = qp_in; - if (!mod_timer(&qp->timer, jiffies + nf->timeout)) - atomic_inc(&qp->refcnt); - - atomic_inc(&qp->refcnt); - hlist_add_head(&qp->list, &hb->chain); - - spin_unlock(&hb->chain_lock); - - return qp; -} - static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf, struct inet_frags *f, void *arg) { struct inet_frag_queue *q; + if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) + return NULL; + q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC); if (!q) return NULL; @@ -374,75 +182,52 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf, setup_timer(&q->timer, f->frag_expire, (unsigned long)q); spin_lock_init(&q->lock); - atomic_set(&q->refcnt, 1); + atomic_set(&q->refcnt, 3); return q; } static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf, - struct inet_frags *f, - void *arg) + void *arg, + struct inet_frag_queue **prev) { + struct inet_frags *f = nf->f; struct inet_frag_queue *q; q = inet_frag_alloc(nf, f, arg); - if (!q) - return NULL; - - return inet_frag_intern(nf, q, f, arg); -} - -struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, - struct inet_frags *f, void *key, - unsigned int hash) -{ - struct inet_frag_bucket *hb; - struct inet_frag_queue *q; - int depth = 0; - - if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) { - inet_frag_schedule_worker(f); + if (!q) { + *prev = ERR_PTR(-ENOMEM); return NULL; } - - if (frag_mem_limit(nf) > nf->low_thresh) - inet_frag_schedule_worker(f); - - hash &= (INETFRAGS_HASHSZ - 1); - hb = &f->hash[hash]; - - spin_lock(&hb->chain_lock); - hlist_for_each_entry(q, &hb->chain, list) { - if (q->net == nf && f->match(q, key)) { - atomic_inc(&q->refcnt); - spin_unlock(&hb->chain_lock); - return q; - } - depth++; - } - spin_unlock(&hb->chain_lock); - - if (depth <= INETFRAGS_MAXDEPTH) - return inet_frag_create(nf, f, key); - - if (inet_frag_may_rebuild(f)) { - if (!f->rebuild) - f->rebuild = true; - inet_frag_schedule_worker(f); + mod_timer(&q->timer, jiffies + nf->timeout); + + *prev = rhashtable_lookup_get_insert_key(&nf->rhashtable, &q->key, + &q->node, f->rhash_params); + if (*prev) { + q->flags |= INET_FRAG_COMPLETE; + inet_frag_kill(q); + inet_frag_destroy(q); + return NULL; } - - return ERR_PTR(-ENOBUFS); + 
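/* inet_frag_create() above relies on rhashtable_lookup_get_insert_key():
 * either the new queue is inserted, or the entry that won the race is
 * returned and the freshly allocated one is discarded.  A single-threaded
 * sketch of that lookup-or-insert shape; the linked-list "table" and all
 * names below are invented stand-ins, with no locking or RCU.
 */
#include <stdio.h>
#include <stdlib.h>

struct entry {
        int key;
        struct entry *next;
};

static struct entry *table;

/* Insert e unless an entry with the same key exists; in that case return
 * the existing entry and leave e untouched so the caller can free it. */
static struct entry *lookup_get_insert(struct entry *e)
{
        struct entry *it;

        for (it = table; it; it = it->next)
                if (it->key == e->key)
                        return it;      /* lost the race: reuse this one */

        e->next = table;
        table = e;
        return NULL;                    /* inserted our entry */
}

static struct entry *find_or_create(int key)
{
        struct entry *e = calloc(1, sizeof(*e));
        struct entry *prev;

        if (!e)
                return NULL;
        e->key = key;

        prev = lookup_get_insert(e);
        if (prev) {
                free(e);                /* someone else inserted first */
                return prev;
        }
        return e;
}

int main(void)
{
        struct entry *a = find_or_create(42);
        struct entry *b = find_or_create(42);

        printf("same entry: %d\n", a == b);     /* 1 */
        return 0;
}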
return q; } -EXPORT_SYMBOL(inet_frag_find); +EXPORT_SYMBOL(inet_frag_create); -void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q, - const char *prefix) +/* TODO : call from rcu_read_lock() and no longer use refcount_inc_not_zero() */ +struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key) { - static const char msg[] = "inet_frag_find: Fragment hash bucket" - " list length grew over limit " __stringify(INETFRAGS_MAXDEPTH) - ". Dropping fragment.\n"; + struct inet_frag_queue *fq = NULL, *prev; - if (PTR_ERR(q) == -ENOBUFS) - net_dbg_ratelimited("%s%s", prefix, msg); + rcu_read_lock(); + prev = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params); + if (!prev) + fq = inet_frag_create(nf, key, &prev); + if (prev && !IS_ERR(prev)) { + fq = prev; + if (!atomic_inc_not_zero(&fq->refcnt)) + fq = NULL; + } + rcu_read_unlock(); + return fq; } -EXPORT_SYMBOL(inet_frag_maybe_warn_overflow); +EXPORT_SYMBOL(inet_frag_find); diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 72915658a6b1..9b09a9b5a4fe 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -58,27 +58,64 @@ static int sysctl_ipfrag_max_dist __read_mostly = 64; static const char ip_frag_cache_name[] = "ip4-frags"; -struct ipfrag_skb_cb -{ +/* Use skb->cb to track consecutive/adjacent fragments coming at + * the end of the queue. Nodes in the rb-tree queue will + * contain "runs" of one or more adjacent fragments. + * + * Invariants: + * - next_frag is NULL at the tail of a "run"; + * - the head of a "run" has the sum of all fragment lengths in frag_run_len. + */ +struct ipfrag_skb_cb { struct inet_skb_parm h; - int offset; + struct sk_buff *next_frag; + int frag_run_len; }; -#define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb)) +#define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb)) + +static void ip4_frag_init_run(struct sk_buff *skb) +{ + BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb)); + + FRAG_CB(skb)->next_frag = NULL; + FRAG_CB(skb)->frag_run_len = skb->len; +} + +/* Append skb to the last "run". */ +static void ip4_frag_append_to_last_run(struct inet_frag_queue *q, + struct sk_buff *skb) +{ + RB_CLEAR_NODE(&skb->rbnode); + FRAG_CB(skb)->next_frag = NULL; + + FRAG_CB(q->last_run_head)->frag_run_len += skb->len; + FRAG_CB(q->fragments_tail)->next_frag = skb; + q->fragments_tail = skb; +} + +/* Create a new "run" with the skb. */ +static void ip4_frag_create_run(struct inet_frag_queue *q, struct sk_buff *skb) +{ + if (q->last_run_head) + rb_link_node(&skb->rbnode, &q->last_run_head->rbnode, + &q->last_run_head->rbnode.rb_right); + else + rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node); + rb_insert_color(&skb->rbnode, &q->rb_fragments); + + ip4_frag_init_run(skb); + q->fragments_tail = skb; + q->last_run_head = skb; +} /* Describe an entry in the "incomplete datagrams" queue. 
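/* The new ipfrag_skb_cb above groups adjacent IPv4 fragments into "runs":
 * only the head of each run sits in the rb-tree, members hang off
 * next_frag, and the head accumulates the total run length.  A tiny
 * user-space sketch of that bookkeeping with invented struct names.
 */
#include <stdio.h>
#include <stdlib.h>

struct frag {
        int offset;
        int len;
        int run_len;            /* meaningful on the run head only */
        struct frag *next_frag;
};

static struct frag *new_frag(int offset, int len)
{
        struct frag *f = calloc(1, sizeof(*f));

        if (!f)
                exit(1);
        f->offset = offset;
        f->len = len;
        f->run_len = len;       /* a new run starts with just this fragment */
        return f;
}

/* Append an adjacent fragment to an existing run. */
static void append_to_run(struct frag *head, struct frag *tail, struct frag *f)
{
        head->run_len += f->len;
        tail->next_frag = f;
}

int main(void)
{
        struct frag *head = new_frag(0, 1000);
        struct frag *second = new_frag(1000, 1000);

        append_to_run(head, head, second);
        printf("run covers [%d, %d)\n", head->offset,
               head->offset + head->run_len);   /* [0, 2000) */
        return 0;
}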
*/ struct ipq { struct inet_frag_queue q; - u32 user; - __be32 saddr; - __be32 daddr; - __be16 id; - u8 protocol; u8 ecn; /* RFC3168 support */ u16 max_df_size; /* largest frag with DF set seen */ int iif; - int vif; /* L3 master device index */ unsigned int rid; struct inet_peer *peer; }; @@ -90,49 +127,9 @@ static u8 ip4_frag_ecn(u8 tos) static struct inet_frags ip4_frags; -int ip_frag_mem(struct net *net) -{ - return sum_frag_mem_limit(&net->ipv4.frags); -} - -static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, - struct net_device *dev); - -struct ip4_create_arg { - struct iphdr *iph; - u32 user; - int vif; -}; +static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb, + struct sk_buff *prev_tail, struct net_device *dev); -static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot) -{ - net_get_random_once(&ip4_frags.rnd, sizeof(ip4_frags.rnd)); - return jhash_3words((__force u32)id << 16 | prot, - (__force u32)saddr, (__force u32)daddr, - ip4_frags.rnd); -} - -static unsigned int ip4_hashfn(const struct inet_frag_queue *q) -{ - const struct ipq *ipq; - - ipq = container_of(q, struct ipq, q); - return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol); -} - -static bool ip4_frag_match(const struct inet_frag_queue *q, const void *a) -{ - const struct ipq *qp; - const struct ip4_create_arg *arg = a; - - qp = container_of(q, struct ipq, q); - return qp->id == arg->iph->id && - qp->saddr == arg->iph->saddr && - qp->daddr == arg->iph->daddr && - qp->protocol == arg->iph->protocol && - qp->user == arg->user && - qp->vif == arg->vif; -} static void ip4_frag_init(struct inet_frag_queue *q, const void *a) { @@ -141,17 +138,12 @@ static void ip4_frag_init(struct inet_frag_queue *q, const void *a) frags); struct net *net = container_of(ipv4, struct net, ipv4); - const struct ip4_create_arg *arg = a; + const struct frag_v4_compare_key *key = a; - qp->protocol = arg->iph->protocol; - qp->id = arg->iph->id; - qp->ecn = ip4_frag_ecn(arg->iph->tos); - qp->saddr = arg->iph->saddr; - qp->daddr = arg->iph->daddr; - qp->vif = arg->vif; - qp->user = arg->user; + q->key.v4 = *key; + qp->ecn = 0; qp->peer = sysctl_ipfrag_max_dist ? - inet_getpeer_v4(net->ipv4.peers, arg->iph->saddr, arg->vif, 1) : + inet_getpeer_v4(net->ipv4.peers, key->saddr, key->vif, 1) : NULL; } @@ -169,7 +161,7 @@ static void ip4_frag_free(struct inet_frag_queue *q) static void ipq_put(struct ipq *ipq) { - inet_frag_put(&ipq->q, &ip4_frags); + inet_frag_put(&ipq->q); } /* Kill ipq entry. 
It is not destroyed immediately, @@ -177,7 +169,7 @@ static void ipq_put(struct ipq *ipq) */ static void ipq_kill(struct ipq *ipq) { - inet_frag_kill(&ipq->q, &ip4_frags); + inet_frag_kill(&ipq->q); } static bool frag_expire_skip_icmp(u32 user) @@ -194,8 +186,11 @@ static bool frag_expire_skip_icmp(u32 user) */ static void ip_expire(unsigned long arg) { - struct ipq *qp; + const struct iphdr *iph; + struct sk_buff *head = NULL; struct net *net; + struct ipq *qp; + int err; qp = container_of((struct inet_frag_queue *) arg, struct ipq, q); net = container_of(qp->q.net, struct net, ipv4.frags); @@ -208,51 +203,65 @@ static void ip_expire(unsigned long arg) ipq_kill(qp); IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS); + IP_INC_STATS_BH(net, IPSTATS_MIB_REASMTIMEOUT); - if (!inet_frag_evicting(&qp->q)) { - struct sk_buff *clone, *head = qp->q.fragments; - const struct iphdr *iph; - int err; - - IP_INC_STATS_BH(net, IPSTATS_MIB_REASMTIMEOUT); + if (!(qp->q.flags & INET_FRAG_FIRST_IN)) + goto out; - if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments) + /* sk_buff::dev and sk_buff::rbnode are unionized. So we + * pull the head out of the tree in order to be able to + * deal with head->dev. + */ + if (qp->q.fragments) { + head = qp->q.fragments; + qp->q.fragments = head->next; + } else { + head = skb_rb_first(&qp->q.rb_fragments); + if (!head) goto out; + if (FRAG_CB(head)->next_frag) + rb_replace_node(&head->rbnode, + &FRAG_CB(head)->next_frag->rbnode, + &qp->q.rb_fragments); + else + rb_erase(&head->rbnode, &qp->q.rb_fragments); + memset(&head->rbnode, 0, sizeof(head->rbnode)); + barrier(); + } + if (head == qp->q.fragments_tail) + qp->q.fragments_tail = NULL; - head->dev = dev_get_by_index_rcu(net, qp->iif); - if (!head->dev) - goto out; + sub_frag_mem_limit(qp->q.net, head->truesize); + + head->dev = dev_get_by_index_rcu(net, qp->iif); + if (!head->dev) + goto out; - /* skb has no dst, perform route lookup again */ - iph = ip_hdr(head); - err = ip_route_input_noref(head, iph->daddr, iph->saddr, + /* skb has no dst, perform route lookup again */ + iph = ip_hdr(head); + err = ip_route_input_noref(head, iph->daddr, iph->saddr, iph->tos, head->dev); - if (err) - goto out; + if (err) + goto out; - /* Only an end host needs to send an ICMP - * "Fragment Reassembly Timeout" message, per RFC792. - */ - if (frag_expire_skip_icmp(qp->user) && - (skb_rtable(head)->rt_type != RTN_LOCAL)) - goto out; + /* Only an end host needs to send an ICMP + * "Fragment Reassembly Timeout" message, per RFC792. + */ + if (frag_expire_skip_icmp(qp->q.key.v4.user) && + (skb_rtable(head)->rt_type != RTN_LOCAL)) + goto out; - clone = skb_clone(head, GFP_ATOMIC); + spin_unlock(&qp->q.lock); + icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0); + goto out_rcu_unlock; - /* Send an ICMP "Fragment Reassembly Timeout" message. 
*/ - if (clone) { - spin_unlock(&qp->q.lock); - icmp_send(clone, ICMP_TIME_EXCEEDED, - ICMP_EXC_FRAGTIME, 0); - consume_skb(clone); - goto out_rcu_unlock; - } - } out: spin_unlock(&qp->q.lock); out_rcu_unlock: rcu_read_unlock(); + if (head) + kfree_skb(head); ipq_put(qp); } @@ -262,21 +271,20 @@ out_rcu_unlock: static struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user, int vif) { + struct frag_v4_compare_key key = { + .saddr = iph->saddr, + .daddr = iph->daddr, + .user = user, + .vif = vif, + .id = iph->id, + .protocol = iph->protocol, + }; struct inet_frag_queue *q; - struct ip4_create_arg arg; - unsigned int hash; - - arg.iph = iph; - arg.user = user; - arg.vif = vif; - - hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol); - q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash); - if (IS_ERR_OR_NULL(q)) { - inet_frag_maybe_warn_overflow(q, pr_fmt()); + q = inet_frag_find(&net->ipv4.frags, &key); + if (!q) return NULL; - } + return container_of(q, struct ipq, q); } @@ -296,7 +304,7 @@ static int ip_frag_too_far(struct ipq *qp) end = atomic_inc_return(&peer->rid); qp->rid = end; - rc = qp->q.fragments && (end - start) > max; + rc = qp->q.fragments_tail && (end - start) > max; if (rc) { struct net *net; @@ -310,7 +318,6 @@ static int ip_frag_too_far(struct ipq *qp) static int ip_frag_reinit(struct ipq *qp) { - struct sk_buff *fp; unsigned int sum_truesize = 0; if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) { @@ -318,21 +325,16 @@ static int ip_frag_reinit(struct ipq *qp) return -ETIMEDOUT; } - fp = qp->q.fragments; - do { - struct sk_buff *xp = fp->next; - - sum_truesize += fp->truesize; - kfree_skb(fp); - fp = xp; - } while (fp); + sum_truesize = inet_frag_rbtree_purge(&qp->q.rb_fragments); sub_frag_mem_limit(qp->q.net, sum_truesize); qp->q.flags = 0; qp->q.len = 0; qp->q.meat = 0; qp->q.fragments = NULL; + qp->q.rb_fragments = RB_ROOT; qp->q.fragments_tail = NULL; + qp->q.last_run_head = NULL; qp->iif = 0; qp->ecn = 0; @@ -342,11 +344,13 @@ static int ip_frag_reinit(struct ipq *qp) /* Add new segment to existing queue. */ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) { - struct sk_buff *prev, *next; + struct net *net = container_of(qp->q.net, struct net, ipv4.frags); + struct rb_node **rbn, *parent; + struct sk_buff *skb1, *prev_tail; + int ihl, end, skb1_run_end; struct net_device *dev; unsigned int fragsize; int flags, offset; - int ihl, end; int err = -ENOENT; u8 ecn; @@ -405,94 +409,68 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) if (err) goto err; - /* Find out which fragments are in front and at the back of us - * in the chain of fragments so far. We must know where to put - * this fragment, right? - */ - prev = qp->q.fragments_tail; - if (!prev || FRAG_CB(prev)->offset < offset) { - next = NULL; - goto found; - } - prev = NULL; - for (next = qp->q.fragments; next != NULL; next = next->next) { - if (FRAG_CB(next)->offset >= offset) - break; /* bingo! */ - prev = next; - } - -found: - /* We found where to put this one. Check for overlap with - * preceding fragment, and, if needed, align things so that - * any overlaps are eliminated. + /* Note : skb->rbnode and skb->dev share the same location. 
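/* The "skb->dev and skb->rbnode are unionized" comment in ip_expire() is
 * why the head is pulled out of the tree and its node cleared before
 * head->dev is written.  A tiny sketch of that union-aliasing rule; the
 * struct names here are invented for the demo.
 */
#include <stdio.h>
#include <string.h>

struct node { struct node *left, *right, *parent; };

struct buf {
        union {
                struct node rbnode;     /* valid while queued in the tree */
                void *dev;              /* valid once handed to the stack */
        };
        int len;
};

int main(void)
{
        struct buf b = { .len = 100 };

        b.rbnode.parent = &b.rbnode;    /* queued: node pointers live here */

        /* Detach from the tree, then make the union safe for the other view. */
        memset(&b.rbnode, 0, sizeof(b.rbnode));
        b.dev = NULL;                   /* only now is ->dev meaningful */

        printf("len=%d dev=%p\n", b.len, b.dev);
        return 0;
}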
*/ + dev = skb->dev; + /* Makes sure compiler wont do silly aliasing games */ + barrier(); + + /* RFC5722, Section 4, amended by Errata ID : 3089 + * When reassembling an IPv6 datagram, if + * one or more its constituent fragments is determined to be an + * overlapping fragment, the entire datagram (and any constituent + * fragments) MUST be silently discarded. + * + * We do the same here for IPv4 (and increment an snmp counter) but + * we do not want to drop the whole queue in response to a duplicate + * fragment. */ - if (prev) { - int i = (FRAG_CB(prev)->offset + prev->len) - offset; - - if (i > 0) { - offset += i; - err = -EINVAL; - if (end <= offset) - goto err; - err = -ENOMEM; - if (!pskb_pull(skb, i)) - goto err; - if (skb->ip_summed != CHECKSUM_UNNECESSARY) - skb->ip_summed = CHECKSUM_NONE; - } - } - err = -ENOMEM; - - while (next && FRAG_CB(next)->offset < end) { - int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */ - - if (i < next->len) { - /* Eat head of the next overlapped fragment - * and leave the loop. The next ones cannot overlap. - */ - if (!pskb_pull(next, i)) - goto err; - FRAG_CB(next)->offset += i; - qp->q.meat -= i; - if (next->ip_summed != CHECKSUM_UNNECESSARY) - next->ip_summed = CHECKSUM_NONE; - break; - } else { - struct sk_buff *free_it = next; - - /* Old fragment is completely overridden with - * new one drop it. - */ - next = next->next; - - if (prev) - prev->next = next; + err = -EINVAL; + /* Find out where to put this fragment. */ + prev_tail = qp->q.fragments_tail; + if (!prev_tail) + ip4_frag_create_run(&qp->q, skb); /* First fragment. */ + else if (prev_tail->ip_defrag_offset + prev_tail->len < end) { + /* This is the common case: skb goes to the end. */ + /* Detect and discard overlaps. */ + if (offset < prev_tail->ip_defrag_offset + prev_tail->len) + goto discard_qp; + if (offset == prev_tail->ip_defrag_offset + prev_tail->len) + ip4_frag_append_to_last_run(&qp->q, skb); + else + ip4_frag_create_run(&qp->q, skb); + } else { + /* Binary search. Note that skb can become the first fragment, + * but not the last (covered above). + */ + rbn = &qp->q.rb_fragments.rb_node; + do { + parent = *rbn; + skb1 = rb_to_skb(parent); + skb1_run_end = skb1->ip_defrag_offset + + FRAG_CB(skb1)->frag_run_len; + if (end <= skb1->ip_defrag_offset) + rbn = &parent->rb_left; + else if (offset >= skb1_run_end) + rbn = &parent->rb_right; + else if (offset >= skb1->ip_defrag_offset && + end <= skb1_run_end) + goto err; /* No new data, potential duplicate */ else - qp->q.fragments = next; - - qp->q.meat -= free_it->len; - sub_frag_mem_limit(qp->q.net, free_it->truesize); - kfree_skb(free_it); - } + goto discard_qp; /* Found an overlap */ + } while (*rbn); + /* Here we have parent properly set, and rbn pointing to + * one of its NULL left/right children. Insert skb. + */ + ip4_frag_init_run(skb); + rb_link_node(&skb->rbnode, parent, rbn); + rb_insert_color(&skb->rbnode, &qp->q.rb_fragments); } - FRAG_CB(skb)->offset = offset; - - /* Insert this fragment in the chain of fragments. 
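/* The rb-tree descent added above classifies each new fragment interval
 * against an existing run: left, right, duplicate, or overlapping (which
 * drops the whole queue and bumps ReasmOverlaps).  A stand-alone sketch of
 * that comparison on half-open byte ranges.
 */
#include <stdio.h>

enum place { BEFORE, AFTER, DUPLICATE, OVERLAP };

/* Classify fragment [off, end) against run [run_off, run_end). */
static enum place classify(int off, int end, int run_off, int run_end)
{
        if (end <= run_off)
                return BEFORE;          /* descend left  */
        if (off >= run_end)
                return AFTER;           /* descend right */
        if (off >= run_off && end <= run_end)
                return DUPLICATE;       /* no new data   */
        return OVERLAP;                 /* discard the whole queue */
}

int main(void)
{
        /* existing run covers bytes [1000, 3000) */
        printf("%d\n", classify(0, 1000, 1000, 3000));    /* BEFORE (0)    */
        printf("%d\n", classify(3000, 4000, 1000, 3000)); /* AFTER (1)     */
        printf("%d\n", classify(1500, 2000, 1000, 3000)); /* DUPLICATE (2) */
        printf("%d\n", classify(500, 1500, 1000, 3000));  /* OVERLAP (3)   */
        return 0;
}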
*/ - skb->next = next; - if (!next) - qp->q.fragments_tail = skb; - if (prev) - prev->next = skb; - else - qp->q.fragments = skb; - - dev = skb->dev; - if (dev) { + if (dev) qp->iif = dev->ifindex; - skb->dev = NULL; - } + skb->ip_defrag_offset = offset; + qp->q.stamp = skb->tstamp; qp->q.meat += skb->len; qp->ecn |= ecn; @@ -514,7 +492,7 @@ found: unsigned long orefdst = skb->_skb_refdst; skb->_skb_refdst = 0UL; - err = ip_frag_reasm(qp, prev, dev); + err = ip_frag_reasm(qp, skb, prev_tail, dev); skb->_skb_refdst = orefdst; return err; } @@ -522,20 +500,23 @@ found: skb_dst_drop(skb); return -EINPROGRESS; +discard_qp: + inet_frag_kill(&qp->q); + IP_INC_STATS_BH(net, IPSTATS_MIB_REASM_OVERLAPS); err: kfree_skb(skb); return err; } - /* Build a new IP datagram from all its fragments. */ - -static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, - struct net_device *dev) +static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb, + struct sk_buff *prev_tail, struct net_device *dev) { struct net *net = container_of(qp->q.net, struct net, ipv4.frags); struct iphdr *iph; - struct sk_buff *fp, *head = qp->q.fragments; + struct sk_buff *fp, *head = skb_rb_first(&qp->q.rb_fragments); + struct sk_buff **nextp; /* To build frag_list. */ + struct rb_node *rbn; int len; int ihlen; int err; @@ -549,26 +530,27 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, goto out_fail; } /* Make the one we just received the head. */ - if (prev) { - head = prev->next; - fp = skb_clone(head, GFP_ATOMIC); + if (head != skb) { + fp = skb_clone(skb, GFP_ATOMIC); if (!fp) goto out_nomem; - - fp->next = head->next; - if (!fp->next) + FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag; + if (RB_EMPTY_NODE(&skb->rbnode)) + FRAG_CB(prev_tail)->next_frag = fp; + else + rb_replace_node(&skb->rbnode, &fp->rbnode, + &qp->q.rb_fragments); + if (qp->q.fragments_tail == skb) qp->q.fragments_tail = fp; - prev->next = fp; - - skb_morph(head, qp->q.fragments); - head->next = qp->q.fragments->next; - - consume_skb(qp->q.fragments); - qp->q.fragments = head; + skb_morph(skb, head); + FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag; + rb_replace_node(&head->rbnode, &skb->rbnode, + &qp->q.rb_fragments); + consume_skb(head); + head = skb; } - WARN_ON(!head); - WARN_ON(FRAG_CB(head)->offset != 0); + WARN_ON(head->ip_defrag_offset != 0); /* Allocate a new buffer for the datagram. 
*/ ihlen = ip_hdrlen(head); @@ -592,35 +574,61 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, clone = alloc_skb(0, GFP_ATOMIC); if (!clone) goto out_nomem; - clone->next = head->next; - head->next = clone; skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; skb_frag_list_init(head); for (i = 0; i < skb_shinfo(head)->nr_frags; i++) plen += skb_frag_size(&skb_shinfo(head)->frags[i]); clone->len = clone->data_len = head->data_len - plen; - head->data_len -= clone->len; - head->len -= clone->len; + head->truesize += clone->truesize; clone->csum = 0; clone->ip_summed = head->ip_summed; add_frag_mem_limit(qp->q.net, clone->truesize); + skb_shinfo(head)->frag_list = clone; + nextp = &clone->next; + } else { + nextp = &skb_shinfo(head)->frag_list; } - skb_shinfo(head)->frag_list = head->next; skb_push(head, head->data - skb_network_header(head)); - for (fp=head->next; fp; fp = fp->next) { - head->data_len += fp->len; - head->len += fp->len; - if (head->ip_summed != fp->ip_summed) - head->ip_summed = CHECKSUM_NONE; - else if (head->ip_summed == CHECKSUM_COMPLETE) - head->csum = csum_add(head->csum, fp->csum); - head->truesize += fp->truesize; + /* Traverse the tree in order, to build frag_list. */ + fp = FRAG_CB(head)->next_frag; + rbn = rb_next(&head->rbnode); + rb_erase(&head->rbnode, &qp->q.rb_fragments); + while (rbn || fp) { + /* fp points to the next sk_buff in the current run; + * rbn points to the next run. + */ + /* Go through the current run. */ + while (fp) { + *nextp = fp; + nextp = &fp->next; + fp->prev = NULL; + memset(&fp->rbnode, 0, sizeof(fp->rbnode)); + fp->sk = NULL; + head->data_len += fp->len; + head->len += fp->len; + if (head->ip_summed != fp->ip_summed) + head->ip_summed = CHECKSUM_NONE; + else if (head->ip_summed == CHECKSUM_COMPLETE) + head->csum = csum_add(head->csum, fp->csum); + head->truesize += fp->truesize; + fp = FRAG_CB(fp)->next_frag; + } + /* Move to the next run. 
*/ + if (rbn) { + struct rb_node *rbnext = rb_next(rbn); + + fp = rb_to_skb(rbn); + rb_erase(rbn, &qp->q.rb_fragments); + rbn = rbnext; + } } sub_frag_mem_limit(qp->q.net, head->truesize); + *nextp = NULL; head->next = NULL; + head->prev = NULL; head->dev = dev; head->tstamp = qp->q.stamp; IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size); @@ -648,7 +656,9 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS); qp->q.fragments = NULL; + qp->q.rb_fragments = RB_ROOT; qp->q.fragments_tail = NULL; + qp->q.last_run_head = NULL; return 0; out_nomem: @@ -656,7 +666,7 @@ out_nomem: err = -ENOMEM; goto out_fail; out_oversize: - net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->saddr); + net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->q.key.v4.saddr); out_fail: IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS); return err; @@ -734,25 +744,46 @@ struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user) } EXPORT_SYMBOL(ip_check_defrag); +unsigned int inet_frag_rbtree_purge(struct rb_root *root) +{ + struct rb_node *p = rb_first(root); + unsigned int sum = 0; + + while (p) { + struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode); + + p = rb_next(p); + rb_erase(&skb->rbnode, root); + while (skb) { + struct sk_buff *next = FRAG_CB(skb)->next_frag; + + sum += skb->truesize; + kfree_skb(skb); + skb = next; + } + } + return sum; +} +EXPORT_SYMBOL(inet_frag_rbtree_purge); + #ifdef CONFIG_SYSCTL -static int zero; +static int dist_min; static struct ctl_table ip4_frags_ns_ctl_table[] = { { .procname = "ipfrag_high_thresh", .data = &init_net.ipv4.frags.high_thresh, - .maxlen = sizeof(int), + .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = proc_dointvec_minmax, + .proc_handler = proc_doulongvec_minmax, .extra1 = &init_net.ipv4.frags.low_thresh }, { .procname = "ipfrag_low_thresh", .data = &init_net.ipv4.frags.low_thresh, - .maxlen = sizeof(int), + .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = &zero, + .proc_handler = proc_doulongvec_minmax, .extra2 = &init_net.ipv4.frags.high_thresh }, { @@ -781,7 +812,7 @@ static struct ctl_table ip4_frags_ctl_table[] = { .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, - .extra1 = &zero + .extra1 = &dist_min, }, { } }; @@ -853,6 +884,8 @@ static void __init ip4_frags_ctl_register(void) static int __net_init ipv4_frags_init_net(struct net *net) { + int res; + /* Fragment cache limits. 
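/* inet_frag_rbtree_purge() above walks every run head, then the next_frag
 * chain inside each run, freeing buffers and summing their truesize so the
 * memory accounting can be charged back in one go.  A simplified sketch of
 * the same two-level walk; an array stands in for the rb-tree and all
 * names are invented for the demo.
 */
#include <stdio.h>
#include <stdlib.h>

struct frag {
        unsigned int truesize;
        struct frag *next_frag;
};

static struct frag *new_frag(unsigned int truesize, struct frag *next)
{
        struct frag *f = malloc(sizeof(*f));

        if (!f)
                exit(1);
        f->truesize = truesize;
        f->next_frag = next;
        return f;
}

static unsigned int purge(struct frag **heads, int nheads)
{
        unsigned int sum = 0;
        int i;

        for (i = 0; i < nheads; i++) {
                struct frag *f = heads[i];

                while (f) {
                        struct frag *next = f->next_frag;

                        sum += f->truesize;
                        free(f);
                        f = next;
                }
        }
        return sum;
}

int main(void)
{
        struct frag *heads[2];

        heads[0] = new_frag(256, new_frag(256, NULL));  /* run of two */
        heads[1] = new_frag(512, NULL);                 /* run of one */

        printf("freed %u bytes\n", purge(heads, 2));    /* 1024 */
        return 0;
}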
* * The fragment memory accounting code, (tries to) account for @@ -876,15 +909,21 @@ static int __net_init ipv4_frags_init_net(struct net *net) */ net->ipv4.frags.timeout = IP_FRAG_TIME; - inet_frags_init_net(&net->ipv4.frags); + net->ipv4.frags.f = &ip4_frags; - return ip4_frags_ns_ctl_register(net); + res = inet_frags_init_net(&net->ipv4.frags); + if (res < 0) + return res; + res = ip4_frags_ns_ctl_register(net); + if (res < 0) + inet_frags_exit_net(&net->ipv4.frags); + return res; } static void __net_exit ipv4_frags_exit_net(struct net *net) { ip4_frags_ns_ctl_unregister(net); - inet_frags_exit_net(&net->ipv4.frags, &ip4_frags); + inet_frags_exit_net(&net->ipv4.frags); } static struct pernet_operations ip4_frags_ops = { @@ -892,18 +931,50 @@ static struct pernet_operations ip4_frags_ops = { .exit = ipv4_frags_exit_net, }; + +static u32 ip4_key_hashfn(const void *data, u32 len, u32 seed) +{ + return jhash2(data, + sizeof(struct frag_v4_compare_key) / sizeof(u32), seed); +} + +static u32 ip4_obj_hashfn(const void *data, u32 len, u32 seed) +{ + const struct inet_frag_queue *fq = data; + + return jhash2((const u32 *)&fq->key.v4, + sizeof(struct frag_v4_compare_key) / sizeof(u32), seed); +} + +static int ip4_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr) +{ + const struct frag_v4_compare_key *key = arg->key; + const struct inet_frag_queue *fq = ptr; + + return !!memcmp(&fq->key, key, sizeof(*key)); +} + +static const struct rhashtable_params ip4_rhash_params = { + .head_offset = offsetof(struct inet_frag_queue, node), + .key_offset = offsetof(struct inet_frag_queue, key), + .key_len = sizeof(struct frag_v4_compare_key), + .hashfn = ip4_key_hashfn, + .obj_hashfn = ip4_obj_hashfn, + .obj_cmpfn = ip4_obj_cmpfn, + .automatic_shrinking = true, +}; + void __init ipfrag_init(void) { - ip4_frags_ctl_register(); - register_pernet_subsys(&ip4_frags_ops); - ip4_frags.hashfn = ip4_hashfn; ip4_frags.constructor = ip4_frag_init; ip4_frags.destructor = ip4_frag_free; ip4_frags.skb_free = NULL; ip4_frags.qsize = sizeof(struct ipq); - ip4_frags.match = ip4_frag_match; ip4_frags.frag_expire = ip_expire; ip4_frags.frags_cache_name = ip_frag_cache_name; + ip4_frags.rhash_params = ip4_rhash_params; if (inet_frags_init(&ip4_frags)) panic("IP: failed to allocate ip4_frags cache\n"); + ip4_frags_ctl_register(); + register_pernet_subsys(&ip4_frags_ops); } diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index b1209b63381f..eb1834f2682f 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -444,6 +444,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, goto drop; } + iph = ip_hdr(skb); skb->transport_header = skb->network_header + iph->ihl*4; /* Remove any debris in the socket control block */ diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 3abd9d7a3adf..b001ad668108 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -52,7 +52,6 @@ static int sockstat_seq_show(struct seq_file *seq, void *v) { struct net *net = seq->private; - unsigned int frag_mem; int orphans, sockets; local_bh_disable(); @@ -72,8 +71,9 @@ static int sockstat_seq_show(struct seq_file *seq, void *v) sock_prot_inuse_get(net, &udplite_prot)); seq_printf(seq, "RAW: inuse %d\n", sock_prot_inuse_get(net, &raw_prot)); - frag_mem = ip_frag_mem(net); - seq_printf(seq, "FRAG: inuse %u memory %u\n", !!frag_mem, frag_mem); + seq_printf(seq, "FRAG: inuse %u memory %lu\n", + atomic_read(&net->ipv4.frags.rhashtable.nelems), + frag_mem_limit(&net->ipv4.frags)); return 0; } @@ -132,6 +132,7 @@ 
static const struct snmp_mib snmp4_ipextstats_list[] = { SNMP_MIB_ITEM("InECT1Pkts", IPSTATS_MIB_ECT1PKTS), SNMP_MIB_ITEM("InECT0Pkts", IPSTATS_MIB_ECT0PKTS), SNMP_MIB_ITEM("InCEPkts", IPSTATS_MIB_CEPKTS), + SNMP_MIB_ITEM("ReasmOverlaps", IPSTATS_MIB_REASM_OVERLAPS), SNMP_MIB_SENTINEL }; diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 043c30d08220..28e03fce9e89 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -361,6 +361,9 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) err = -EINVAL; goto out_unlock; } + } + + if (sk->sk_bound_dev_if) { dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if); if (!dev) { err = -ENODEV; diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 5a9ae56e7868..664c84e47bab 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -64,7 +64,6 @@ struct nf_ct_frag6_skb_cb static struct inet_frags nf_frags; #ifdef CONFIG_SYSCTL -static int zero; static struct ctl_table nf_ct_frag6_sysctl_table[] = { { @@ -77,18 +76,17 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = { { .procname = "nf_conntrack_frag6_low_thresh", .data = &init_net.nf_frag.frags.low_thresh, - .maxlen = sizeof(unsigned int), + .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = &zero, + .proc_handler = proc_doulongvec_minmax, .extra2 = &init_net.nf_frag.frags.high_thresh }, { .procname = "nf_conntrack_frag6_high_thresh", .data = &init_net.nf_frag.frags.high_thresh, - .maxlen = sizeof(unsigned int), + .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = proc_dointvec_minmax, + .proc_handler = proc_doulongvec_minmax, .extra1 = &init_net.nf_frag.frags.low_thresh }, { } @@ -153,23 +151,6 @@ static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h) return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK); } -static unsigned int nf_hash_frag(__be32 id, const struct in6_addr *saddr, - const struct in6_addr *daddr) -{ - net_get_random_once(&nf_frags.rnd, sizeof(nf_frags.rnd)); - return jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr), - (__force u32)id, nf_frags.rnd); -} - - -static unsigned int nf_hashfn(const struct inet_frag_queue *q) -{ - const struct frag_queue *nq; - - nq = container_of(q, struct frag_queue, q); - return nf_hash_frag(nq->id, &nq->saddr, &nq->daddr); -} - static void nf_skb_free(struct sk_buff *skb) { if (NFCT_FRAG6_CB(skb)->orig) @@ -184,34 +165,26 @@ static void nf_ct_frag6_expire(unsigned long data) fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q); net = container_of(fq->q.net, struct net, nf_frag.frags); - ip6_expire_frag_queue(net, fq, &nf_frags); + ip6_expire_frag_queue(net, fq); } /* Creation primitives. 
*/ -static inline struct frag_queue *fq_find(struct net *net, __be32 id, - u32 user, struct in6_addr *src, - struct in6_addr *dst, int iif, u8 ecn) +static struct frag_queue *fq_find(struct net *net, __be32 id, u32 user, + const struct ipv6hdr *hdr, int iif) { + struct frag_v6_compare_key key = { + .id = id, + .saddr = hdr->saddr, + .daddr = hdr->daddr, + .user = user, + .iif = iif, + }; struct inet_frag_queue *q; - struct ip6_create_arg arg; - unsigned int hash; - - arg.id = id; - arg.user = user; - arg.src = src; - arg.dst = dst; - arg.iif = iif; - arg.ecn = ecn; - - local_bh_disable(); - hash = nf_hash_frag(id, src, dst); - - q = inet_frag_find(&net->nf_frag.frags, &nf_frags, &arg, hash); - local_bh_enable(); - if (IS_ERR_OR_NULL(q)) { - inet_frag_maybe_warn_overflow(q, pr_fmt()); + + q = inet_frag_find(&net->nf_frag.frags, &key); + if (!q) return NULL; - } + return container_of(q, struct frag_queue, q); } @@ -362,7 +335,7 @@ found: return 0; discard_fq: - inet_frag_kill(&fq->q, &nf_frags); + inet_frag_kill(&fq->q); err: return -1; } @@ -383,7 +356,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev) int payload_len; u8 ecn; - inet_frag_kill(&fq->q, &nf_frags); + inet_frag_kill(&fq->q); WARN_ON(head == NULL); WARN_ON(NFCT_FRAG6_CB(head)->offset != 0); @@ -454,6 +427,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev) else if (head->ip_summed == CHECKSUM_COMPLETE) head->csum = csum_add(head->csum, fp->csum); head->truesize += fp->truesize; + fp->sk = NULL; } sub_frag_mem_limit(fq->q.net, head->truesize); @@ -472,6 +446,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev) head->csum); fq->q.fragments = NULL; + fq->q.rb_fragments = RB_ROOT; fq->q.fragments_tail = NULL; /* all original skbs are linked into the NFCT_FRAG6_CB(head).orig */ @@ -601,9 +576,13 @@ struct sk_buff *nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 use hdr = ipv6_hdr(clone); fhdr = (struct frag_hdr *)skb_transport_header(clone); + if (clone->len - skb_network_offset(clone) < IPV6_MIN_MTU && + fhdr->frag_off & htons(IP6_MF)) + goto ret_orig; + skb_orphan(skb); - fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr, - skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr)); + fq = fq_find(net, fhdr->identification, user, hdr, + skb->dev ? 
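/* nf_ct_frag6_gather() above now refuses to queue a fragment that is both
 * smaller than the IPv6 minimum MTU and has the more-fragments bit set:
 * only the final fragment may legitimately be that short.  A minimal
 * sketch of the check; host byte order is used here, whereas the kernel
 * tests htons(IP6_MF) against the on-wire field.
 */
#include <stdbool.h>
#include <stdio.h>

#define IPV6_MIN_MTU 1280
#define IP6_MF       0x0001     /* more-fragments bit, host order in this demo */

static bool frag_too_small(unsigned int pkt_len, unsigned int frag_off)
{
        return pkt_len < IPV6_MIN_MTU && (frag_off & IP6_MF);
}

int main(void)
{
        printf("%d\n", frag_too_small(400, IP6_MF));    /* 1: reject           */
        printf("%d\n", frag_too_small(400, 0));         /* 0: last frag may be short */
        printf("%d\n", frag_too_small(1400, IP6_MF));   /* 0: big enough       */
        return 0;
}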
skb->dev->ifindex : 0); if (fq == NULL) { pr_debug("Can't find and can't create new queue\n"); goto ret_orig; @@ -614,7 +593,7 @@ struct sk_buff *nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 use if (nf_ct_frag6_queue(fq, clone, fhdr, nhoff) < 0) { spin_unlock_bh(&fq->q.lock); pr_debug("Can't insert skb to queue\n"); - inet_frag_put(&fq->q, &nf_frags); + inet_frag_put(&fq->q); goto ret_orig; } @@ -626,7 +605,7 @@ struct sk_buff *nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 use } spin_unlock_bh(&fq->q.lock); - inet_frag_put(&fq->q, &nf_frags); + inet_frag_put(&fq->q); return ret_skb; ret_orig: @@ -650,18 +629,26 @@ EXPORT_SYMBOL_GPL(nf_ct_frag6_consume_orig); static int nf_ct_net_init(struct net *net) { + int res; + net->nf_frag.frags.high_thresh = IPV6_FRAG_HIGH_THRESH; net->nf_frag.frags.low_thresh = IPV6_FRAG_LOW_THRESH; net->nf_frag.frags.timeout = IPV6_FRAG_TIMEOUT; - inet_frags_init_net(&net->nf_frag.frags); - - return nf_ct_frag6_sysctl_register(net); + net->nf_frag.frags.f = &nf_frags; + + res = inet_frags_init_net(&net->nf_frag.frags); + if (res < 0) + return res; + res = nf_ct_frag6_sysctl_register(net); + if (res < 0) + inet_frags_exit_net(&net->nf_frag.frags); + return res; } static void nf_ct_net_exit(struct net *net) { nf_ct_frags6_sysctl_unregister(net); - inet_frags_exit_net(&net->nf_frag.frags, &nf_frags); + inet_frags_exit_net(&net->nf_frag.frags); } static struct pernet_operations nf_ct_net_ops = { @@ -673,14 +660,13 @@ int nf_ct_frag6_init(void) { int ret = 0; - nf_frags.hashfn = nf_hashfn; nf_frags.constructor = ip6_frag_init; nf_frags.destructor = NULL; nf_frags.skb_free = nf_skb_free; nf_frags.qsize = sizeof(struct frag_queue); - nf_frags.match = ip6_frag_match; nf_frags.frag_expire = nf_ct_frag6_expire; nf_frags.frags_cache_name = nf_frags_cache_name; + nf_frags.rhash_params = ip6_rhash_params; ret = inet_frags_init(&nf_frags); if (ret) goto out; diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c index 679253d0af84..73e766e7bc37 100644 --- a/net/ipv6/proc.c +++ b/net/ipv6/proc.c @@ -33,7 +33,6 @@ static int sockstat6_seq_show(struct seq_file *seq, void *v) { struct net *net = seq->private; - unsigned int frag_mem = ip6_frag_mem(net); seq_printf(seq, "TCP6: inuse %d\n", sock_prot_inuse_get(net, &tcpv6_prot)); @@ -43,7 +42,9 @@ static int sockstat6_seq_show(struct seq_file *seq, void *v) sock_prot_inuse_get(net, &udplitev6_prot)); seq_printf(seq, "RAW6: inuse %d\n", sock_prot_inuse_get(net, &rawv6_prot)); - seq_printf(seq, "FRAG6: inuse %u memory %u\n", !!frag_mem, frag_mem); + seq_printf(seq, "FRAG6: inuse %u memory %lu\n", + atomic_read(&net->ipv6.frags.rhashtable.nelems), + frag_mem_limit(&net->ipv6.frags)); return 0; } diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 58f2139ebb5e..ec917f58d105 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -79,94 +79,58 @@ static struct inet_frags ip6_frags; static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_device *dev); -/* - * callers should be careful not to use the hash value outside the ipfrag_lock - * as doing so could race with ipfrag_hash_rnd being recalculated. 
- */ -static unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr, - const struct in6_addr *daddr) -{ - net_get_random_once(&ip6_frags.rnd, sizeof(ip6_frags.rnd)); - return jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr), - (__force u32)id, ip6_frags.rnd); -} - -static unsigned int ip6_hashfn(const struct inet_frag_queue *q) -{ - const struct frag_queue *fq; - - fq = container_of(q, struct frag_queue, q); - return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr); -} - -bool ip6_frag_match(const struct inet_frag_queue *q, const void *a) -{ - const struct frag_queue *fq; - const struct ip6_create_arg *arg = a; - - fq = container_of(q, struct frag_queue, q); - return fq->id == arg->id && - fq->user == arg->user && - ipv6_addr_equal(&fq->saddr, arg->src) && - ipv6_addr_equal(&fq->daddr, arg->dst) && - (arg->iif == fq->iif || - !(ipv6_addr_type(arg->dst) & (IPV6_ADDR_MULTICAST | - IPV6_ADDR_LINKLOCAL))); -} -EXPORT_SYMBOL(ip6_frag_match); - void ip6_frag_init(struct inet_frag_queue *q, const void *a) { struct frag_queue *fq = container_of(q, struct frag_queue, q); - const struct ip6_create_arg *arg = a; + const struct frag_v6_compare_key *key = a; - fq->id = arg->id; - fq->user = arg->user; - fq->saddr = *arg->src; - fq->daddr = *arg->dst; - fq->ecn = arg->ecn; + q->key.v6 = *key; + fq->ecn = 0; } EXPORT_SYMBOL(ip6_frag_init); -void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq, - struct inet_frags *frags) +void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq) { struct net_device *dev = NULL; + struct sk_buff *head; + rcu_read_lock(); spin_lock(&fq->q.lock); if (fq->q.flags & INET_FRAG_COMPLETE) goto out; - inet_frag_kill(&fq->q, frags); + inet_frag_kill(&fq->q); - rcu_read_lock(); dev = dev_get_by_index_rcu(net, fq->iif); if (!dev) - goto out_rcu_unlock; + goto out; IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS); - - if (inet_frag_evicting(&fq->q)) - goto out_rcu_unlock; - IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT); /* Don't send error if the first segment did not arrive. */ - if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !fq->q.fragments) - goto out_rcu_unlock; + head = fq->q.fragments; + if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !head) + goto out; /* But use as source device on which LAST ARRIVED * segment was received. And do not use fq->dev * pointer directly, device might already disappeared. 
*/ - fq->q.fragments->dev = dev; - icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0); -out_rcu_unlock: - rcu_read_unlock(); + head->dev = dev; + skb_get(head); + spin_unlock(&fq->q.lock); + + icmpv6_send(head, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0); + kfree_skb(head); + goto out_rcu_unlock; + out: spin_unlock(&fq->q.lock); - inet_frag_put(&fq->q, frags); +out_rcu_unlock: + rcu_read_unlock(); + inet_frag_put(&fq->q); } EXPORT_SYMBOL(ip6_expire_frag_queue); @@ -178,31 +142,29 @@ static void ip6_frag_expire(unsigned long data) fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q); net = container_of(fq->q.net, struct net, ipv6.frags); - ip6_expire_frag_queue(net, fq, &ip6_frags); + ip6_expire_frag_queue(net, fq); } static struct frag_queue * -fq_find(struct net *net, __be32 id, const struct in6_addr *src, - const struct in6_addr *dst, int iif, u8 ecn) +fq_find(struct net *net, __be32 id, const struct ipv6hdr *hdr, int iif) { + struct frag_v6_compare_key key = { + .id = id, + .saddr = hdr->saddr, + .daddr = hdr->daddr, + .user = IP6_DEFRAG_LOCAL_DELIVER, + .iif = iif, + }; struct inet_frag_queue *q; - struct ip6_create_arg arg; - unsigned int hash; - arg.id = id; - arg.user = IP6_DEFRAG_LOCAL_DELIVER; - arg.src = src; - arg.dst = dst; - arg.iif = iif; - arg.ecn = ecn; + if (!(ipv6_addr_type(&hdr->daddr) & (IPV6_ADDR_MULTICAST | + IPV6_ADDR_LINKLOCAL))) + key.iif = 0; - hash = inet6_hash_frag(id, src, dst); - - q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash); - if (IS_ERR_OR_NULL(q)) { - inet_frag_maybe_warn_overflow(q, pr_fmt()); + q = inet_frag_find(&net->ipv6.frags, &key); + if (!q) return NULL; - } + return container_of(q, struct frag_queue, q); } @@ -359,7 +321,7 @@ found: return -1; discard_fq: - inet_frag_kill(&fq->q, &ip6_frags); + inet_frag_kill(&fq->q); err: IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS); @@ -386,7 +348,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, int sum_truesize; u8 ecn; - inet_frag_kill(&fq->q, &ip6_frags); + inet_frag_kill(&fq->q); ecn = ip_frag_ecn_table[fq->ecn]; if (unlikely(ecn == 0xff)) @@ -503,6 +465,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS); rcu_read_unlock(); fq->q.fragments = NULL; + fq->q.rb_fragments = RB_ROOT; fq->q.fragments_tail = NULL; return 1; @@ -524,6 +487,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb) struct frag_queue *fq; const struct ipv6hdr *hdr = ipv6_hdr(skb); struct net *net = dev_net(skb_dst(skb)->dev); + int iif; if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED) goto fail_hdr; @@ -552,17 +516,22 @@ static int ipv6_frag_rcv(struct sk_buff *skb) return 1; } - fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr, - skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr)); + if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU && + fhdr->frag_off & htons(IP6_MF)) + goto fail_hdr; + + iif = skb->dev ? 
skb->dev->ifindex : 0; + fq = fq_find(net, fhdr->identification, hdr, iif); if (fq) { int ret; spin_lock(&fq->q.lock); + fq->iif = iif; ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff); spin_unlock(&fq->q.lock); - inet_frag_put(&fq->q, &ip6_frags); + inet_frag_put(&fq->q); return ret; } @@ -583,24 +552,22 @@ static const struct inet6_protocol frag_protocol = { }; #ifdef CONFIG_SYSCTL -static int zero; static struct ctl_table ip6_frags_ns_ctl_table[] = { { .procname = "ip6frag_high_thresh", .data = &init_net.ipv6.frags.high_thresh, - .maxlen = sizeof(int), + .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = proc_dointvec_minmax, + .proc_handler = proc_doulongvec_minmax, .extra1 = &init_net.ipv6.frags.low_thresh }, { .procname = "ip6frag_low_thresh", .data = &init_net.ipv6.frags.low_thresh, - .maxlen = sizeof(int), + .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = &zero, + .proc_handler = proc_doulongvec_minmax, .extra2 = &init_net.ipv6.frags.high_thresh }, { @@ -708,19 +675,27 @@ static void ip6_frags_sysctl_unregister(void) static int __net_init ipv6_frags_init_net(struct net *net) { + int res; + net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH; net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH; net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT; + net->ipv6.frags.f = &ip6_frags; - inet_frags_init_net(&net->ipv6.frags); + res = inet_frags_init_net(&net->ipv6.frags); + if (res < 0) + return res; - return ip6_frags_ns_sysctl_register(net); + res = ip6_frags_ns_sysctl_register(net); + if (res < 0) + inet_frags_exit_net(&net->ipv6.frags); + return res; } static void __net_exit ipv6_frags_exit_net(struct net *net) { ip6_frags_ns_sysctl_unregister(net); - inet_frags_exit_net(&net->ipv6.frags, &ip6_frags); + inet_frags_exit_net(&net->ipv6.frags); } static struct pernet_operations ip6_frags_ops = { @@ -728,14 +703,55 @@ static struct pernet_operations ip6_frags_ops = { .exit = ipv6_frags_exit_net, }; +static u32 ip6_key_hashfn(const void *data, u32 len, u32 seed) +{ + return jhash2(data, + sizeof(struct frag_v6_compare_key) / sizeof(u32), seed); +} + +static u32 ip6_obj_hashfn(const void *data, u32 len, u32 seed) +{ + const struct inet_frag_queue *fq = data; + + return jhash2((const u32 *)&fq->key.v6, + sizeof(struct frag_v6_compare_key) / sizeof(u32), seed); +} + +static int ip6_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr) +{ + const struct frag_v6_compare_key *key = arg->key; + const struct inet_frag_queue *fq = ptr; + + return !!memcmp(&fq->key, key, sizeof(*key)); +} + +const struct rhashtable_params ip6_rhash_params = { + .head_offset = offsetof(struct inet_frag_queue, node), + .hashfn = ip6_key_hashfn, + .obj_hashfn = ip6_obj_hashfn, + .obj_cmpfn = ip6_obj_cmpfn, + .automatic_shrinking = true, +}; +EXPORT_SYMBOL(ip6_rhash_params); + int __init ipv6_frag_init(void) { int ret; - ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT); + ip6_frags.constructor = ip6_frag_init; + ip6_frags.destructor = NULL; + ip6_frags.qsize = sizeof(struct frag_queue); + ip6_frags.frag_expire = ip6_frag_expire; + ip6_frags.frags_cache_name = ip6_frag_cache_name; + ip6_frags.rhash_params = ip6_rhash_params; + ret = inet_frags_init(&ip6_frags); if (ret) goto out; + ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT); + if (ret) + goto err_protocol; + ret = ip6_frags_sysctl_register(); if (ret) goto err_sysctl; @@ -744,17 +760,6 @@ int __init ipv6_frag_init(void) if (ret) goto err_pernet; - ip6_frags.hashfn = 
ip6_hashfn; - ip6_frags.constructor = ip6_frag_init; - ip6_frags.destructor = NULL; - ip6_frags.skb_free = NULL; - ip6_frags.qsize = sizeof(struct frag_queue); - ip6_frags.match = ip6_frag_match; - ip6_frags.frag_expire = ip6_frag_expire; - ip6_frags.frags_cache_name = ip6_frag_cache_name; - ret = inet_frags_init(&ip6_frags); - if (ret) - goto err_pernet; out: return ret; @@ -762,6 +767,8 @@ err_pernet: ip6_frags_sysctl_unregister(); err_sysctl: inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT); +err_protocol: + inet_frags_fini(&ip6_frags); goto out; } diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 591d18785285..429dbb064240 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -83,8 +83,7 @@ #define L2TP_SLFLAG_S 0x40000000 #define L2TP_SL_SEQ_MASK 0x00ffffff -#define L2TP_HDR_SIZE_SEQ 10 -#define L2TP_HDR_SIZE_NOSEQ 6 +#define L2TP_HDR_SIZE_MAX 14 /* Default trace flags */ #define L2TP_DEFAULT_DEBUG_FLAGS 0 @@ -705,11 +704,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, "%s: recv data ns=%u, session nr=%u\n", session->name, ns, session->nr); } + ptr += 4; } - /* Advance past L2-specific header, if present */ - ptr += session->l2specific_len; - if (L2TP_SKB_CB(skb)->has_seq) { /* Received a packet with sequence numbers. If we're the LNS, * check if we sre sending sequence numbers and if not, @@ -860,7 +857,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, __skb_pull(skb, sizeof(struct udphdr)); /* Short packet? */ - if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) { + if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX)) { l2tp_info(tunnel, L2TP_MSG_DATA, "%s: recv short packet (len=%d)\n", tunnel->name, skb->len); @@ -933,6 +930,10 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, goto error; } + if (tunnel->version == L2TP_HDR_VER_3 && + l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) + goto error; + l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook); return 0; @@ -1031,21 +1032,20 @@ static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf) memcpy(bufp, &session->cookie[0], session->cookie_len); bufp += session->cookie_len; } - if (session->l2specific_len) { - if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) { - u32 l2h = 0; - if (session->send_seq) { - l2h = 0x40000000 | session->ns; - session->ns++; - session->ns &= 0xffffff; - l2tp_dbg(session, L2TP_MSG_SEQ, - "%s: updated ns to %u\n", - session->name, session->ns); - } + if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) { + u32 l2h = 0; - *((__be32 *) bufp) = htonl(l2h); + if (session->send_seq) { + l2h = 0x40000000 | session->ns; + session->ns++; + session->ns &= 0xffffff; + l2tp_dbg(session, L2TP_MSG_SEQ, + "%s: updated ns to %u\n", + session->name, session->ns); } - bufp += session->l2specific_len; + + *((__be32 *)bufp) = htonl(l2h); + bufp += 4; } if (session->offset) bufp += session->offset; @@ -1724,7 +1724,7 @@ int l2tp_session_delete(struct l2tp_session *session) EXPORT_SYMBOL_GPL(l2tp_session_delete); /* We come here whenever a session's send_seq, cookie_len or - * l2specific_len parameters are set. + * l2specific_type parameters are set. 
*/ void l2tp_session_set_header_len(struct l2tp_session *session, int version) { @@ -1733,7 +1733,8 @@ void l2tp_session_set_header_len(struct l2tp_session *session, int version) if (session->send_seq) session->hdr_len += 4; } else { - session->hdr_len = 4 + session->cookie_len + session->l2specific_len + session->offset; + session->hdr_len = 4 + session->cookie_len + session->offset; + session->hdr_len += l2tp_get_l2specific_len(session); if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP) session->hdr_len += 4; } diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index 9cf546846edb..fad47e9d74bc 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -313,6 +313,37 @@ do { \ #define l2tp_session_dec_refcount(s) l2tp_session_dec_refcount_1(s) #endif +static inline int l2tp_get_l2specific_len(struct l2tp_session *session) +{ + switch (session->l2specific_type) { + case L2TP_L2SPECTYPE_DEFAULT: + return 4; + case L2TP_L2SPECTYPE_NONE: + default: + return 0; + } +} + +static inline int l2tp_v3_ensure_opt_in_linear(struct l2tp_session *session, struct sk_buff *skb, + unsigned char **ptr, unsigned char **optr) +{ + int opt_len = session->peer_cookie_len + l2tp_get_l2specific_len(session); + + if (opt_len > 0) { + int off = *ptr - *optr; + + if (!pskb_may_pull(skb, off + opt_len)) + return -1; + + if (skb->data != *optr) { + *optr = skb->data; + *ptr = skb->data + off; + } + } + + return 0; +} + #define l2tp_printk(ptr, type, func, fmt, ...) \ do { \ if (((ptr)->debug) & (type)) \ diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index af74e3ba0f92..7efb3cadc152 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -163,6 +163,9 @@ static int l2tp_ip_recv(struct sk_buff *skb) print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length); } + if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) + goto discard; + l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook); return 0; diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index a30f6fb6caa9..2146c9fc0cb4 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -174,6 +174,9 @@ static int l2tp_ip6_recv(struct sk_buff *skb) print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length); } + if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) + goto discard; + l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook); return 0; diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c index 94d05806a9a2..f0ecaec1ff3d 100644 --- a/net/netrom/nr_timer.c +++ b/net/netrom/nr_timer.c @@ -53,21 +53,21 @@ void nr_start_t1timer(struct sock *sk) { struct nr_sock *nr = nr_sk(sk); - mod_timer(&nr->t1timer, jiffies + nr->t1); + sk_reset_timer(sk, &nr->t1timer, jiffies + nr->t1); } void nr_start_t2timer(struct sock *sk) { struct nr_sock *nr = nr_sk(sk); - mod_timer(&nr->t2timer, jiffies + nr->t2); + sk_reset_timer(sk, &nr->t2timer, jiffies + nr->t2); } void nr_start_t4timer(struct sock *sk) { struct nr_sock *nr = nr_sk(sk); - mod_timer(&nr->t4timer, jiffies + nr->t4); + sk_reset_timer(sk, &nr->t4timer, jiffies + nr->t4); } void nr_start_idletimer(struct sock *sk) @@ -75,37 +75,37 @@ void nr_start_idletimer(struct sock *sk) struct nr_sock *nr = nr_sk(sk); if (nr->idle > 0) - mod_timer(&nr->idletimer, jiffies + nr->idle); + sk_reset_timer(sk, &nr->idletimer, jiffies + nr->idle); } void nr_start_heartbeat(struct sock *sk) { - mod_timer(&sk->sk_timer, jiffies + 5 * HZ); + sk_reset_timer(sk, &sk->sk_timer, jiffies + 5 * HZ); } void nr_stop_t1timer(struct sock *sk) { - 
del_timer(&nr_sk(sk)->t1timer); + sk_stop_timer(sk, &nr_sk(sk)->t1timer); } void nr_stop_t2timer(struct sock *sk) { - del_timer(&nr_sk(sk)->t2timer); + sk_stop_timer(sk, &nr_sk(sk)->t2timer); } void nr_stop_t4timer(struct sock *sk) { - del_timer(&nr_sk(sk)->t4timer); + sk_stop_timer(sk, &nr_sk(sk)->t4timer); } void nr_stop_idletimer(struct sock *sk) { - del_timer(&nr_sk(sk)->idletimer); + sk_stop_timer(sk, &nr_sk(sk)->idletimer); } void nr_stop_heartbeat(struct sock *sk) { - del_timer(&sk->sk_timer); + sk_stop_timer(sk, &sk->sk_timer); } int nr_t1timer_running(struct sock *sk) diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index 624c4719e404..537917dfa83a 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c @@ -409,7 +409,7 @@ static int __parse_flow_nlattrs(const struct nlattr *attr, return -EINVAL; } - if (!nz || !is_all_zero(nla_data(nla), expected_len)) { + if (!nz || !is_all_zero(nla_data(nla), nla_len(nla))) { attrs |= 1 << type; a[type] = nla; } diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c index 0fc76d845103..9f704a7f2a28 100644 --- a/net/rose/rose_route.c +++ b/net/rose/rose_route.c @@ -848,6 +848,7 @@ void rose_link_device_down(struct net_device *dev) /* * Route a frame to an appropriate AX.25 connection. + * A NULL ax25_cb indicates an internally generated frame. */ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25) { @@ -865,6 +866,10 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25) if (skb->len < ROSE_MIN_LEN) return res; + + if (!ax25) + return rose_loopback_queue(skb, NULL); + frametype = skb->data[2]; lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF); if (frametype == ROSE_CALL_REQUEST && diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index ca4ecc246347..a4b492bb7fe5 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -1857,7 +1857,6 @@ done: int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res, bool compat_mode) { - __be16 protocol = tc_skb_protocol(skb); #ifdef CONFIG_NET_CLS_ACT const struct tcf_proto *old_tp = tp; int limit = 0; @@ -1865,6 +1864,7 @@ int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp, reclassify: #endif for (; tp; tp = rcu_dereference_bh(tp->next)) { + __be16 protocol = tc_skb_protocol(skb); int err; if (tp->protocol != protocol && @@ -1891,7 +1891,6 @@ reset: } tp = old_tp; - protocol = tc_skb_protocol(skb); goto reclassify; #endif } diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c index 2b96b11fbe71..1d9dfb92b3b4 100644 --- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c +++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c @@ -398,7 +398,13 @@ static int sst_media_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { - snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params)); + int ret; + + ret = + snd_pcm_lib_malloc_pages(substream, + params_buffer_bytes(params)); + if (ret) + return ret; memset(substream->runtime->dma_area, 0, params_buffer_bytes(params)); return 0; } diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c index 60edec383281..bf5ee8906fb2 100644 --- a/tools/perf/util/unwind-libdw.c +++ b/tools/perf/util/unwind-libdw.c @@ -41,13 +41,13 @@ static int __report_module(struct addr_location *al, u64 ip, Dwarf_Addr s; dwfl_module_info(mod, NULL, &s, NULL, NULL, NULL, NULL, NULL); - if (s != al->map->start) + if (s != al->map->start - 
al->map->pgoff) mod = 0; } if (!mod) mod = dwfl_report_elf(ui->dwfl, dso->short_name, - dso->long_name, -1, al->map->start, + (dso->symsrc_filename ? dso->symsrc_filename : dso->long_name), -1, al->map->start - al->map->pgoff, false); return mod && dwfl_addrmodule(ui->dwfl, ip) == mod ? 0 : -1;
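
Two of the changes above benefit from a short illustration. First, the fq_find() rework in net/ipv6/reassembly.c and nf_conntrack_reasm.c drops the open-coded hash/match pair and keys fragment queues on a flat compare key (struct frag_v6_compare_key) that is hashed as a whole by jhash2() and matched by memcmp() in the rhashtable obj_cmpfn. The stand-alone userspace sketch below only illustrates that pattern: the names (frag_key, frag_entry, frag_hash, frag_key_equal) are invented for the example, and the FNV-1a hash is merely a stand-in for jhash2(); it is not the kernel implementation, and the kernel's cmpfn returns 0 on a match rather than a boolean.

/* Minimal userspace sketch of a whole-struct compare key: hash all key
 * bytes, compare all key bytes. Not kernel code; names are invented. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct in6_addr_demo { uint8_t s6_addr[16]; };

struct frag_key {
	struct in6_addr_demo saddr;
	struct in6_addr_demo daddr;
	uint32_t id;
	uint32_t user;
	uint32_t iif;
};

struct frag_entry {
	struct frag_key key;
	int payload_len;
};

/* FNV-1a over the raw key bytes; stand-in for jhash2(key, len / 4, seed). */
static uint32_t frag_hash(const struct frag_key *key)
{
	const uint8_t *p = (const uint8_t *)key;
	uint32_t h = 2166136261u;
	size_t i;

	for (i = 0; i < sizeof(*key); i++) {
		h ^= p[i];
		h *= 16777619u;
	}
	return h;
}

/* Mirrors the idea of the obj_cmpfn: every field of the key must match. */
static int frag_key_equal(const struct frag_key *a, const struct frag_key *b)
{
	return memcmp(a, b, sizeof(*a)) == 0;
}

int main(void)
{
	struct frag_entry e = { .payload_len = 1280 };
	struct frag_key lookup;

	e.key.id = 0x1234;
	e.key.iif = 2;

	lookup = e.key;	/* same addresses/id/user/iif: same bucket, match */

	printf("bucket(e)=%u bucket(lookup)=%u match=%d\n",
	       (unsigned)(frag_hash(&e.key) & 0xff),
	       (unsigned)(frag_hash(&lookup) & 0xff),
	       frag_key_equal(&e.key, &lookup));
	return 0;
}

Because both the hash and the comparison cover every field of the key, there is no per-field match logic to keep in sync, which is what allows ip6_frag_match() and inet6_hash_frag() to be removed in the hunks above.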

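Second, the new l2tp_v3_ensure_opt_in_linear() helper guards the L2TPv3 receive paths: before the peer cookie and L2-specific sublayer are parsed, it verifies that those optional bytes are actually present in the linear part of the skb and re-derives ptr/optr in case pskb_may_pull() moved skb->data. The snippet below is a hedged userspace sketch of that idea only, with an invented demo_pkt type and no skb handling; it shows the bounds check and pointer re-derivation, not the kernel API.

/* Userspace sketch: check optional header bytes fit in the contiguous
 * region before parsing, then recompute the parse pointers. Illustrative
 * only; types and the relocation step are simplified stand-ins. */
#include <stddef.h>
#include <stdio.h>

struct demo_pkt {
	unsigned char *data;	/* start of the contiguous ("linear") region */
	size_t linear_len;	/* bytes guaranteed to be contiguous */
};

static int ensure_opt_in_linear(const struct demo_pkt *pkt,
				unsigned char **ptr, unsigned char **optr,
				size_t opt_len)
{
	size_t off = (size_t)(*ptr - *optr);

	if (opt_len == 0)
		return 0;
	if (off + opt_len > pkt->linear_len)
		return -1;	/* caller must drop the frame */

	/* In the kernel, pskb_may_pull() may relocate skb->data; the real
	 * helper re-derives optr and ptr from the new data pointer so
	 * parsing resumes at the same logical offset. Mirrored here. */
	*optr = pkt->data;
	*ptr = pkt->data + off;
	return 0;
}

int main(void)
{
	unsigned char buf[14] = { 0 };
	struct demo_pkt pkt = { .data = buf, .linear_len = sizeof(buf) };
	unsigned char *optr = buf;
	unsigned char *ptr = buf + 12;

	/* A 4-byte L2-specific sublayer would overrun this 14-byte packet. */
	printf("result=%d\n", ensure_opt_in_linear(&pkt, &ptr, &optr, 4));
	return 0;
}

In the kernel the overrun case makes the caller discard the frame, which is why the same check appears in l2tp_udp_recv_core(), l2tp_ip_recv() and l2tp_ip6_recv() in the hunks above.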